diff --git a/Jenkinsfile2 b/Jenkinsfile2 index 54234cc547..9c28489a39 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -387,7 +387,7 @@ pipeline { } steps { catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { - timeout(time: 55, unit: 'MINUTES'){ + timeout(time: 75, unit: 'MINUTES'){ pre_test_win() pre_test_build_win() run_win_ctest() diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 94ed46e5e2..ae3b626f88 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 634399d + GIT_TAG 61cbfd2 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md index 2eb4bba309..b32d2af5bb 100644 --- a/docs/en/12-taos-sql/10-function.md +++ b/docs/en/12-taos-sql/10-function.md @@ -796,19 +796,23 @@ HISTOGRAM(expr,bin_type, bin_description, normalized) ### PERCENTILE
```sql -PERCENTILE(expr, p) +PERCENTILE(expr, p [, p1] ...) ```
**Description**: The value whose rank in a specific column matches the specified percentage. If such a value matching the specified percentage doesn't exist in the column, an interpolation value will be returned.
-**Return value type**: DOUBLE +**Return value type**: This function takes a minimum of 2 and a maximum of 11 parameters, and can return up to 10 percentiles at a time. If 2 parameters are given, a single percentile is returned and the value type is DOUBLE.
 + If more than 2 parameters are given, the return value type is a VARCHAR string formatted as a JSON array that contains all the return values.
**Applicable column types**: Numeric
**Applicable table types**: table only
-**More explanations**: _p_ is in range [0,100], when _p_ is 0, the result is same as using function MIN; when _p_ is 100, the result is same as function MAX. +**More explanations**:
+- _p_ is in range [0,100]; when _p_ is 0, the result is the same as using the MIN function, and when _p_ is 100, the result is the same as using the MAX function.
+- When calculating multiple percentiles of a specific column, using a single PERCENTILE function with multiple parameters is advised, as this can greatly reduce the query response time.
 + For example, using SELECT percentile(col, 90, 95, 99) FROM table will perform better than SELECT percentile(col, 90), percentile(col, 95), percentile(col, 99) FROM table.
## Selection Functions
diff --git a/docs/en/14-reference/11-docker/index.md b/docs/en/14-reference/11-docker/index.md index b9278c6961..5a48f2e4b1 100644 --- a/docs/en/14-reference/11-docker/index.md +++ b/docs/en/14-reference/11-docker/index.md @@ -273,49 +273,48 @@ password: taosdata ## Start the TDengine cluster with docker-compose -1. The following docker-compose file starts a TDengine cluster with two replicas, two management nodes, two data nodes, and one arbitrator. +1. The following docker-compose file starts a TDengine cluster with three nodes. 
- ```docker - version: "3" - services: - arbitrator: - image: tdengine/tdengine:$VERSION - command: tarbitrator - td-1: - image: tdengine/tdengine:$VERSION - environment: - TAOS_FQDN: "td-1" - TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 - volumes: - - taosdata-td1:/var/lib/taos/ - - taoslog-td1:/var/log/taos/ - td-2: - image: tdengine/tdengine:$VERSION - environment: - TAOS_FQDN: "td-2" - TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 - volumes: - - taosdata-td2:/var/lib/taos/ - - taoslog-td2:/var/log/taos/ - volumes: - taosdata-td1: - taoslog-td1: - taosdata-td2: - taoslog-td2: - ``` +```yml +version: "3" +services: + td-1: + image: tdengine/tdengine:$VERSION + environment: + TAOS_FQDN: "td-1" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td1:/var/lib/taos/ + - taoslog-td1:/var/log/taos/ + td-2: + image: tdengine/tdengine:$VERSION + environment: + TAOS_FQDN: "td-2" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td2:/var/lib/taos/ + - taoslog-td2:/var/log/taos/ + td-3: + image: tdengine/tdengine:$VERSION + environment: + TAOS_FQDN: "td-3" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td3:/var/lib/taos/ + - taoslog-td3:/var/log/taos/ +volumes: + taosdata-td1: + taoslog-td1: + taosdata-td2: + taoslog-td2: + taosdata-td3: + taoslog-td3: +``` :::note - The `VERSION` environment variable is used to set the tdengine image tag - `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time -- `TAOS_REPLICA` is used to set the default number of database replicas. Its value range is [1,3] - We recommend setting it with `TAOS_ARBITRATOR` to use arbitrator in a two-nodes environment. ::: 2. Start the cluster @@ -345,17 +344,18 @@ password: taosdata 4. Show dnodes via TDengine CLI - ```shell - $ docker-compose exec td-1 taos -s "show dnodes" +```shell +$ docker-compose exec td-1 taos -s "show dnodes" - taos> show dnodes - id | end_point | vnodes | cores | status | role | create_time | offline reason | - ====================================================================================================================================== - 1 | td-1:6030 | 1 | 8 | ready | any | 2022-01-18 02:47:42.871 | | - 2 | td-2:6030 | 0 | 8 | ready | any | 2022-01-18 02:47:43.518 | | - 0 | arbitrator:6042 | 0 | 0 | ready | arb | 2022-01-18 02:47:43.633 | - | - Query OK, 3 row(s) in set (0.000811s) - ``` +taos> show dnodes + id | endpoint | vnodes | support_vnodes | status | create_time | note | +====================================================================================================================================== + 1 | td-1:6030 | 0 | 32 | ready | 2022-08-19 07:57:29.971 | | + 2 | td-2:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.415 | | + 3 | td-3:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.417 | | +Query OK, 3 rows in database (0.021262s) + +``` ## taosAdapter @@ -373,83 +373,70 @@ password: taosdata Suppose you want to deploy multiple taosAdapters to improve throughput and provide high availability. In that case, the recommended configuration method uses a reverse proxy such as Nginx to offer a unified access entry. For specific configuration methods, please refer to the official documentation of Nginx. 
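Once a deployment along these lines is running, the unified entry point can be sanity-checked with a plain REST request through the proxied port 6041. This is only a quick sketch: it assumes the stack from the compose file below is up on the local machine and still uses the default `root`/`taosdata` credentials.

```shell
# Issue a query through the Nginx entry point; requests on port 6041 are load-balanced across the taosadapter replicas
curl -u root:taosdata -d "show databases" localhost:6041/rest/sql
```
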
Here is an example: - ```docker - version: "3" +```yml +version: "3" - networks: - inter: - api: +networks: + inter: - services: - arbitrator: - image: tdengine/tdengine:$VERSION - command: tarbitrator - networks: - - inter - td-1: - image: tdengine/tdengine:$VERSION - networks: - - inter - environment: - TAOS_FQDN: "td-1" - TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 - volumes: - - taosdata-td1:/var/lib/taos/ - - taoslog-td1:/var/log/taos/ - td-2: - image: tdengine/tdengine:$VERSION - networks: - - inter - environment: - TAOS_FQDN: "td-2" - TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 - volumes: - - taosdata-td2:/var/lib/taos/ - - taoslog-td2:/var/log/taos/ - adapter: - image: tdengine/tdengine:$VERSION - command: taosadapter - networks: - - inter - environment: - TAOS_FIRST_EP: "td-1" - TAOS_SECOND_EP: "td-2" - deploy: - replicas: 4 - nginx: - image: nginx - depends_on: - - adapter - networks: - - inter - - api - ports: - - 6041:6041 - - 6044:6044/udp - command: [ - "sh", - "-c", - "while true; - do curl -s http://adapter:6041/-/ping >/dev/null && break; - done; - printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}' - > /etc/nginx/conf.d/rest.conf; - printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}' - >> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf; - nginx -g 'daemon off;'", - ] - volumes: - taosdata-td1: - taoslog-td1: - taosdata-td2: - taoslog-td2: - ``` +services: + td-1: + image: tdengine/tdengine:$VERSION + networks: + - inter + environment: + TAOS_FQDN: "td-1" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td1:/var/lib/taos/ + - taoslog-td1:/var/log/taos/ + td-2: + image: tdengine/tdengine:$VERSION + networks: + - inter + environment: + TAOS_FQDN: "td-2" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td2:/var/lib/taos/ + - taoslog-td2:/var/log/taos/ + adapter: + image: tdengine/tdengine:$VERSION + entrypoint: "taosadapter" + networks: + - inter + environment: + TAOS_FIRST_EP: "td-1" + TAOS_SECOND_EP: "td-2" + deploy: + replicas: 4 + nginx: + image: nginx + depends_on: + - adapter + networks: + - inter + ports: + - 6041:6041 + - 6044:6044/udp + command: [ + "sh", + "-c", + "while true; + do curl -s http://adapter:6041/-/ping >/dev/null && break; + done; + printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}' + > /etc/nginx/conf.d/rest.conf; + printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}' + >> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf; + nginx -g 'daemon off;'", + ] +volumes: + taosdata-td1: + taoslog-td1: + taosdata-td2: + taoslog-td2: +``` ## Deploy with docker swarm diff --git a/docs/en/25-application/03-immigrate.md b/docs/en/25-application/_03-immigrate.md similarity index 99% rename from docs/en/25-application/03-immigrate.md rename to docs/en/25-application/_03-immigrate.md index 30d069e4e2..5f4a86937e 100644 --- a/docs/en/25-application/03-immigrate.md +++ b/docs/en/25-application/_03-immigrate.md @@ -184,7 +184,7 @@ TDengine supports the standard JDBC 3.0 interface for manipulating databases, bu To facilitate historical data migration, we provide a plug-in for the data synchronization tool DataX, which can automatically write data into TDengine.The automatic data migration of DataX can only support the data migration process of a single value model. 
-For the specific usage of DataX and how to use DataX to write data to TDengine, please refer to [DataX-based TDengine Data Migration Tool](https://www.taosdata.com/blog/2021/10/26/3156.html). +For the specific usage of DataX and how to use DataX to write data to TDengine, please refer to [DataX-based TDengine Data Migration Tool](https://www.taosdata.com/engineering/16401.html). After migrating via DataX, we found that we can significantly improve the efficiency of migrating historical data by starting multiple processes and migrating numerous metrics simultaneously. The following are some records of the migration process. We provide these as a reference for application migration. diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index 647f917422..94f8052051 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -798,18 +798,22 @@ HISTOGRAM(expr,bin_type, bin_description, normalized) ### PERCENTILE ```sql -PERCENTILE(expr, p) +PERCENTILE(expr, p [, p1] ... ) ``` **功能说明**:统计表中某列的值百分比分位数。 -**返回数据类型**: DOUBLE。 +**返回数据类型**: 该函数最小参数个数为 2 个,最大参数个数为 11 个。可以最多同时返回 10 个百分比分位数。当参数个数为 2 时, 返回一个分位数, 类型为DOUBLE,当参数个数大于 2 时,返回类型为VARCHAR, 格式为包含多个返回值的JSON数组。 **应用字段**:数值类型。 **适用于**:表。 -**使用说明**:*P*值取值范围 0≤*P*≤100,为 0 的时候等同于 MIN,为 100 的时候等同于 MAX。 +**使用说明**: + +- *P*值取值范围 0≤*P*≤100,为 0 的时候等同于 MIN,为 100 的时候等同于 MAX; +- 同时计算针对同一列的多个分位数时,建议使用一个PERCENTILE函数和多个参数的方式,能很大程度上降低查询的响应时间。 + 比如,使用查询SELECT percentile(col, 90, 95, 99) FROM table, 性能会优于SELECT percentile(col, 90), percentile(col, 95), percentile(col, 99) from table。 ## 选择函数 diff --git a/docs/zh/12-taos-sql/24-show.md b/docs/zh/12-taos-sql/24-show.md index 0a326729f2..c8f4afc06b 100644 --- a/docs/zh/12-taos-sql/24-show.md +++ b/docs/zh/12-taos-sql/24-show.md @@ -197,7 +197,7 @@ Total_Size. 
: 表 d0 所有 block 在文件中占用的大小为 93.65 KB Average_size: 平均每个 block 在文件中占用的空间大小为 18.73 KB -Compression_Ratio: 数据压缩率为 23.98% +Compression_Ratio: 数据压缩率 23.98% *************************** 2.row *************************** @@ -212,16 +212,18 @@ MinRows: BLOCK 中最小的行数,为 3616 行 MaxRows: BLOCK 中最大的行数,为 4096行 -Average_Rows: BLOCK 中的平均行数,为4000 行 +Average_Rows: 每个 BLOCK 中的平均行数,为4000 行 *************************** 3.row *************************** -_block_dist: Total_Tables=[1] Total_Files=[2] +_block_dist: Total_Tables=[1] Total_Files=[2] Total_Vgroups=[1] -Total_Tables: 表示子表的个数,这里为1 +Total_Tables: 子表的个数,这里为 1 -Total_Files: 表数据保存在几个文件中,这里保存在 2 个文件中 +Total_Files: 表数据被分别保存的数据文件数量,这里是 2 个文件 + +Total_Vgroups: 表数据分布的虚拟节点(vnode)数量 *************************** 5.row *************************** diff --git a/docs/zh/14-reference/11-docker/index.md b/docs/zh/14-reference/11-docker/index.md index 086d72940c..a6696977f9 100644 --- a/docs/zh/14-reference/11-docker/index.md +++ b/docs/zh/14-reference/11-docker/index.md @@ -309,7 +309,7 @@ services: TAOS_FIRST_EP: "td-1" volumes: - taosdata-td3:/var/lib/taos/ - - taoslog-td3:/var/log/taos/ + - taoslog-td3:/var/log/taos/ volumes: taosdata-td1: taoslog-td1: @@ -473,18 +473,18 @@ Creating service taos_adapter ```shell $ docker stack ps taos ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -7m3sbf532bqp taos_adapter.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago -pj403n6ofmmh taos_adapter.2 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago -rxqfwsyk5q1h taos_adapter.3 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago -qj40lpxr40oc taos_adapter.4 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago -oe3455ulxpze taos_nginx.1 nginx:latest vm98 Running Running about a minute ago -o0tsg70nrrc6 taos_td-1.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago +7m3sbf532bqp taos_adapter.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago +pj403n6ofmmh taos_adapter.2 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago +rxqfwsyk5q1h taos_adapter.3 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago +qj40lpxr40oc taos_adapter.4 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago +oe3455ulxpze taos_nginx.1 nginx:latest vm98 Running Running about a minute ago +o0tsg70nrrc6 taos_td-1.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago q5m1oxs589cp taos_td-2.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago $ docker service ls ID NAME MODE REPLICAS IMAGE PORTS -ozuklorgl8bs taos_adapter replicated 4/4 tdengine/tdengine:3.0.0.0 +ozuklorgl8bs taos_adapter replicated 4/4 tdengine/tdengine:3.0.0.0 crmhdjw6vxw0 taos_nginx replicated 1/1 nginx:latest *:6041->6041/tcp, *:6044->6044/udp -o86ngy7csv5n taos_td-1 replicated 1/1 tdengine/tdengine:3.0.0.0 +o86ngy7csv5n taos_td-1 replicated 1/1 tdengine/tdengine:3.0.0.0 rma040ny4tb0 taos_td-2 replicated 1/1 tdengine/tdengine:3.0.0.0 ``` @@ -495,11 +495,11 @@ rma040ny4tb0 taos_td-2 replicated 1/1 tdengine/tdengine:3.0.0.0 ```shell $ docker service scale taos_adapter=1 taos_adapter scaled to 1 -overall progress: 1 out of 1 tasks -1/1: running [==================================================>] +overall progress: 1 out of 1 tasks +1/1: running [==================================================>] verify: Service converged $ docker service ls -f name=taos_adapter ID NAME MODE REPLICAS IMAGE PORTS -ozuklorgl8bs taos_adapter replicated 1/1 tdengine/tdengine:3.0.0.0 +ozuklorgl8bs 
taos_adapter replicated 1/1 tdengine/tdengine:3.0.0.0 ``` diff --git a/docs/zh/25-application/_03-immigrate.md b/docs/zh/25-application/03-immigrate.md similarity index 70% rename from docs/zh/25-application/_03-immigrate.md rename to docs/zh/25-application/03-immigrate.md index d1c9caea09..75788c0cc7 100644 --- a/docs/zh/25-application/_03-immigrate.md +++ b/docs/zh/25-application/03-immigrate.md @@ -18,82 +18,15 @@ title: OpenTSDB 应用迁移到 TDengine 的最佳实践 如果我们将原本运行在 OpenTSDB 上的应用迁移到 TDengine 上,不仅可以有效地降低计算和存储资源的占用、减少部署服务器的规模,还能够极大减少运行维护的成本的输出,让运维管理工作更简单、更轻松,大幅降低总拥有成本。与 OpenTSDB 一样,TDengine 也已经进行了开源,不同的是,除了单机版,后者还实现了集群版开源,被厂商绑定的顾虑一扫而空。 -在下文中我们将就“使用最典型并广泛应用的运维监控(DevOps)场景”来说明,如何在不编码的情况下将 OpenTSDB 的应用快速、安全、可靠地迁移到 TDengine 之上。后续的章节会做更深度的介绍,以便于进行非 DevOps 场景的迁移。 +在下文中我们将说明如何在不编码的情况下将 OpenTSDB 的应用快速、安全、可靠地迁移到 TDengine 之上。 -## DevOps 应用快速迁移 +## TDengine 与 OpenTSDB 的差异 -### 1、典型应用场景 - -一个典型的 DevOps 应用场景的系统整体的架构如下图(图 1) 所示。 - -**图 1. DevOps 场景中典型架构** -![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp "图1. DevOps 场景中典型架构") - -在该应用场景中,包含了部署在应用环境中负责收集机器度量(Metrics)、网络度量(Metrics)以及应用度量(Metrics)的 Agent 工具、汇聚 Agent 收集信息的数据收集器,数据持久化存储和管理的系统以及监控数据可视化工具(例如:Grafana 等)。 - -其中,部署在应用节点的 Agents 负责向 collectd/Statsd 提供不同来源的运行指标,collectd/StatsD 则负责将汇聚的数据推送到 OpenTSDB 集群系统,然后使用可视化看板 Grafana 将数据可视化呈现出来。 - -### 2、迁移服务 - -- **TDengine 安装部署** - -首先是 TDengine 的安装,从官网上下载 TDengine 最新稳定版进行安装。各种安装包的使用帮助请参见博客[《TDengine 多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)。 - -注意,安装完成以后,不要立即启动 `taosd` 服务,在正确配置完成参数以后再启动。 - -- **调整数据收集器配置** - -在 TDengine 2.4 版本中,包含一个组件 taosAdapter。taosAdapter 是一个无状态、可快速弹性伸缩的组件,它可以兼容 Influxdb 的 Line Protocol 和 OpenTSDB 的 telnet/JSON 写入协议规范,提供了丰富的数据接入能力,有效的节省用户迁移成本,降低用户应用迁移的难度。 - -用户可以根据需求弹性部署 taosAdapter 实例,结合场景的需要,快速提升数据写入的吞吐量,为不同应用场景下的数据写入提供保障。 - -通过 taosAdapter,用户可以将 collectd 或 StatsD 收集的数据直接推送到 TDengine ,实现应用场景的无缝迁移,非常的轻松便捷。taosAdapter 还支持 Telegraf、Icinga、TCollector 、node_exporter 的数据接入,使用详情参考[taosAdapter](/reference/taosadapter/)。 - -如果使用 collectd,修改其默认位置 `/etc/collectd/collectd.conf` 的配置文件为指向 taosAdapter 部署的节点 IP 地址和端口。假设 taosAdapter 的 IP 地址为 192.168.1.130,端口为 6046,配置如下: - -```html -LoadPlugin write_tsdb - - - Host "192.168.1.130" Port "6046" HostTags "status=production" StoreRates - false AlwaysAppendDS false - - -``` - -即可让 collectd 将数据使用推送到 OpenTSDB 的插件方式推送到 taosAdapter, taosAdapter 将调用 API 将数据写入到 TDengine 中,从而完成数据的写入工作。如果你使用的是 StatsD 相应地调整配置文件信息。 - -- **调整看板(Dashboard)系统** - -在数据能够正常写入 TDengine 后,可以调整适配 Grafana 将写入 TDengine 的数据可视化呈现出来。获取和使用 TDengine 提供的 Grafana 插件请参考[与其他工具的连接](/third-party/grafana)。 - -TDengine 提供了默认的两套 Dashboard 模板,用户只需要将 Grafana 目录下的模板导入到 Grafana 中即可激活使用。 - -**图 2. 导入 Grafana 模板** -![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp "图2. 导入 Grafana 模板") - -操作完以上步骤后,就完成了将 OpenTSDB 替换成为 TDengine 的迁移工作。可以看到整个流程非常简单,不需要写代码,只需要对某些配置文件进行调整即可完成全部的迁移工作。 - -### 3、迁移后架构 - -完成迁移以后,此时的系统整体的架构如下图(图 3)所示,而整个过程中采集端、数据写入端、以及监控呈现端均保持了稳定,除了极少的配置调整外,不涉及任何重要的更改和变动。OpenTSDB 大量的应用场景均为 DevOps ,这种场景下,简单的参数设置即可完成 OpenTSDB 到 TDengine 迁移动作,使用上 TDengine 更加强大的处理能力和查询性能。 - -在绝大多数的 DevOps 场景中,如果你拥有一个小规模的 OpenTSDB 集群(3 台及以下的节点)作为 DevOps 的存储端,依赖于 OpenTSDB 为系统持久化层提供数据存储和查询功能,那么你可以安全地将其替换为 TDengine,并节约更多的计算和存储资源。在同等计算资源配置情况下,单台 TDengine 即可满足 3 ~ 5 台 OpenTSDB 节点提供的服务能力。如果规模比较大,那便需要采用 TDengine 集群。 - -如果你的应用特别复杂,或者应用领域并不是 DevOps 场景,你可以继续阅读后续的章节,更加全面深入地了解将 OpenTSDB 的应用迁移到 TDengine 的高级话题。 - -**图 3. 
迁移完成后的系统架构** -![TDengine Database IT-DevOps-Solutions-Immigrate-TDengine-Arch](./IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp "图 3. 迁移完成后的系统架构") - -## 其他场景的迁移评估与策略 - -### 1、TDengine 与 OpenTSDB 的差异 - -本章将详细介绍 OpenTSDB 与 TDengine 在系统功能层面上存在的差异。阅读完本章的内容,你可以全面地评估是否能够将某些基于 OpenTSDB 的复杂应用迁移到 TDengine 上,以及迁移之后应该注意的问题。 +本节将详细介绍 OpenTSDB 与 TDengine 在系统功能层面上存在的差异。阅读完本节的内容,你可以全面地评估是否能够将某些基于 OpenTSDB 的复杂应用迁移到 TDengine 上,以及迁移之后应该注意的问题。 TDengine 当前只支持 Grafana 的可视化看板呈现,所以如果你的应用中使用了 Grafana 以外的前端看板(例如[TSDash](https://github.com/facebook/tsdash)、[Status Wolf](https://github.com/box/StatusWolf)等),那么前端看板将无法直接迁移到 TDengine,需要将前端看板重新适配到 Grafana 才可以正常运行。 -在 2.3.0.x 版本中,TDengine 只能够支持 collectd 和 StatsD 作为数据收集汇聚软件,当然后面会陆续提供更多的数据收集聚合软件的接入支持。如果您的收集端使用了其他类型的数据汇聚器,您的应用需要适配到这两个数据汇聚端系统,才能够将数据正常写入。除了上述两个数据汇聚端软件协议以外,TDengine 还支持通过 InfluxDB 的行协议和 OpenTSDB 的数据写入协议、JSON 格式将数据直接写入,您可以重写数据推送端的逻辑,使用 TDengine 支持的行协议来写入数据。 +如果您的收集端使用了像 collectd 和 StatsD 这样的数据采集工具,要重新配置这些数据采集工具将数据写入到 TDengine。TDengine 还支持通过 InfluxDB 的行协议和 OpenTSDB 的数据写入协议、JSON 格式将数据直接写入,您可以重写数据推送端的逻辑,使用 TDengine 支持的行协议来写入数据。 此外,如果你的应用中使用了 OpenTSDB 以下特性,在将应用迁移到 TDengine 之前你还需要了解以下注意事项: @@ -104,11 +37,11 @@ TDengine 当前只支持 Grafana 的可视化看板呈现,所以如果你的 通过上面的介绍,相信你应该能够了解 OpenTSDB 迁移到 TDengine 带来的变化,这些信息也有助于你正确地判断是否可以接受将应用 迁移到 TDengine 之上,体验 TDengine 提供的强大的时序数据处理能力和便捷的使用体验。 -### 2、迁移策略 +## 迁移策略 首先将基于 OpenTSDB 的系统进行迁移涉及到的数据模式设计、系统规模估算、数据写入端改造,进行数据分流、应用适配工作;之后将两个系统并行运行一段时间,再将历史数据迁移到 TDengine 中。当然如果你的应用中有部分功能强依赖于上述 OpenTSDB 特性,同时又不希望停止使用,可以考虑保持原有的 OpenTSDB 系统运行,同时启动 TDengine 来提供主要的服务。 -## 数据模型设计 +### 数据模型设计 一方面,TDengine 要求其入库的数据具有严格的模式定义。另一方面,TDengine 的数据模型相对于 OpenTSDB 来说又更加丰富,多值模型能够兼容全部的单值模型的建立需求。 @@ -150,7 +83,7 @@ insert into memory_vm130_memory_buffered_collectd using memory tags(‘vm130’ 如果你想要利用 TDengine 的多值模型能力,需要首先满足以下要求:不同的采集量具有相同的采集频率,且能够通过消息队列**同时到达**数据写入端,从而确保使用 SQL 语句将多个指标一次性写入。将度量的名称作为超级表的名称,建立具有相同采集频率且能够同时到达的数据多列模型。子表的表名采用具有固定规则的方式进行命名。上述每个度量均只包含一个测量值,因此无法将其转化为多值模型。 -## 数据分流与应用适配 +### 数据分流与应用适配 从消息队列中订阅数据,并启动调整后的写入程序写入数据。 @@ -166,15 +99,128 @@ TDengine 不支持采用 OpenTSDB 的查询语法进行查询或数据获取处 TDengine 支持标准的 JDBC 3.0 接口操纵数据库,你也可以使用其他类型的高级语言的连接器来查询读取数据,以适配你的应用。具体的操作和使用帮助也请参阅用户手册。 -## 历史数据迁移 -### 1、使用工具自动迁移数据 +## 使用 DataX 迁移数据 -为了方便历史数据的迁移工作,我们为数据同步工具 DataX 提供了插件,能够将数据自动写入到 TDengine 中,需要注意的是 DataX 的自动化数据迁移只能够支持单值模型的数据迁移过程。 +为了方便历史数据的迁移工作,我们为数据同步工具 DataX 提供了适配 TDengine 3.0 的插件,能够将数据自动写入到 TDengine 中,需要注意的是 DataX 的自动化数据迁移只能够支持单值模型的数据迁移过程。 -DataX 具体的使用方式及如何使用 DataX 将数据写入 TDengine 请参见[基于 DataX 的 TDengine 数据迁移工具](https://www.taosdata.com/blog/2021/10/26/3156.html)。 +### 安装和部署 TDengine -在对 DataX 进行迁移实践后,我们发现通过启动多个进程,同时迁移多个 metric 的方式,可以大幅度的提高迁移历史数据的效率,下面是迁移过程中的部分记录,希望这些能为应用迁移工作带来参考。 +在进行数据迁移之前,要有一个正确运行的 TDengine 集群。首先是 TDengine 的安装,从官网上下载 TDengine 最新稳定版进行安装。各种安装包的使用帮助请参考 [安装指南](../../get-started/package) + +安装完成后,请根据 [部署指南](../../deployment/deploy) 配置集群。 + +### 插件功能介绍 + +1. TDengine30Reader 提供的功能: + 1. 支持通过 SQL 进行数据筛选; + 2. 根据时间间隔进行任务切分; + 3. 支持 TDengine 的全部数据类型; + 4. 支持批量读取,通过 batchSize 参数控制批量拉取结果集的大小,提高读取性能。 +2. TDengine30Writer 支持的功能: + 1. 支持 OpenTSDB 的 json 格式的行协议,使用 TDengine 的 schemaless 方式写入 TDengine。 + 2. 支持批量写入,通过 batchSize 参数控制批量写入的数量,提高写入性能。 + +### DataX 安装环境准备 + +1. 需要安装 TDengine 客户端 +2. 需要安装 JDK 1.8 环境(运行 DataX) +3. 需要安装 Python 环境(运行 DataX) +4. 需要 maven 编译环境(如果不编译 DataX 则可以不安装 maven) + +### 安装 + +1. 下载源码 +~~~ +git clone https://github.com/taosdata/DataX.git +~~~ +2. 编译打包 +~~~ +cd DataX +mvn -U clean package assembly:assembly -Dmaven.test.skip=true +~~~ +3. 
安装 +~~~ +cp target/datax.tar.gz your_install_dir +cd your_install_dir +tar -zxvf dataX.tar.gz +~~~ + +### 数据迁移 Job 的配置 + +以一个从 OpenTSDB 到 TDengine 3.0 版本的数据迁移任务为例,配置文件 opentsdb2tdengine.json 如下: +~~~ +{ + "job":{ + "content":[{ + "reader": { + "name": "opentsdbreader", + "parameter": { + "endpoint": "http://192.168.1.180:4242", + "column": ["weather_temperature"], + "beginDateTime": "2021-01-01 00:00:00", + "endDateTime": "2021-01-01 01:00:00" + } + }, + "writer": { + "name": "tdengine30writer", + "parameter": { + "username": "root", + "password": "taosdata", + "connection": [ + { + "table": [ + "matric1" + ], + "jdbcUrl": "jdbc:TAOS://192.168.1.101:6030/test?timestampFormat=TIMESTAMP" + } + ], + "batchSize": 1000, + "ignoreTagsUnmatched": true + } + } + }], + "setting": { + "speed": { + "channel": 1 + } + } + } + } +~~~ +配置说明: +1. 上面的配置表示,从 192.168.1.180 的 OpenTSDB,到 192.168.1.101 的 TDengine 的迁移。迁移 metric 为 weather_temperature,时间从 2021-01-01 00:00:00 开始,到 2021-01-01 01:00:00 结束的数据。 +2. reader 使用 datax 的 opentsdbreader,parameter 的配置请参考:[opentsdbreader.md#配置参数](https://github.com/taosdata/DataX/blob/master/opentsdbreader/doc/opentsdbreader.md) +3. tdengine30writer 的 parameter 中,user,password 为必须项,没有默认值。batchSize 不是必须项,默认值为 1。详细参考:[tdengine30writer.md#配置参数](https://github.com/taosdata/DataX/blob/master/tdengine30writer/doc/tdengine30writer-CN.md) +4. TDengine 中,如果 dbname 指定的 database 不存在,则需要在迁移前创建数据库。 + +### 执行迁移任务 + +~~~ +python bin/datax.py job/opentsdb2tdengine.json +~~~ + +### 限制条件 + +1. 目前,DataX 自带的 opentsdbreader 仅支持 OpenTSDB-2.3.X 版本。详细参考:[opentsdbreader#约束限制](https://github.com/alibaba/DataX/blob/master/opentsdbreader/doc/opentsdbreader.md#5-%E7%BA%A6%E6%9D%9F%E9%99%90%E5%88%B6) +2. 数据迁移工具依赖 TDengine 客户端中的 `libtaos.so/taos.dll/libtaos.dylib`,需要与服务端对应版本的 TDengine-client。 + +### FAQ + +1. 如何估算一个数据迁移任务所需要的资源 + DataX 的每个 reader 按照自己的 task 切分策略进行任务划分,具体请参考 DataX 的任务调度规则。在估算资源是,需要按照数据迁移的数据量,任务切分规则和网络带宽限制等综合考虑,最好以实际数据迁移测试结果为准。 +2. TDengine30Writer 的 batchSize 设置多大效率最高? + batchSize 是控制批量写入的参数,在获取 batchSize 行纪录后,TDengineWriter 会向 TDengine 发送一次写入请求,这减少了与 TDengine 交互次数,从而提高了性能。从测试结果来看,batchSize 在 500-1000 范围内效率最高。 +3. job 的配置中 channel 数为多少合适? + job 中的 channel 数为流量控制的参数,每个 channel 都需要开辟一块内存,用来缓存数据。如果 channel 设置过大,会引起 OOM,所以 channel 数并不是越大越好。增加 channel 数后,需要提高 JVM 内存大小。从测试结果来看,channel 在 1~6 的范围内都是合适,能够保证 DataX 的流量最大化即可。 +4. java.sql.SQLException: TDengine ERROR (8000060b): Invalid client value + 配置文件中 column 中没有配置 tbname,此时会触发行协议数据写入(行协议写入只会自动创建子表名,但需要提前创建好超级表),行协议写入的情况下不支持 TAG 数据类型为非 NCHAR,所以此种情况有两种解决方案:1.将 TAG 全部修改为 NCHAR 类型;2.在 Column 中配置好表名称这样不会触发行协议写入。 +5. java.sql.SQLException: TDengine ERROR (8000060b): Timestamp data out of range + 配置文件中 column 中没有配置 tbname,此时会触发行协议数据写入,且 TAG 全部为 NCHAR 类型,此时需要保证时间戳的一列名称为 _ts,而不能是其他名称(行协议写入下,默认将最后的时间戳写入到 _ts 一列,且不能随意命名)。若想避免请使用 tbname 指定表名以避免触发行协议写入。 + +### 提升性能 + + 在对 DataX 进行迁移实践后,我们发现通过启动多个进程,同时迁移多个 metric 的方式,可以大幅度的提高迁移历史数据的效率,下面是迁移过程中的部分记录,希望这些能为应用迁移工作带来参考。 | DataX 实例个数 (并发进程个数) | 迁移记录速度 (条/秒) | | ----------------------------- | --------------------- | @@ -184,9 +230,9 @@ DataX 具体的使用方式及如何使用 DataX 将数据写入 TDengine 请参 | 5 | 约 29.5 万 | | 10 | 约 33 万 | -
(注:测试数据源自 单节点 Intel(R) Core(TM) i7-10700 CPU@2.90GHz 16 核 64G 硬件设备,channel 和 batchSize 分别为 8 和 1000,每条记录包含 10 个 tag) +(注:测试数据源自 单节点 Intel(R) Core(TM) i7-10700 CPU@2.90GHz 16 核 64G 硬件设备,channel 和 batchSize 分别为 8 和 1000,每条记录包含 10 个 tag) -### 2、手动迁移数据 +## 手动迁移数据 如果你需要使用多值模型进行数据写入,就需要自行开发一个将数据从 OpenTSDB 导出的工具,然后确认哪些时间线能够合并导入到同一个时间线,再将可以同时导入的时间通过 SQL 语句的写入到数据库中。 @@ -393,31 +439,11 @@ WHERE ts>=1510560000 AND ts<=1515000009 综上所述,可使用单台 16 核 32GB 的机器,或者使用 2 台 8 核 16GB 机器构成的集群。 -## 附录 3: 集群部署及启动 - -TDengine 提供了丰富的帮助文档说明集群安装、部署的诸多方面的内容,这里提供相应的文档列表,供你参考。 - -### 集群部署 - -首先是安装 TDengine,从官网上下载 TDengine 最新稳定版,解压缩后运行 install.sh 进行安装。各种安装包的使用帮助请参见博客[《TDengine 多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)。 - -注意安装完成以后,不要立即启动 `taosd` 服务,在正确配置完成参数以后才启动 `taosd` 服务。 - -### 设置运行参数并启动服务 - -为确保系统能够正常获取运行的必要信息。请在服务端正确设置以下关键参数: - -FQDN、firstEp、secondEP、dataDir、logDir、tmpDir、serverPort。各参数的具体含义及设置的要求,可参见文档《[TDengine 集群安装、管理](/cluster/)》 - -按照相同的步骤,在需要运行的节点上设置参数,并启动 `taosd` 服务,然后添加 Dnode 到集群中。 - -最后启动 `taos` 命令行程序,执行命令 `show dnodes`,如果能看到所有的加入集群的节点,那么集群顺利搭建完成。具体的操作流程及注意事项,请参阅文档《[TDengine 集群安装、管理](/cluster/)》 - -## 附录 4: 超级表名称 +## 附录 3: 超级表名称 由于 OpenTSDB 的 metric 名称中带有点号(“.”),例如“cpu.usage_user”这种名称的 metric。但是点号在 TDengine 中具有特殊含义,是用来分隔数据库和表名称的分隔符。TDengine 也提供转义符,以允许用户在(超级)表名称中使用关键词或特殊分隔符(如:点号)。为了使用特殊字符,需要采用转义字符将表的名称括起来,例如:`cpu.usage_user`这样就是合法的(超级)表名称。 -## 附录 5:参考文章 +## 附录 4:参考文章 -1. [使用 TDengine + collectd/StatsD + Grafana 快速搭建 IT 运维监控系统](/application/collectd/) -2. [通过 collectd 将采集数据直接写入 TDengine](/third-party/collectd/) +1. [使用 TDengine + collectd/StatsD + Grafana 快速搭建 IT 运维监控系统](../collectd/) +2. [通过 collectd 将采集数据直接写入 TDengine](../../third-party/collectd/) diff --git a/examples/c/asyncdemo.c b/examples/c/asyncdemo.c index c86cd44354..91ec6f24b1 100644 --- a/examples/c/asyncdemo.c +++ b/examples/c/asyncdemo.c @@ -92,7 +92,7 @@ int main(int argc, char *argv[]) } // a simple way to parse input parameters - if (argc >= 3) strcpy(db, argv[2]); + if (argc >= 3) strncpy(db, argv[2], sizeof(db) - 1); if (argc >= 4) points = atoi(argv[3]); if (argc >= 5) numOfTables = atoi(argv[4]); diff --git a/include/common/tcommon.h b/include/common/tcommon.h index c6e21af644..f4e13509c2 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -368,6 +368,12 @@ typedef struct SSortExecInfo { int32_t readBytes; // read io bytes } SSortExecInfo; +typedef struct STUidTagInfo { + char* name; + uint64_t uid; + void* pTagVal; +} STUidTagInfo; + // stream special block column #define START_TS_COLUMN_INDEX 0 diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h index 0bfb057f20..8be5cb4d41 100644 --- a/include/common/tdataformat.h +++ b/include/common/tdataformat.h @@ -106,7 +106,7 @@ int32_t tBufferReserve(SBuffer *pBuffer, int64_t nData, void **ppData); // SRow ================================ int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow); -void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal); +int32_t tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal); void tRowDestroy(SRow *pRow); void tRowSort(SArray *aRowP); int32_t tRowMerge(SArray *aRowP, STSchema *pTSchema, int8_t flag); diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 209bc29e4f..b5fd6c0270 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -49,6 +49,7 @@ extern int32_t tsTagFilterResCacheSize; // queue & threads extern int32_t tsNumOfRpcThreads; +extern int32_t tsNumOfRpcSessions; extern int32_t 
tsNumOfCommitThreads; extern int32_t tsNumOfTaskQueueThreads; extern int32_t tsNumOfMnodeQueryThreads; @@ -86,9 +87,9 @@ extern int32_t tsTelemInterval; extern char tsTelemServer[]; extern uint16_t tsTelemPort; extern bool tsEnableCrashReport; -extern char* tsTelemUri; -extern char* tsClientCrashReportUri; -extern char* tsSvrCrashReportUri; +extern char *tsTelemUri; +extern char *tsClientCrashReportUri; +extern char *tsSvrCrashReportUri; // query buffer management extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing @@ -159,6 +160,8 @@ extern int32_t tsUptimeInterval; extern int32_t tsRpcRetryLimit; extern int32_t tsRpcRetryInterval; +extern bool tsDisableStream; + // #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDir, const char **envCmd, diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 71077f10cd..77b9d2d681 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -1846,6 +1846,7 @@ typedef struct { int8_t createStb; uint64_t targetStbUid; SArray* fillNullCols; // array of SColLocation + int64_t deleteMark; int8_t igUpdate; } SCMCreateStreamReq; diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 7aae38f7ba..46ca814e50 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -172,8 +172,8 @@ enum { TD_DEF_MSG_TYPE(TDMT_MND_SERVER_VERSION, "server-version", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_UPTIME_TIMER, "uptime-timer", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, "lost-consumer-clear", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_TIMER, "stream-checkpoint-tmr", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL) + // TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_TIMER, "stream-checkpoint-tmr", NULL, NULL) + // TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL) TD_NEW_MSG_SEG(TDMT_VND_MSG) diff --git a/include/libs/function/function.h b/include/libs/function/function.h index 32db6773e0..fb6ef26a8a 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -76,7 +76,7 @@ enum { enum { MAIN_SCAN = 0x0u, REVERSE_SCAN = 0x1u, // todo remove it - REPEAT_SCAN = 0x2u, // repeat scan belongs to the master scan + PRE_SCAN = 0x2u, // pre-scan belongs to the main scan and occurs before main scan }; typedef struct SPoint1 { @@ -132,14 +132,16 @@ typedef struct SqlFunctionCtx { SInputColumnInfoData input; SResultDataInfo resDataInfo; uint32_t order; // data block scanner order: asc|desc + uint8_t isPseudoFunc;// denote current function is pseudo function or not [added for perf reason] + uint8_t isNotNullFunc;// not return null value. 
uint8_t scanFlag; // record current running step, default: 0 int16_t functionId; // function id char *pOutput; // final result output buffer, point to sdata->data - int32_t numOfParams; // input parameter, e.g., top(k, 20), the number of results of top query is kept in param SFunctParam *param; // corresponding output buffer for timestamp of each result, e.g., diff/csum SColumnInfoData *pTsOutput; + int32_t numOfParams; int32_t offset; SResultRowEntryInfo *resultInfo; SSubsidiaryResInfo subsidiaries; @@ -152,7 +154,7 @@ typedef struct SqlFunctionCtx { struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity SSerializeDataHandle saveHandle; int32_t exprIdx; - char udfName[TSDB_FUNC_NAME_LEN]; + char *udfName; } SqlFunctionCtx; typedef struct tExprNode { diff --git a/include/libs/stream/streamState.h b/include/libs/stream/streamState.h index d7bc151ecc..6b09bf4899 100644 --- a/include/libs/stream/streamState.h +++ b/include/libs/stream/streamState.h @@ -114,6 +114,7 @@ int32_t streamStateGetParTag(SStreamState* pState, int64_t groupId, void** tagVa #if 0 char* streamStateSessionDump(SStreamState* pState); +char* streamStateIntervalDump(SStreamState* pState); #endif #ifdef __cplusplus diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 1c52d7ea5d..1d301623b1 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -175,20 +175,24 @@ typedef struct { void streamFreeQitem(SStreamQueueItem* data); +#if 0 bool streamQueueResEmpty(const SStreamQueueRes* pRes); int64_t streamQueueResSize(const SStreamQueueRes* pRes); SStreamQueueNode* streamQueueResFront(SStreamQueueRes* pRes); SStreamQueueNode* streamQueueResPop(SStreamQueueRes* pRes); void streamQueueResClear(SStreamQueueRes* pRes); SStreamQueueRes streamQueueBuildRes(SStreamQueueNode* pNode); +#endif typedef struct { SStreamQueueNode* pHead; } SStreamQueue1; +#if 0 bool streamQueueHasTask(const SStreamQueue1* pQueue); int32_t streamQueuePush(SStreamQueue1* pQueue, SStreamQueueItem* pItem); SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue); +#endif typedef struct { STaosQueue* queue; @@ -633,9 +637,10 @@ typedef struct SStreamMeta { SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId); void streamMetaClose(SStreamMeta* streamMeta); +int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask); int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask); int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* msg, int32_t msgLen); -SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId); +// SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId); SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId); void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask); @@ -644,7 +649,7 @@ void streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId); int32_t streamMetaBegin(SStreamMeta* pMeta); int32_t streamMetaCommit(SStreamMeta* pMeta); int32_t streamMetaRollBack(SStreamMeta* pMeta); -int32_t streamLoadTasks(SStreamMeta* pMeta); +int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver); // checkpoint int32_t streamProcessCheckpointSourceReq(SStreamMeta* pMeta, SStreamTask* pTask, SStreamCheckpointSourceReq* pReq); diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index defafce30e..5e37da4f3f 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -36,7 +36,7 @@ extern "C" { #define 
SYNC_DEL_WAL_MS (1000 * 60) #define SYNC_ADD_QUORUM_COUNT 3 #define SYNC_MNODE_LOG_RETENTION 10000 -#define SYNC_VNODE_LOG_RETENTION 20 +#define SYNC_VNODE_LOG_RETENTION (TSDB_SYNC_LOG_BUFFER_RETENTION + 1) #define SNAPSHOT_MAX_CLOCK_SKEW_MS 1000 * 10 #define SNAPSHOT_WAIT_MS 1000 * 30 diff --git a/include/libs/transport/trpc.h b/include/libs/transport/trpc.h index de3c2a9f52..0cc0ab64ef 100644 --- a/include/libs/transport/trpc.h +++ b/include/libs/transport/trpc.h @@ -112,7 +112,12 @@ typedef struct SRpcInit { // fail fast fp RpcFFfp ffp; - void *parent; + int32_t connLimitNum; + int32_t connLimitLock; + + int8_t supportBatch; // 0: no batch, 1. batch + int32_t batchSize; + void *parent; } SRpcInit; typedef struct { diff --git a/include/os/osEnv.h b/include/os/osEnv.h index 533d989ffc..bc65da47a9 100644 --- a/include/os/osEnv.h +++ b/include/os/osEnv.h @@ -41,6 +41,7 @@ extern char tsSSE42Enable; extern char tsAVXEnable; extern char tsAVX2Enable; extern char tsFMAEnable; +extern char tsTagFilterCache; extern char configDir[]; extern char tsDataDir[]; diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 11d60b8b03..bdaa68dfb7 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -67,6 +67,10 @@ int32_t* taosGetErrno(); #define TSDB_CODE_RPC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x0019) // #define TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED TAOS_DEF_ERROR_CODE(0, 0x0020) // "Vgroup could not be connected" #define TSDB_CODE_RPC_SOMENODE_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0021) // +#define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 0x0022) // + + + //common & util #define TSDB_CODE_OPS_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x0100) // @@ -115,6 +119,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_APP_IS_STARTING TAOS_DEF_ERROR_CODE(0, 0x0130) // #define TSDB_CODE_APP_IS_STOPPING TAOS_DEF_ERROR_CODE(0, 0x0131) // +#define TSDB_CODE_IVLD_DATA_FMT TAOS_DEF_ERROR_CODE(0, 0x0132) // //client #define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200) diff --git a/include/util/tarray.h b/include/util/tarray.h index f8e872ec66..5b36bc4d0e 100644 --- a/include/util/tarray.h +++ b/include/util/tarray.h @@ -43,6 +43,7 @@ typedef struct SArray { * @return */ SArray* taosArrayInit(size_t size, size_t elemSize); +SArray* taosArrayInit_s(size_t size, size_t elemSize, size_t initialSize); /** * @@ -139,14 +140,6 @@ void* taosArrayGetLast(const SArray* pArray); */ size_t taosArrayGetSize(const SArray* pArray); -/** - * set the size of array - * @param pArray - * @param size size of the array - * @return - */ -void taosArraySetSize(SArray* pArray, size_t size); - /** * insert data into array * @param pArray diff --git a/include/util/tdef.h b/include/util/tdef.h index 2130085763..b89fc32bde 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -283,8 +283,9 @@ typedef enum ELogicConditionType { #define TSDB_DNODE_ROLE_MGMT 1 #define TSDB_DNODE_ROLE_VNODE 2 -#define TSDB_MAX_REPLICA 5 -#define TSDB_SYNC_LOG_BUFFER_SIZE 4096 +#define TSDB_MAX_REPLICA 5 +#define TSDB_SYNC_LOG_BUFFER_SIZE 4096 +#define TSDB_SYNC_LOG_BUFFER_RETENTION (TSDB_SYNC_LOG_BUFFER_SIZE >> 4) #define TSDB_TBNAME_COLUMN_INDEX (-1) #define TSDB_MULTI_TABLEMETA_MAX_NUM 100000 // maximum batch size allowed to load table meta @@ -414,7 +415,7 @@ typedef enum ELogicConditionType { #ifdef WINDOWS #define TSDB_MAX_RPC_THREADS 4 // windows pipe only support 4 connections. 
#else -#define TSDB_MAX_RPC_THREADS 20 +#define TSDB_MAX_RPC_THREADS 10 #endif #define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type diff --git a/include/util/tlog.h b/include/util/tlog.h index 808377fa77..4311719ca5 100644 --- a/include/util/tlog.h +++ b/include/util/tlog.h @@ -89,7 +89,7 @@ bool taosAssertRelease(bool condition); // Disable all asserts that may compromise the performance. #if defined DISABLE_ASSERT #define ASSERT(condition) -#define ASSERTS(condition, ...) +#define ASSERTS(condition, ...) (0) #else #define ASSERTS(condition, ...) taosAssertDebug(condition, __FILE__, __LINE__, __VA_ARGS__) #ifdef NDEBUG diff --git a/source/libs/executor/inc/tsimplehash.h b/include/util/tsimplehash.h similarity index 99% rename from source/libs/executor/inc/tsimplehash.h rename to include/util/tsimplehash.h index 7344c34261..c9df911476 100644 --- a/source/libs/executor/inc/tsimplehash.h +++ b/include/util/tsimplehash.h @@ -116,6 +116,7 @@ typedef struct SHNode { struct SHNode *next; uint32_t keyLen : 20; uint32_t dataLen : 12; + uint32_t hashVal; char data[]; } SHNode; #pragma pack(pop) diff --git a/include/util/ttrace.h b/include/util/ttrace.h index 579768228a..5cdb1eecaa 100644 --- a/include/util/ttrace.h +++ b/include/util/ttrace.h @@ -45,11 +45,25 @@ typedef struct STraceId { #define TRACE_GET_MSGID(traceId) (traceId)->msgId -#define TRACE_TO_STR(traceId, buf) \ - do { \ - int64_t rootId = (traceId) != NULL ? (traceId)->rootId : 0; \ - int64_t msgId = (traceId) != NULL ? (traceId)->msgId : 0; \ - sprintf(buf, "0x%" PRIx64 ":0x%" PRIx64 "", rootId, msgId); \ +//#define TRACE_TO_STR(traceId, buf) \ +// do { \ +// int64_t rootId = (traceId) != NULL ? (traceId)->rootId : 0; \ +// int64_t msgId = (traceId) != NULL ? (traceId)->msgId : 0; \ +// sprintf(buf, "0x%" PRIx64 ":0x%" PRIx64 "", rootId, msgId); \ +// } while (0) + +#define TRACE_TO_STR(_traceId, _buf) \ + do { \ + int64_t rootId = (_traceId) != NULL ? (_traceId)->rootId : 0; \ + int64_t msgId = (_traceId) != NULL ? (_traceId)->msgId : 0; \ + char* _t = _buf; \ + _t[0] = '0'; \ + _t[1] = 'x'; \ + _t += titoa(rootId, 16, &_t[2]); \ + _t[0] = ':'; \ + _t[1] = '0'; \ + _t[2] = 'x'; \ + _t += titoa(msgId, 16, &_t[3]); \ } while (0) #ifdef __cplusplus diff --git a/include/util/tutil.h b/include/util/tutil.h index 9fb68aebdc..e0801e5295 100644 --- a/include/util/tutil.h +++ b/include/util/tutil.h @@ -46,6 +46,9 @@ char *paGetToken(char *src, char **token, int32_t *tokenLen); int32_t taosByteArrayToHexStr(char bytes[], int32_t len, char hexstr[]); int32_t taosHexStrToByteArray(char hexstr[], char bytes[]); +int32_t tintToHex(uint64_t val, char hex[]); +int32_t titoa(uint64_t val, size_t radix, char str[]); + char *taosIpStr(uint32_t ipInt); uint32_t ip2uint(const char *const ip_addr); void taosIp2String(uint32_t ip, char *str); diff --git a/include/util/xxhash.h b/include/util/xxhash.h new file mode 100644 index 0000000000..d6bad94335 --- /dev/null +++ b/include/util/xxhash.h @@ -0,0 +1,328 @@ +/* + xxHash - Extremely Fast Hash algorithm + Header File + Copyright (C) 2012-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - xxHash source repository : https://github.com/Cyan4973/xxHash +*/ + +/* Notice extracted from xxHash homepage : + +xxHash is an extremely fast Hash algorithm, running at RAM speed limits. +It also successfully passes all tests from the SMHasher suite. + +Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) + +Name Speed Q.Score Author +xxHash 5.4 GB/s 10 +CrapWow 3.2 GB/s 2 Andrew +MumurHash 3a 2.7 GB/s 10 Austin Appleby +SpookyHash 2.0 GB/s 10 Bob Jenkins +SBox 1.4 GB/s 9 Bret Mulvey +Lookup3 1.2 GB/s 9 Bob Jenkins +SuperFastHash 1.2 GB/s 1 Paul Hsieh +CityHash64 1.05 GB/s 10 Pike & Alakuijala +FNV 0.55 GB/s 5 Fowler, Noll, Vo +CRC32 0.43 GB/s 9 +MD5-32 0.33 GB/s 10 Ronald L. Rivest +SHA1-32 0.28 GB/s 10 + +Q.Score is a measure of quality of the hash function. +It depends on successfully passing SMHasher test set. +10 is a perfect score. + +A 64-bit version, named XXH64, is available since r35. +It offers much better speed, but for 64-bit applications only. +Name Speed on 64 bits Speed on 32 bits +XXH64 13.8 GB/s 1.9 GB/s +XXH32 6.8 GB/s 6.0 GB/s +*/ + +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 + +#if defined (__cplusplus) +extern "C" { +#endif + + +/* **************************** +* Definitions +******************************/ +#include /* size_t */ +typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; + + +/* **************************** + * API modifier + ******************************/ +/** XXH_INLINE_ALL (and XXH_PRIVATE_API) + * This is useful to include xxhash functions in `static` mode + * in order to inline them, and remove their symbol from the public list. + * Inlining can offer dramatic performance improvement on small keys. + * Methodology : + * #define XXH_INLINE_ALL + * #include "xxhash.h" + * `xxhash.c` is automatically included. + * It's not useful to compile and link it as a separate module. 
+ */ +#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) +# ifndef XXH_STATIC_LINKING_ONLY +# define XXH_STATIC_LINKING_ONLY +# endif +# if defined(__GNUC__) +# define XXH_PUBLIC_API static __inline __attribute__((unused)) +# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define XXH_PUBLIC_API static inline +# elif defined(_MSC_VER) +# define XXH_PUBLIC_API static __inline +# else + /* this version may generate warnings for unused static functions */ +# define XXH_PUBLIC_API static +# endif +#else +# define XXH_PUBLIC_API /* do nothing */ +#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ + +/*! XXH_NAMESPACE, aka Namespace Emulation : + * + * If you want to include _and expose_ xxHash functions from within your own library, + * but also want to avoid symbol collisions with other libraries which may also include xxHash, + * + * you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library + * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values). + * + * Note that no change is required within the calling program as long as it includes `xxhash.h` : + * regular symbol name will be automatically translated by this header. + */ +#ifdef XXH_NAMESPACE +# define XXH_CAT(A,B) A##B +# define XXH_NAME2(A,B) XXH_CAT(A,B) +# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) +# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) +# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) +# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) +# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) +# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) +# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) +# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) +# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) +# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) +# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) +# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) +# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) +# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) +# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) +# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) +# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) +# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) +#endif + + +/* ************************************* +* Version +***************************************/ +#define XXH_VERSION_MAJOR 0 +#define XXH_VERSION_MINOR 6 +#define XXH_VERSION_RELEASE 5 +#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) +XXH_PUBLIC_API unsigned XXH_versionNumber (void); + + +/*-********************************************************************** +* 32-bit hash +************************************************************************/ +typedef unsigned int XXH32_hash_t; + +/*! XXH32() : + Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input". + The memory between input & input+length must be valid (allocated and read-accessible). + "seed" can be used to alter the result predictably. 
+ Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */ +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); + +/*====== Streaming ======*/ +typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); + +XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); + +/* + * Streaming functions generate the xxHash of an input provided in multiple segments. + * Note that, for small input, they are slower than single-call functions, due to state management. + * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. + * + * XXH state must first be allocated, using XXH*_createState() . + * + * Start a new hash by initializing state with a seed, using XXH*_reset(). + * + * Then, feed the hash state by calling XXH*_update() as many times as necessary. + * The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. + * + * Finally, a hash value can be produced anytime, by using XXH*_digest(). + * This function returns the nn-bits hash as an int or long long. + * + * It's still possible to continue inserting input into the hash state after a digest, + * and generate some new hashes later on, by calling again XXH*_digest(). + * + * When done, free XXH state space if it was allocated dynamically. + */ + +/*====== Canonical representation ======*/ + +typedef struct { unsigned char digest[4]; } XXH32_canonical_t; +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); + +/* Default result type for XXH functions are primitive unsigned 32 and 64 bits. + * The canonical representation uses human-readable write convention, aka big-endian (large digits first). + * These functions allow transformation of hash result into and from its canonical format. + * This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. + */ + + +#ifndef XXH_NO_LONG_LONG +/*-********************************************************************** +* 64-bit hash +************************************************************************/ +typedef unsigned long long XXH64_hash_t; + +/*! XXH64() : + Calculate the 64-bit hash of sequence of length "len" stored at memory address "input". + "seed" can be used to alter the result predictably. + This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark). 
+*/ +XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); + +/*====== Streaming ======*/ +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state); + +XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); + +/*====== Canonical representation ======*/ +typedef struct { unsigned char digest[8]; } XXH64_canonical_t; +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); +#endif /* XXH_NO_LONG_LONG */ + + + +#ifdef XXH_STATIC_LINKING_ONLY + +/* ================================================================================================ + This section contains declarations which are not guaranteed to remain stable. + They may change in future versions, becoming incompatible with a different version of the library. + These declarations should only be used with static linking. + Never use them in association with dynamic linking ! +=================================================================================================== */ + +/* These definitions are only present to allow + * static allocation of XXH state, on stack or in a struct for example. + * Never **ever** use members directly. */ + +#if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + +struct XXH32_state_s { + uint32_t total_len_32; + uint32_t large_len; + uint32_t v1; + uint32_t v2; + uint32_t v3; + uint32_t v4; + uint32_t mem32[4]; + uint32_t memsize; + uint32_t reserved; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH32_state_t */ + +struct XXH64_state_s { + uint64_t total_len; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t mem64[4]; + uint32_t memsize; + uint32_t reserved[2]; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH64_state_t */ + +# else + +struct XXH32_state_s { + unsigned total_len_32; + unsigned large_len; + unsigned v1; + unsigned v2; + unsigned v3; + unsigned v4; + unsigned mem32[4]; + unsigned memsize; + unsigned reserved; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH32_state_t */ + +# ifndef XXH_NO_LONG_LONG /* remove 64-bit support */ +struct XXH64_state_s { + unsigned long long total_len; + unsigned long long v1; + unsigned long long v2; + unsigned long long v3; + unsigned long long v4; + unsigned long long mem64[4]; + unsigned memsize; + unsigned reserved[2]; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH64_state_t */ +# endif + +# endif + + +#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) +# include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */ +#endif + +#endif /* XXH_STATIC_LINKING_ONLY */ + + +#if defined (__cplusplus) +} +#endif + +#endif /* XXHASH_H_5627135585666179 */ diff --git a/packaging/docker/README.md b/packaging/docker/README.md index 763ab73724..4509a7a1a9 100644 --- 
a/packaging/docker/README.md +++ b/packaging/docker/README.md @@ -18,65 +18,58 @@ TDengine is an open-sourced big data platform under [GNU AGPL v3.0](http://www.g ## How to use this image -### Start a TDengine instance with RESTful API exposed +### Starting TDengine -Simply, you can use `docker run` to start a TDengine instance and connect it with restful connectors(eg. [JDBC-RESTful](https://www.taosdata.com/cn/documentation/connector/java)). +The TDengine image starts with the HTTP service activated by default, using the following command: -```bash +```shell docker run -d --name tdengine -p 6041:6041 tdengine/tdengine ``` -This command starts a docker container by name `tdengine` with TDengine server running, and maps the container's HTTP port 6041 to the host's port 6041. If you have `curl` in your host, you can list the databases by the command: +The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command. -```bash +```shell curl -u root:taosdata -d "show databases" localhost:6041/rest/sql ``` -You can execute the `taos` shell command in the container: +The TDengine client taos can be executed in this container to access TDengine using the following command. -```bash +```shell $ docker exec -it tdengine taos -Welcome to the TDengine shell from Linux, Client Version:2.4.0.0 -Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. - taos> show databases; - name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status | -==================================================================================================================================================================================================================================================================================== - log | 2022-01-17 13:57:22.270 | 10 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready | -Query OK, 1 row(s) in set (0.002843s) + name | +================================= + information_schema | + performance_schema | +Query OK, 2 row(s) in set (0.002843s) ``` -Since TDengine use container hostname to establish connections, it's a bit more complex to use TDengine CLI and native connectors(such as JDBC-JNI) with TDengine container instance. This is the recommended way to expose ports and use TDengine with docker in simple cases. If you want to use TDengine CLI or taosc/connectors smoothly outside the `tdengine` container, see next use cases that match you need. +The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from outside the container using TDengine CLI or various connectors for complex scenarios. -### Start with host network +### Start TDengine on the host network -```bash +```shell docker run -d --name tdengine --network host tdengine/tdengine ``` -Starts container with `host` network will use host's hostname as fqdn instead of container id. 
It's much like starting natively with `systemd` in host. After installing the client, you can use `taos` shell as normal in host path. +The above command starts TDengine on the host network and uses the host's FQDN to establish a connection instead of the container's hostname. It is the equivalent of using `systemctl` to start TDengine on the host. If the TDengine client is already installed on the host, you can access it directly with the following command. -```bash +```shell $ taos -Welcome to the TDengine shell from Linux, Client Version:2.4.0.0 -Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. - taos> show dnodes; id | end_point | vnodes | cores | status | role | create_time | offline reason | ====================================================================================================================================== - 1 | host:6030 | 1 | 8 | ready | any | 2022-01-17 22:10:32.619 | | + 1 | myhost:6030 | 1 | 8 | ready | any | 2022-01-17 22:10:32.619 | | Query OK, 1 row(s) in set (0.003233s) ``` -### Start with exposed ports and specified hostname +### Start TDengine with the specified hostname and port -Set the fqdn explicitly will help you to use in other environment or applications. We provide environment variable `TAOS_FQDN` or `fqdn` config option to explicitly set the hostname used by TDengine container instance(s). +The `TAOS_FQDN` environment variable or the `fqdn` configuration item in `taos.cfg` allows TDengine to establish a connection at the specified hostname. This approach provides greater flexibility for deployment. -Use `TAOS_FQDN` variable within `docker run` command: - -```bash +```shell docker run -d \ --name tdengine \ -e TAOS_FQDN=tdengine \ @@ -85,79 +78,58 @@ docker run -d \ tdengine/tdengine ``` -This command starts a docker container with TDengine server running and maps the container's TCP ports from 6030 to 6049 to the host's ports from 6030 to 6049 with TCP protocol and UDP ports range 6030-6039 to the host's UDP ports 6030-6039. If the host is already running TDengine server and occupying the same port(s), you need to map the container's port to a different unused port segment. (Please see TDengine 2.0 Port Description for details). In order to support TDengine clients accessing TDengine server services, both TCP and UDP ports need to be exposed by default(unless `rpcForceTcp` is set to `1`). +The above command starts a TDengine service in the container, which listens to the hostname tdengine, and maps the container's port segment 6030 to 6049 to the host's port segment 6030 to 6049 (both TCP and UDP ports need to be mapped). If the port segment is already occupied on the host, you can modify the above command to specify a free port segment on the host. If `rpcForceTcp` is set to `1`, you can map only the TCP protocol. -If you want to use TDengine CLI or native connectors([JDBC-JNI](https://www.taosdata.com/cn/documentation/connector/java), or [driver-go](https://github.com/taosdata/driver-go)), you need to make sure the `TAOS_FQDN` is resolvable at `/etc/hosts` or with custom DNS service. +Next, ensure the hostname "tdengine" is resolvable in `/etc/hosts`. -If you set the `TAOS_FQDN` to host's hostname, it will works as using `hosts` network like previous use case. Otherwise, like in `-e TAOS_FQDN=tdengine`, you can add the hostname record `tdengine` into `/etc/hosts` (use `127.0.0.1` here in host path, if use TDengine client/application in other hosts, you should set the right ip to the host eg. 
`192.168.10.1`(check the real ip in host with `hostname -i` or `ip route list default`) to make the TDengine endpoint resolvable): - -```bash +```shell echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts ``` -Then you can use `taos` with the host `tdengine`: +Finally, the TDengine service can be accessed from the TDengine CLI or any connector with "tdengine" as the server address. -```bash -taos -h tdengine +```shell +taos -h tdengine -P 6030 ``` -Or develop/test applications with native connectors. As in python: +If set `TAOS_FQDN` to the same hostname, the effect is the same as "Start TDengine on host network". -```python -import taos; -conn = taos.connect(host = "tdengine") -res = conn.query("show databases") -for row in res.fetch_all_into_dict(): - print(row) -``` +### Start TDengine on the specified network -See the results: +You can also start TDengine on a specific network. Perform the following steps: -```bash -Python 3.8.10 (default, Nov 26 2021, 20:14:08) -[GCC 9.3.0] on linux -Type "help", "copyright", "credits" or "license" for more information. ->>> import taos; ->>> conn = taos.connect(host = "tdengine") ->>> res = conn.query("show databases") ->>> for row in res.fetch_all_into_dict(): -... print(row) -... -{'name': 'log', 'created_time': datetime.datetime(2022, 1, 17, 22, 56, 2, 490000), 'ntables': 11, 'vgroups': 1, 'replica': 1, 'quorum': 1, 'days': 10, 'keep': '30', 'cache(MB)': 1, 'blocks': 3, 'minrows': 100, 'maxrows': 4096, 'wallevel': 1, 'fsync': 3000, 'comp': 2, 'cachelast': 0, 'precision': 'us', 'update': 0, 'status': 'ready'} -``` +1. First, create a docker network named `td-net` -### Start with specific network + ```shell + docker network create td-net + ``` -Alternatively, you can use TDengine natively by using specific network. +2. Start TDengine -First, create network for TDengine server and client/application. + Start the TDengine service on the `td-net` network with the following command: -```bash -docker network create td-net -``` + ```shell + docker run -d --name tdengine --network td-net \ + -e TAOS_FQDN=tdengine \ + tdengine/tdengine + ``` -Start TDengine instance with service name as fqdn (explicitly set with `TAOS_FQDN`): +3. Start the TDengine client in another container on the same network -```bash -docker run -d --name tdengine --network td-net \ - -e TAOS_FQDN=tdengine \ - tdengine/tdengine -``` + ```shell + docker run --rm -it --network td-net -e TAOS_FIRST_EP=tdengine tdengine/tdengine taos + # or + #docker run --rm -it --network td-net -e tdengine/tdengine taos -h tdengine + ``` -Start TDengine client in another container with the specific network: +### Launching a client application in a container -```bash -docker run --rm -it --network td-net -e TAOS_FIRST_EP=tdengine tdengine/tdengine taos -# or -docker run --rm -it --network td-net -e tdengine/tdengine taos -h tdengine -``` +If you want to start your application in a container, you need to add the corresponding dependencies on TDengine to the image as well, e.g. 
-When you build your application with docker, you should add the TDengine client in the dockerfile, as based on `ubuntu:20.04` image, install the client like this: - -```dockerfile +```docker FROM ubuntu:20.04 RUN apt-get update && apt-get install -y wget -ENV TDENGINE_VERSION=2.4.0.0 +ENV TDENGINE_VERSION=3.0.0.0 RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && cd TDengine-client-${TDENGINE_VERSION} \ @@ -169,10 +141,7 @@ RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENG #CMD ["app"] ``` -Here is an Go example app: - - - +Here is an example GO program: ```go /* @@ -181,19 +150,19 @@ Here is an Go example app: package main import ( - "database/sql" - "flag" - "fmt" - "time" + "database/sql" + "flag" + "fmt" + "time" - _ "github.com/taosdata/driver-go/v2/taosSql" + _ "github.com/taosdata/driver-go/v3/taosSql" ) type config struct { - hostName string - serverPort string - user string - password string + hostName string + serverPort string + user string + password string } var configPara config @@ -201,70 +170,67 @@ var taosDriverName = "taosSql" var url string func init() { - flag.StringVar(&configPara.hostName, "h", "", "The host to connect to TDengine server.") - flag.StringVar(&configPara.serverPort, "p", "", "The TCP/IP port number to use for the connection to TDengine server.") - flag.StringVar(&configPara.user, "u", "root", "The TDengine user name to use when connecting to the server.") - flag.StringVar(&configPara.password, "P", "taosdata", "The password to use when connecting to the server.") - flag.Parse() + flag.StringVar(&configPara.hostName, "h", "", "The host to connect to TDengine server.") + flag.StringVar(&configPara.serverPort, "p", "", "The TCP/IP port number to use for the connection to TDengine server.") + flag.StringVar(&configPara.user, "u", "root", "The TDengine user name to use when connecting to the server.") + flag.StringVar(&configPara.password, "P", "taosdata", "The password to use when connecting to the server.") + flag.Parse() } func printAllArgs() { - fmt.Printf("============= args parse result: =============\n") - fmt.Printf("hostName: %v\n", configPara.hostName) - fmt.Printf("serverPort: %v\n", configPara.serverPort) - fmt.Printf("usr: %v\n", configPara.user) - fmt.Printf("password: %v\n", configPara.password) - fmt.Printf("================================================\n") + fmt.Printf("============= args parse result: =============\n") + fmt.Printf("hostName: %v\n", configPara.hostName) + fmt.Printf("serverPort: %v\n", configPara.serverPort) + fmt.Printf("usr: %v\n", configPara.user) + fmt.Printf("password: %v\n", configPara.password) + fmt.Printf("================================================\n") } func main() { - printAllArgs() + printAllArgs() - url = "root:taosdata@/tcp(" + configPara.hostName + ":" + configPara.serverPort + ")/" + url = "root:taosdata@/tcp(" + configPara.hostName + ":" + configPara.serverPort + ")/" - taos, err := sql.Open(taosDriverName, url) - checkErr(err, "open database error") - defer taos.Close() + taos, err := sql.Open(taosDriverName, url) + checkErr(err, "open database error") + defer taos.Close() - taos.Exec("create database if not exists test") - taos.Exec("use test") - taos.Exec("create table if not exists tb1 (ts timestamp, a int)") - _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)") - checkErr(err, "failed to insert") - rows, 
err := taos.Query("select * from tb1") - checkErr(err, "failed to select") + taos.Exec("create database if not exists test") + taos.Exec("use test") + taos.Exec("create table if not exists tb1 (ts timestamp, a int)") + _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)") + checkErr(err, "failed to insert") + rows, err := taos.Query("select * from tb1") + checkErr(err, "failed to select") - defer rows.Close() - for rows.Next() { - var r struct { - ts time.Time - a int - } - err := rows.Scan(&r.ts, &r.a) - if err != nil { - fmt.Println("scan error:\n", err) - return - } - fmt.Println(r.ts, r.a) - } + defer rows.Close() + for rows.Next() { + var r struct { + ts time.Time + a int + } + err := rows.Scan(&r.ts, &r.a) + if err != nil { + fmt.Println("scan error:\n", err) + return + } + fmt.Println(r.ts, r.a) + } } func checkErr(err error, prompt string) { - if err != nil { - fmt.Println("ERROR: %s\n", prompt) - panic(err) - } + if err != nil { + fmt.Println("ERROR: %s\n", prompt) + panic(err) + } } ``` - - +Here is the full Dockerfile: -Full version of dockerfile could be: - -```dockerfile +```docker FROM golang:1.17.6-buster as builder -ENV TDENGINE_VERSION=2.4.0.0 +ENV TDENGINE_VERSION=3.0.0.0 RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && cd TDengine-client-${TDENGINE_VERSION} \ @@ -274,11 +240,13 @@ RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENG WORKDIR /usr/src/app/ ENV GOPROXY="https://goproxy.io,direct" COPY ./main.go ./go.mod ./go.sum /usr/src/app/ -RUN go env && go mod tidy && go build +RUN go env +RUN go mod tidy +RUN go build FROM ubuntu:20.04 RUN apt-get update && apt-get install -y wget -ENV TDENGINE_VERSION=2.4.0.0 +ENV TDENGINE_VERSION=3.0.0.0 RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && cd TDengine-client-${TDENGINE_VERSION} \ @@ -291,9 +259,9 @@ COPY --from=builder /usr/src/app/app /usr/bin/ CMD ["app"] ``` -Suppose you have `main.go`, `go.mod` `go.sum`, `app.dockerfile`, build the app and run it with network `td-net`: +Now that we have `main.go`, `go.mod`, `go.sum`, `app.dockerfile`, we can build the application and start it on the `td-net` network. -```bash +```shell $ docker build -t app -f app.dockerfile $ docker run --rm --network td-net app -h tdengine -p 6030 ============= args parse result: ============= @@ -316,26 +284,18 @@ password: taosdata 2022-01-18 01:43:51.029 +0000 UTC 3 ``` -Now you must be much familiar with developing and testing with TDengine, let's see some more complex cases. +### Start the TDengine cluster with docker-compose -### Start with docker-compose with multiple nodes(instances) +1. The following docker-compose file starts a TDengine cluster with three nodes. -Start a 2-replicas-2-mnodes-2-dnodes-1-arbitrator TDengine cluster with `docker-compose` is quite simple. 
Save the file as `docker-compose.yml`: - -```yaml +```yml version: "3" services: - arbitrator: - image: tdengine/tdengine:$VERSION - command: tarbitrator td-1: image: tdengine/tdengine:$VERSION environment: TAOS_FQDN: "td-1" TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 volumes: - taosdata-td1:/var/lib/taos/ - taoslog-td1:/var/log/taos/ @@ -344,101 +304,95 @@ services: environment: TAOS_FQDN: "td-2" TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 volumes: - taosdata-td2:/var/lib/taos/ - taoslog-td2:/var/log/taos/ + td-3: + image: tdengine/tdengine:$VERSION + environment: + TAOS_FQDN: "td-3" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td3:/var/lib/taos/ + - taoslog-td3:/var/log/taos/ volumes: taosdata-td1: taoslog-td1: taosdata-td2: taoslog-td2: + taosdata-td3: + taoslog-td3: ``` -You may notice that: +:::note -- We use `VERSION` environment variable to set `tdengine` image tag version once. -- **`TAOS_FIRST_EP`** **MUST** be set to join the newly created instances into an existing TDengine cluster. If you want more instances, use `TAOS_SECOND_EP` in case of HA(High Availability) concerns. -- `TAOS_NUM_OF_MNODES` is for setting number of mnodes for the cluster. -- `TAOS_REPLICA` set the default database replicas, `2` means there're one master and one slave copy of data. The `replica` option should be `1 <= replica <= 3`, and not greater than dnodes number. -- `TAOS_ARBITRATOR` set the arbitrator entrypoint of the cluster for failover/election stuff. It's better to use arbitrator in a two nodes cluster. -- The way to start an arbitrator service is as easy as abc: just add command name `tarbitrator`(which is the binary name of arbitrator daemon) in docker-compose service option: `command: tarbitrator`, and everything is ok now. +- The `VERSION` environment variable is used to set the tdengine image tag +- `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time + ::: -Now run `docker-compose up -d` with version specified: +2. Start the cluster -```bash -$ VERSION=2.4.0.0 docker-compose up -d -Creating network "test_default" with the default driver -Creating volume "test_taosdata-td1" with default driver -Creating volume "test_taoslog-td1" with default driver -Creating volume "test_taosdata-td2" with default driver -Creating volume "test_taoslog-td2" with default driver -Creating test_td-1_1 ... done -Creating test_arbitrator_1 ... done -Creating test_td-2_1 ... done -``` + ```shell + $ VERSION=3.0.0.0 docker-compose up -d + Creating network "test_default" with the default driver + Creating volume "test_taosdata-td1" with default driver + Creating volume "test_taoslog-td1" with default driver + Creating volume "test_taosdata-td2" with default driver + Creating volume "test_taoslog-td2" with default driver + Creating test_td-1_1 ... done + Creating test_arbitrator_1 ... done + Creating test_td-2_1 ... done + ``` -Check the status: +3. Check the status of each node -```bash -$ docker-compose ps - Name Command State Ports ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -test_arbitrator_1 /usr/bin/entrypoint.sh tar ... 
Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp -test_td-1_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp -test_td-2_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp -``` + ```shell + $ docker-compose ps + Name Command State Ports + --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + test_arbitrator_1 /usr/bin/entrypoint.sh tar ... Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp + test_td-1_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp + test_td-2_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp + ``` -Check dnodes with TDengine CLI: +4. Show dnodes via TDengine CLI -```bash +```shell $ docker-compose exec td-1 taos -s "show dnodes" -Welcome to the TDengine shell from Linux, Client Version:2.4.0.0 -Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. - taos> show dnodes - id | end_point | vnodes | cores | status | role | create_time | offline reason | + id | endpoint | vnodes | support_vnodes | status | create_time | note | ====================================================================================================================================== - 1 | td-1:6030 | 1 | 8 | ready | any | 2022-01-18 02:47:42.871 | | - 2 | td-2:6030 | 0 | 8 | ready | any | 2022-01-18 02:47:43.518 | | - 0 | arbitrator:6042 | 0 | 0 | ready | arb | 2022-01-18 02:47:43.633 | - | -Query OK, 3 row(s) in set (0.000811s) + 1 | td-1:6030 | 0 | 32 | ready | 2022-08-19 07:57:29.971 | | + 2 | td-2:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.415 | | + 3 | td-3:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.417 | | +Query OK, 3 rows in database (0.021262s) + ``` -### Start a TDengine cluster with scaled taosadapter service +## taosAdapter -In previous use case, you could see the way to start other services built with TDengine(`taosd` as the default command). There's another important service you should know: +1. taosAdapter is enabled by default in the TDengine container. If you want to disable it, specify the environment variable `TAOS_DISABLE_ADAPTER=true` at startup -> **taosAdapter** is a TDengine’s companion tool and is a bridge/adapter between TDengine cluster and application. It provides an easy-to-use and efficient way to ingest data from data collections agents(like Telegraf, StatsD, CollectD) directly. It also provides InfluxDB/OpenTSDB compatible data ingestion interface to allow InfluxDB/OpenTSDB applications to immigrate to TDengine seamlessly. +2. At the same time, for flexible deployment, taosAdapter can be started in a separate container -`taosadapter` is running inside `tdengine` image by default, you can disable it by `TAOS_DISABLE_ADAPTER=true`. Running `taosadapter` in a separate container is like how `arbitrator` does: + ```docker + services: + # ... 
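+     # the adapter also needs to reach a dnode; in the full compose file
+     # shown below this is done by setting TAOS_FIRST_EP (for example "td-1")
+     # in the adapter service's environment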
+ adapter: + image: tdengine/tdengine:$VERSION + command: taosadapter + ``` -```yaml -services: - # ... - adapter: - image: tdengine/tdengine:$VERSION - command: taosadapter -``` + Suppose you want to deploy multiple taosAdapters to improve throughput and provide high availability. In that case, the recommended configuration method uses a reverse proxy such as Nginx to offer a unified access entry. For specific configuration methods, please refer to the official documentation of Nginx. Here is an example: -`taosadapter` could be scaled with docker-compose, so that you can manage the `taosadapter` nodes easily. Here is an example shows 4-`taosadapter` instances in a TDengine cluster(much like previous use cases): - -```yaml +```yml version: "3" networks: inter: - api: services: - arbitrator: - image: tdengine/tdengine:$VERSION - command: tarbitrator - networks: - - inter td-1: image: tdengine/tdengine:$VERSION networks: @@ -446,9 +400,6 @@ services: environment: TAOS_FQDN: "td-1" TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 volumes: - taosdata-td1:/var/lib/taos/ - taoslog-td1:/var/log/taos/ @@ -459,15 +410,12 @@ services: environment: TAOS_FQDN: "td-2" TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 volumes: - taosdata-td2:/var/lib/taos/ - taoslog-td2:/var/log/taos/ adapter: image: tdengine/tdengine:$VERSION - command: taosadapter + entrypoint: "taosadapter" networks: - inter environment: @@ -481,7 +429,6 @@ services: - adapter networks: - inter - - api ports: - 6041:6041 - 6044:6044/udp @@ -504,100 +451,14 @@ volumes: taoslog-td2: ``` -Start the cluster: +## Deploy with docker swarm -```bash -$ VERSION=2.4.0.0 docker-compose up -d -Creating network "docker_inter" with the default driver -Creating network "docker_api" with the default driver -Creating volume "docker_taosdata-td1" with default driver -Creating volume "docker_taoslog-td1" with default driver -Creating volume "docker_taosdata-td2" with default driver -Creating volume "docker_taoslog-td2" with default driver -Creating docker_td-2_1 ... done -Creating docker_arbitrator_1 ... done -Creating docker_td-1_1 ... done -Creating docker_adapter_1 ... done -Creating docker_adapter_2 ... done -Creating docker_adapter_3 ... done -``` +If you want to deploy a container-based TDengine cluster on multiple hosts, you can use docker swarm. First, to establish a docker swarm cluster on these hosts, please refer to the official docker documentation. -It will start a TDengine cluster with two dnodes and four taosadapter instances, expose ports 6041/tcp and 6044/udp to host. +The docker-compose file can refer to the previous section. Here is the command to start TDengine with docker swarm: -`6041` is the RESTful API endpoint port, you can verify that the RESTful interface taosAdapter provides working using the `curl` command. 
- -```bash -$ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql -{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2022-01-18 04:37:42.902",16,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1} -``` - -If you run curl in batch(here we use [hyperfine](https://github.com/sharkdp/hyperfine) - a command-line benchmarking tool), the requests are balanced into 4 adapter instances. - -```bash -hyperfine -m10 'curl -u root:taosdata localhost:6041/rest/sql -d "describe log.log"' -``` - -View the logs with `docker-compose logs`: - -```bash -$ docker-compose logs adapter -# some logs skipped -adapter_2 | 01/18 04:57:44.616529 00000039 TAOS_ADAPTER info "| 200 | 162.185µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18 -adapter_1 | 01/18 04:57:44.627695 00000039 TAOS_ADAPTER info "| 200 | 145.485µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=17 -adapter_3 | 01/18 04:57:44.639165 00000040 TAOS_ADAPTER info "| 200 | 146.913µs | 172.21.0.9 | POST | /rest/sql " sessionID=17 model=web -adapter_4 | 01/18 04:57:44.650829 00000039 TAOS_ADAPTER info "| 200 | 153.201µs | 172.21.0.9 | POST | /rest/sql " sessionID=17 model=web -adapter_2 | 01/18 04:57:44.662422 00000039 TAOS_ADAPTER info "| 200 | 211.393µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=19 -adapter_1 | 01/18 04:57:44.673426 00000039 TAOS_ADAPTER info "| 200 | 154.714µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18 -adapter_3 | 01/18 04:57:44.684788 00000040 TAOS_ADAPTER info "| 200 | 131.876µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18 -adapter_4 | 01/18 04:57:44.696261 00000039 TAOS_ADAPTER info "| 200 | 162.173µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18 -adapter_2 | 01/18 04:57:44.707414 00000039 TAOS_ADAPTER info "| 200 | 164.419µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20 -adapter_1 | 01/18 04:57:44.720842 00000039 TAOS_ADAPTER info "| 200 | 179.374µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=19 -adapter_3 | 01/18 04:57:44.732184 00000040 TAOS_ADAPTER info "| 200 | 141.174µs | 172.21.0.9 | POST | /rest/sql " sessionID=19 model=web -adapter_4 | 01/18 04:57:44.744024 00000039 TAOS_ADAPTER info "| 200 | 159.774µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=19 -adapter_2 | 01/18 04:57:44.773732 00000039 TAOS_ADAPTER info "| 200 | 178.993µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=21 -adapter_1 | 01/18 04:57:44.796518 00000039 TAOS_ADAPTER info "| 200 | 238.24µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20 -adapter_3 | 01/18 04:57:44.810744 00000040 TAOS_ADAPTER info "| 200 | 176.133µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20 -adapter_4 | 01/18 04:57:44.826395 00000039 TAOS_ADAPTER info "| 200 | 149.215µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20 -``` - -`6044/udp` is the [StatsD](https://github.com/statsd/statsd)-compatible port, you can verify this feature with `nc` command(usually provided by `netcat` package). 
- -```bash -echo "foo:1|c" | nc -u -w0 127.0.0.1 6044 -``` - -Check the result in `taos` shell with `docker-compose exec`: - -```bash -$ dc exec td-1 taos - -Welcome to the TDengine shell from Linux, Client Version:2.4.0.0 -Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. - -taos> show databases; - name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status | -==================================================================================================================================================================================================================================================================================== - log | 2022-01-18 04:37:42.902 | 17 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready | - statsd | 2022-01-18 04:45:02.563 | 1 | 1 | 2 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready | -Query OK, 2 row(s) in set (0.001838s) - -taos> select * from statsd.foo; - ts | value | metric_type | -======================================================================================= - 2022-01-18 04:45:02.563422822 | 1 | counter | -Query OK, 1 row(s) in set (0.003854s) -``` - -Use `docker-compose up -d adapter=1 to reduce the instances to 1 - -### Deploy TDengine cluster in Docker Swarm with `docker-compose.yml` - -If you use docker swarm mode, it will schedule arbitrator/taosd/taosadapter services into different hosts automatically. If you've no experience with k8s/kubernetes, this is the most convenient way to scale out the TDengine cluster with multiple hosts/servers. - -Use the `docker-compose.yml` file in previous use case, and deploy with `docker stack` or `docker deploy`: - -```bash -$ VERSION=2.4.0 docker stack deploy -c docker-compose.yml taos +```shell +$ VERSION=3.0.0.0 docker stack deploy -c docker-compose.yml taos Creating network taos_inter Creating network taos_api Creating service taos_arbitrator @@ -607,58 +468,40 @@ Creating service taos_adapter Creating service taos_nginx ``` -Now you've created a TDengine cluster with multiple host servers. 
+Checking status: -Use `docker service` or `docker stack` to manage the cluster: - - - -```bash +```shell $ docker stack ps taos ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -79ni8temw59n taos_nginx.1 nginx:latest TM1701 Running Running about a minute ago -3e94u72msiyg taos_adapter.1 tdengine/tdengine:2.4.0 TM1702 Running Running 56 seconds ago -100amjkwzsc6 taos_td-2.1 tdengine/tdengine:2.4.0 TM1703 Running Running about a minute ago -pkjehr2vvaaa taos_td-1.1 tdengine/tdengine:2.4.0 TM1704 Running Running 2 minutes ago -tpzvgpsr1qkt taos_arbitrator.1 tdengine/tdengine:2.4.0 TM1705 Running Running 2 minutes ago -rvss3g5yg6fa taos_adapter.2 tdengine/tdengine:2.4.0 TM1706 Running Running 56 seconds ago -i2augxamfllf taos_adapter.3 tdengine/tdengine:2.4.0 TM1707 Running Running 56 seconds ago -lmjyhzccpvpg taos_adapter.4 tdengine/tdengine:2.4.0 TM1708 Running Running 56 seconds ago +79ni8temw59n taos_nginx.1 nginx:latest TM1701 Running Running about a minute ago +3e94u72msiyg taos_adapter.1 tdengine/tdengine:3.0.0.0 TM1702 Running Running 56 seconds ago +100amjkwzsc6 taos_td-2.1 tdengine/tdengine:3.0.0.0 TM1703 Running Running about a minute ago +pkjehr2vvaaa taos_td-1.1 tdengine/tdengine:3.0.0.0 TM1704 Running Running 2 minutes ago +tpzvgpsr1qkt taos_arbitrator.1 tdengine/tdengine:3.0.0.0 TM1705 Running Running 2 minutes ago +rvss3g5yg6fa taos_adapter.2 tdengine/tdengine:3.0.0.0 TM1706 Running Running 56 seconds ago +i2augxamfllf taos_adapter.3 tdengine/tdengine:3.0.0.0 TM1707 Running Running 56 seconds ago +lmjyhzccpvpg taos_adapter.4 tdengine/tdengine:3.0.0.0 TM1708 Running Running 56 seconds ago $ docker service ls ID NAME MODE REPLICAS IMAGE PORTS -561t4lu6nfw6 taos_adapter replicated 4/4 tdengine/tdengine:2.4.0 -3hk5ct3q90sm taos_arbitrator replicated 1/1 tdengine/tdengine:2.4.0 +561t4lu6nfw6 taos_adapter replicated 4/4 tdengine/tdengine:3.0.0.0 +3hk5ct3q90sm taos_arbitrator replicated 1/1 tdengine/tdengine:3.0.0.0 d8qr52envqzu taos_nginx replicated 1/1 nginx:latest *:6041->6041/tcp, *:6044->6044/udp -2isssfvjk747 taos_td-1 replicated 1/1 tdengine/tdengine:2.4.0 -9pzw7u02ichv taos_td-2 replicated 1/1 tdengine/tdengine:2.4.0 +2isssfvjk747 taos_td-1 replicated 1/1 tdengine/tdengine:3.0.0.0 +9pzw7u02ichv taos_td-2 replicated 1/1 tdengine/tdengine:3.0.0.0 ``` - +From the above output, you can see two dnodes, two taosAdapters, and one Nginx reverse proxy service. -It shows that there are two dnodes, one arbitrator, four taosadapter and one nginx reverse-forward service in this cluster. +Next, we can reduce the number of taosAdapter services. -You can scale down the taosadapter replicas to `1` by `docker service`: - -```bash +```shell $ docker service scale taos_adapter=1 taos_adapter scaled to 1 -overall progress: 1 out of 1 tasks -1/1: running [==================================================>] +overall progress: 1 out of 1 tasks +1/1: running [==================================================>] verify: Service converged $ docker service ls -f name=taos_adapter ID NAME MODE REPLICAS IMAGE PORTS -561t4lu6nfw6 taos_adapter replicated 1/1 tdengine/tdengine:2.4.0 +561t4lu6nfw6 taos_adapter replicated 1/1 tdengine/tdengine:3.0.0.0 ``` - -Now it remains only 1 taosadapter instance in the cluster. 
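+If needed, the adapter service can be scaled back out the same way. A minimal sketch, assuming the stack was deployed under the name `taos` as above:
+
+```shell
+# restore the taosAdapter service to four replicas
+docker service scale taos_adapter=4
+```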
- -When you want to remove the cluster, just type: - -```bash -docker stack rm taos -``` - -### Environment Variables - -When you start `tdengine` image, you can adjust the configuration of TDengine by passing environment variables on the `docker run` command line or in the docker compose file. You can use all of the environment variables that passed to taosd or taosadapter. diff --git a/packaging/docker/docker-compose.yml b/packaging/docker/docker-compose.yml deleted file mode 100644 index 301b41e7d4..0000000000 --- a/packaging/docker/docker-compose.yml +++ /dev/null @@ -1,77 +0,0 @@ -version: "3" - -networks: - inter: - api: - -services: - arbitrator: - image: tdengine/tdengine:$VERSION - command: tarbitrator - networks: - - inter - td-1: - image: tdengine/tdengine:$VERSION - networks: - - inter - environment: - TAOS_FQDN: "td-1" - TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 - volumes: - - taosdata-td1:/var/lib/taos/ - - taoslog-td1:/var/log/taos/ - td-2: - image: tdengine/tdengine:$VERSION - networks: - - inter - environment: - TAOS_FQDN: "td-2" - TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 - volumes: - - taosdata-td2:/var/lib/taos/ - - taoslog-td2:/var/log/taos/ - adapter: - image: tdengine/tdengine:$VERSION - command: taosadapter - networks: - - inter - environment: - TAOS_FIRST_EP: "td-1" - TOAS_SECOND_EP: "td-2" - deploy: - replicas: 4 - update_config: - parallelism: 4 - nginx: - image: nginx - depends_on: - - adapter - networks: - - inter - - api - ports: - - 6041:6041 - - 6044:6044/udp - command: [ - "sh", - "-c", - "while true; - do curl -s http://adapter:6041/-/ping >/dev/null && break; - done; - printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}' - > /etc/nginx/conf.d/rest.conf; - printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}' - >> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf; - nginx -g 'daemon off;'", - ] -volumes: - taosdata-td1: - taoslog-td1: - taosdata-td2: - taoslog-td2: diff --git a/packaging/docker/dockerManifest.sh b/packaging/docker/dockerManifest.sh index 8f71e30fbd..db71bf8833 100755 --- a/packaging/docker/dockerManifest.sh +++ b/packaging/docker/dockerManifest.sh @@ -1,9 +1,9 @@ #!/bin/bash set -e #set -x -set -v +set -v -# dockerbuild.sh +# dockerbuild.sh # -n [version number] # -p [xxxx] # -V [stable | beta] @@ -28,7 +28,7 @@ do V) #echo "verType=$OPTARG" verType=$(echo $OPTARG) - ;; + ;; h) echo "Usage: `basename $0` -n [version number] " echo " -p [password for docker hub] " @@ -39,8 +39,8 @@ do a) #echo "dockerLatest=$OPTARG" dockerLatest=$(echo $OPTARG) - ;; - ?) #unknow option + ;; + ?) 
#unknow option echo "unkonw argument" exit 1 ;; @@ -60,7 +60,7 @@ if [ "$verType" == "stable" ]; then elif [ "$verType" == "beta" ];then verType=beta tagVal=ver-${version}-beta - dockerinput=TDengine-server-${version}-${verType}-Linux-$cpuType.tar.gz + dockerinput=TDengine-server-${version}-${verType}-Linux-$cpuType.tar.gz dockerinput_x64=TDengine-server-${version}-${verType}-Linux-amd64.tar.gz dockerim=tdengine/tdengine-beta dockeramd64=tdengine/tdengine-amd64-beta @@ -73,30 +73,30 @@ fi username="tdengine" -# generate docker verison +# generate docker version echo "generate ${dockerim}:${version}" docker manifest create -a ${dockerim}:${version} ${dockeramd64}:${version} ${dockeraarch64}:${version} docker manifest inspect ${dockerim}:${version} docker manifest rm ${dockerim}:${version} docker manifest create -a ${dockerim}:${version} ${dockeramd64}:${version} ${dockeraarch64}:${version} docker manifest inspect ${dockerim}:${version} -docker login -u ${username} -p ${passWord} +docker login -u ${username} -p ${passWord} docker manifest push ${dockerim}:${version} -# generate docker latest +# generate docker latest echo "generate ${dockerim}:latest " if [ ${dockerLatest} == 'y' ] ;then echo "docker manifest create -a ${dockerim}:latest ${dockeramd64}:latest ${dockeraarch64}:latest" docker manifest create -a ${dockerim}:latest ${dockeramd64}:latest ${dockeraarch64}:latest - docker manifest inspect ${dockerim}:latest - docker manifest rm ${dockerim}:latest + docker manifest inspect ${dockerim}:latest + docker manifest rm ${dockerim}:latest docker manifest create -a ${dockerim}:latest ${dockeramd64}:latest ${dockeraarch64}:latest docker manifest inspect ${dockerim}:latest docker login -u tdengine -p ${passWord} #replace the docker registry username and password docker manifest push ${dockerim}:latest - docker pull tdengine/tdengine:latest + docker pull tdengine/tdengine:latest fi diff --git a/packaging/docker/dockerbuild.sh b/packaging/docker/dockerbuild.sh index b02387a3d1..4b6fc8576b 100755 --- a/packaging/docker/dockerbuild.sh +++ b/packaging/docker/dockerbuild.sh @@ -74,7 +74,7 @@ do done -# Check_verison() +# Check_version() # { # } @@ -102,14 +102,14 @@ scriptDir=$(dirname $(readlink -f $0)) communityDir=${scriptDir}/../../../community DockerfilePath=${communityDir}/packaging/docker/ if [ "$cloudBuild" == "y" ]; then - comunityArchiveDir=/nas/TDengine/v$version/cloud + communityArchiveDir=/nas/TDengine/v$version/cloud Dockerfile=${communityDir}/packaging/docker/DockerfileCloud else - comunityArchiveDir=/nas/TDengine/v$version/community + communityArchiveDir=/nas/TDengine/v$version/community Dockerfile=${communityDir}/packaging/docker/Dockerfile fi cd ${scriptDir} -cp -f ${comunityArchiveDir}/${pkgFile} . +cp -f ${communityArchiveDir}/${pkgFile} . echo "dirName=${dirName}" diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index 6755ed40e5..78eb7f7587 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -627,9 +627,16 @@ function install_app() { fi } -function install_TDengine() { - echo -e "${GREEN}Start to install TDengine...${NC}" - log_print "start to install TDengine" +function checkDirectory() { + if [ ! -d "${bin_link_dir}" ]; then + ${csudo}mkdir -p ${bin_link_dir} + log_print "${bin_link_dir} directory created" + fi + + if [ ! 
-d "${lib_link_dir}" ]; then + ${csudo}mkdir -p ${lib_link_dir} + log_print "${lib_link_dir} directory created" + fi #install log and data dir , then ln to /usr/local/taos ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} @@ -640,6 +647,13 @@ function install_TDengine() { ${csudo}ln -s ${log_dir} ${log_link_dir} || : ${csudo}ln -s ${data_dir} ${data_link_dir} || : +} + +function install_TDengine() { + echo -e "${GREEN}Start to install TDengine...${NC}" + log_print "start to install TDengine" + + checkDirectory # Install include, lib, binary and service install_include && diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index 0348501bf1..7cc7a1717a 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -97,16 +97,14 @@ typedef struct { typedef struct SQueryExecMetric { int64_t start; // start timestamp, us - int64_t syntaxStart; // start to parse, us - int64_t syntaxEnd; // end to parse, us int64_t ctgStart; // start to parse, us - int64_t ctgEnd; // end to parse, us - int64_t semanticEnd; - int64_t planEnd; - int64_t resultReady; - int64_t execEnd; - int64_t send; // start to send to server, us - int64_t rsp; // receive response from server, us + int64_t execStart; // start to parse, us + + int64_t parseCostUs; + int64_t ctgCostUs; + int64_t analyseCostUs; + int64_t planCostUs; + int64_t execCostUs; } SQueryExecMetric; struct SAppInstInfo { diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 532505fe7d..2348648041 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -83,28 +83,22 @@ static void deregisterRequest(SRequestObj *pRequest) { "current:%d, app current:%d", pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000.0, num, currentInst); - tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 - "us, exec:%" PRId64 "us, stmtType:%d", - duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart, - pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd, - pRequest->metric.execEnd - pRequest->metric.semanticEnd, pRequest->stmtType); + if (pRequest->pQuery && pRequest->pQuery->pRoot) { + if (QUERY_NODE_VNODE_MODIFY_STMT == pRequest->pQuery->pRoot->type && + (0 == ((SVnodeModifyOpStmt *)pRequest->pQuery->pRoot)->sqlNodeType)) { + tscDebug("insert duration %" PRId64 "us: parseCost:%" PRId64 "us, ctgCost:%" PRId64 "us, analyseCost:%" PRId64 + "us, planCost:%" PRId64 "us, exec:%" PRId64 "us", + duration, pRequest->metric.parseCostUs, pRequest->metric.ctgCostUs, pRequest->metric.analyseCostUs, + pRequest->metric.planCostUs, pRequest->metric.execCostUs); + atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration); + } else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) { + tscDebug("query duration %" PRId64 "us: parseCost:%" PRId64 "us, ctgCost:%" PRId64 "us, analyseCost:%" PRId64 + "us, planCost:%" PRId64 "us, exec:%" PRId64 "us", + duration, pRequest->metric.parseCostUs, pRequest->metric.ctgCostUs, pRequest->metric.analyseCostUs, + pRequest->metric.planCostUs, pRequest->metric.execCostUs); - if (QUERY_NODE_VNODE_MODIFY_STMT == pRequest->stmtType) { - // tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 - // "us, exec:%" PRId64 "us", - // duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart, - // pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - 
- // pRequest->metric.ctgEnd, pRequest->metric.execEnd - pRequest->metric.semanticEnd); - // atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration); - } else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) { - // tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 - // "us, planner:%" PRId64 "us, exec:%" PRId64 "us, reqId:0x%" PRIx64, - // duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart, - // pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - - // pRequest->metric.ctgEnd, pRequest->metric.planEnd - pRequest->metric.semanticEnd, - // pRequest->metric.resultReady - pRequest->metric.planEnd, pRequest->requestId); - - atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration); + atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration); + } } if (duration >= SLOW_QUERY_INTERVAL) { @@ -371,8 +365,6 @@ void doDestroyRequest(void *p) { taosArrayDestroy(pRequest->tableList); taosArrayDestroy(pRequest->dbList); taosArrayDestroy(pRequest->targetTableList); - qDestroyQuery(pRequest->pQuery); - nodesDestroyAllocator(pRequest->allocatorRefId); destroyQueryExecRes(&pRequest->body.resInfo.execRes); @@ -387,6 +379,9 @@ void doDestroyRequest(void *p) { taosMemoryFree(pRequest->body.param); } + qDestroyQuery(pRequest->pQuery); + nodesDestroyAllocator(pRequest->allocatorRefId); + taosMemoryFreeClear(pRequest->sqlstr); taosMemoryFree(pRequest); tscTrace("end to destroy request %" PRIx64 " p:%p", reqId, pRequest); diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 8676f01459..f63069d08b 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -323,7 +323,7 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) { return; } - int32_t code = qExecCommand(&pRequest->pTscObj->id ,pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp); + int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp); if (TSDB_CODE_SUCCESS == code && NULL != pRsp) { code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true); } @@ -465,7 +465,7 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra } void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols) { - if(pResInfo == NULL || pSchema == NULL || numOfCols <= 0){ + if (pResInfo == NULL || pSchema == NULL || numOfCols <= 0) { tscError("invalid paras, pResInfo == NULL || pSchema == NULL || numOfCols <= 0"); return; } @@ -479,7 +479,7 @@ void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t } pResInfo->fields = taosMemoryCalloc(numOfCols, sizeof(TAOS_FIELD)); pResInfo->userFields = taosMemoryCalloc(numOfCols, sizeof(TAOS_FIELD)); - if(numOfCols != pResInfo->numOfCols){ + if (numOfCols != pResInfo->numOfCols) { tscError("numOfCols:%d != pResInfo->numOfCols:%d", numOfCols, pResInfo->numOfCols); return; } @@ -925,7 +925,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) { removeMeta(pTscObj, pRequest->targetTableList); } - pRequest->metric.execEnd = taosGetTimestampUs(); + pRequest->metric.execCostUs = taosGetTimestampUs() - pRequest->metric.execStart; int32_t code1 = handleQueryExecRsp(pRequest); if (pRequest->code == TSDB_CODE_SUCCESS && pRequest->code != code1) { pRequest->code = code1; @@ -1051,11 +1051,10 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat pRequest->body.subplanNum 
= pDag->numOfSubplans; } - pRequest->metric.planEnd = taosGetTimestampUs(); - if (code == TSDB_CODE_SUCCESS) { - tscDebug("0x%" PRIx64 " create query plan success, elapsed time:%.2f ms, 0x%" PRIx64, pRequest->self, - (pRequest->metric.planEnd - st) / 1000.0, pRequest->requestId); - } + pRequest->metric.execStart = taosGetTimestampUs(); + + pRequest->metric.planCostUs = pRequest->metric.execStart - st; + if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) { SArray* pNodeList = NULL; if (QUERY_NODE_VNODE_MODIFY_STMT != nodeType(pQuery->pRoot)) { @@ -1103,6 +1102,17 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM destorySqlCallbackWrapper(pWrapper); } + if (pQuery->pRoot && !pRequest->inRetry) { + STscObj* pTscObj = pRequest->pTscObj; + SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary; + if (QUERY_NODE_VNODE_MODIFY_STMT == pQuery->pRoot->type && + (0 == ((SVnodeModifyOpStmt*)pQuery->pRoot)->sqlNodeType)) { + atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertsReq, 1); + } else if (QUERY_NODE_SELECT_STMT == pQuery->pRoot->type) { + atomic_add_fetch_64((int64_t*)&pActivity->numOfQueryReq, 1); + } + } + switch (pQuery->execMode) { case QUERY_EXEC_MODE_LOCAL: asyncExecLocalCmd(pRequest, pQuery); @@ -1358,7 +1368,7 @@ int32_t doProcessMsgFromServer(void* param) { SEpSet* pEpSet = arg->pEpset; SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle; - if(pMsg->info.ahandle == NULL){ + if (pMsg->info.ahandle == NULL) { tscError("doProcessMsgFromServer pMsg->info.ahandle == NULL"); return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -1374,24 +1384,12 @@ int32_t doProcessMsgFromServer(void* param) { if (pSendInfo->requestObjRefId != 0) { SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId); if (pRequest) { - if(pRequest->self != pSendInfo->requestObjRefId){ - tscError("doProcessMsgFromServer pRequest->self:%"PRId64" != pSendInfo->requestObjRefId:%"PRId64, pRequest->self, pSendInfo->requestObjRefId); + if (pRequest->self != pSendInfo->requestObjRefId) { + tscError("doProcessMsgFromServer pRequest->self:%" PRId64 " != pSendInfo->requestObjRefId:%" PRId64, + pRequest->self, pSendInfo->requestObjRefId); return TSDB_CODE_TSC_INTERNAL_ERROR; } - pRequest->metric.rsp = taosGetTimestampUs(); pTscObj = pRequest->pTscObj; - /* - * There is not response callback function for submit response. - * The actual inserted number of points is the first number. 
- */ - int32_t elapsed = pRequest->metric.rsp - pRequest->metric.start; - if (pMsg->code == TSDB_CODE_SUCCESS) { - tscDebug("0x%" PRIx64 " rsp msg:%s, code:%s rspLen:%d, elapsed:%d ms, reqId:0x%" PRIx64, pRequest->self, - TMSG_INFO(pMsg->msgType), tstrerror(pMsg->code), pMsg->contLen, elapsed / 1000, pRequest->requestId); - } else { - tscError("0x%" PRIx64 " rsp msg:%s, code:%s rspLen:%d, elapsed time:%d ms, reqId:0x%" PRIx64, pRequest->self, - TMSG_INFO(pMsg->msgType), tstrerror(pMsg->code), pMsg->contLen, elapsed / 1000, pRequest->requestId); - } } } @@ -1523,7 +1521,7 @@ void doSetOneRowPtr(SReqResultInfo* pResultInfo) { } void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) { - if(pRequest == NULL){ + if (pRequest == NULL) { return NULL; } @@ -1579,7 +1577,7 @@ static void syncFetchFn(void* param, TAOS_RES* res, int32_t numOfRows) { } void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) { - if(pRequest == NULL){ + if (pRequest == NULL) { return NULL; } @@ -1645,8 +1643,11 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int char* pStart = pCol->offset[j] + pCol->pData; int32_t len = taosUcs4ToMbs((TdUcs4*)varDataVal(pStart), varDataLen(pStart), varDataVal(p)); - if(len > bytes || (p + len) >= (pResultInfo->convertBuf[i] + colLength[i])){ - tscError("doConvertUCS4 error, invalid data. len:%d, bytes:%d, (p + len):%p, (pResultInfo->convertBuf[i] + colLength[i]):%p", len, bytes, (p + len), (pResultInfo->convertBuf[i] + colLength[i])); + if (len > bytes || (p + len) >= (pResultInfo->convertBuf[i] + colLength[i])) { + tscError( + "doConvertUCS4 error, invalid data. len:%d, bytes:%d, (p + len):%p, (pResultInfo->convertBuf[i] + " + "colLength[i]):%p", + len, bytes, (p + len), (pResultInfo->convertBuf[i] + colLength[i])); return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -1675,7 +1676,7 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i // | version | total length | total rows | total columns | flag seg| block group id | column schema | each column // length | int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3); - if(ASSERT(numOfCols == cols)){ + if (ASSERT(numOfCols == cols)) { tscError("estimateJsonLen error: numOfCols:%d != cols:%d", numOfCols, cols); return -1; } @@ -1748,7 +1749,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int char* p = (char*)pResultInfo->pData; int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows); - if(dataLen <= 0){ + if (dataLen <= 0) { return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -1758,7 +1759,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int int32_t totalLen = 0; int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3); - if(ASSERT(numOfCols == cols)){ + if (ASSERT(numOfCols == cols)) { tscError("doConvertJson error: numOfCols:%d != cols:%d", numOfCols, cols); return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -1783,7 +1784,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int for (int32_t i = 0; i < numOfCols; ++i) { int32_t colLen = htonl(colLength[i]); int32_t colLen1 = htonl(colLength1[i]); - if(ASSERT(colLen < dataLen)){ + if (ASSERT(colLen < dataLen)) { tscError("doConvertJson error: colLen:%d >= dataLen:%d", colLen, dataLen); return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -1870,7 +1871,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, 
int32_t numOfCols, int32_t numOfRows, bool convertUcs4) { - if(ASSERT(numOfCols > 0 && pFields != NULL && pResultInfo != NULL)){ + if (ASSERT(numOfCols > 0 && pFields != NULL && pResultInfo != NULL)) { tscError("setResultDataPtr paras error"); return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -1902,8 +1903,9 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 int32_t cols = *(int32_t*)p; p += sizeof(int32_t); - if(ASSERT(rows == numOfRows && cols == numOfCols)){ - tscError("setResultDataPtr paras error:rows;%d numOfRows:%d cols:%d numOfCols:%d", rows, numOfRows, cols, numOfCols); + if (ASSERT(rows == numOfRows && cols == numOfCols)) { + tscError("setResultDataPtr paras error:rows;%d numOfRows:%d cols:%d numOfCols:%d", rows, numOfRows, cols, + numOfCols); return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -1970,7 +1972,7 @@ char* getDbOfConnection(STscObj* pObj) { } void setConnectionDB(STscObj* pTscObj, const char* db) { - if(db == NULL || pTscObj == NULL){ + if (db == NULL || pTscObj == NULL) { tscError("setConnectionDB para is NULL"); return; } @@ -1992,7 +1994,7 @@ void resetConnectDB(STscObj* pTscObj) { int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4, bool freeAfterUse) { - if(pResultInfo == NULL || pRsp == NULL){ + if (pResultInfo == NULL || pRsp == NULL) { tscError("setQueryResultFromRsp paras is null"); return TSDB_CODE_TSC_INTERNAL_ERROR; } diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 94303bd180..4ba51ce50d 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -752,7 +752,8 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t SRequestObj *pRequest = pWrapper->pRequest; SQuery *pQuery = pRequest->pQuery; - pRequest->metric.ctgEnd = taosGetTimestampUs(); + int64_t analyseStart = taosGetTimestampUs(); + pRequest->metric.ctgCostUs = analyseStart - pRequest->metric.ctgStart; qDebug("0x%" PRIx64 " start to semantic analysis, reqId:0x%" PRIx64, pRequest->self, pRequest->requestId); if (code == TSDB_CODE_SUCCESS) { @@ -763,7 +764,7 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t } } - pRequest->metric.semanticEnd = taosGetTimestampUs(); + pRequest->metric.analyseCostUs = taosGetTimestampUs() - analyseStart; if (code == TSDB_CODE_SUCCESS) { if (pQuery->haveResultSet) { @@ -775,10 +776,6 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t TSWAP(pRequest->tableList, (pQuery)->pTableList); TSWAP(pRequest->targetTableList, (pQuery)->pTargetTableList); - double el = (pRequest->metric.semanticEnd - pRequest->metric.ctgEnd) / 1000.0; - tscDebug("0x%" PRIx64 " analysis semantics completed, start async query, elapsed time:%.2f ms, reqId:0x%" PRIx64, - pRequest->self, el, pRequest->requestId); - launchAsyncQuery(pRequest, pQuery, pResultMeta, pWrapper); } else { destorySqlCallbackWrapper(pWrapper); @@ -843,7 +840,7 @@ static void doAsyncQueryFromParse(SMetaData *pResultMeta, void *param, int32_t c SRequestObj *pRequest = pWrapper->pRequest; SQuery *pQuery = pRequest->pQuery; - pRequest->metric.ctgEnd = taosGetTimestampUs(); + pRequest->metric.ctgCostUs += taosGetTimestampUs() - pRequest->metric.ctgStart; qDebug("0x%" PRIx64 " start to continue parse, reqId:0x%" PRIx64 ", code:%s", pRequest->self, pRequest->requestId, tstrerror(code)); @@ -956,7 +953,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { } if (TSDB_CODE_SUCCESS == code) { - 
pRequest->metric.syntaxStart = taosGetTimestampUs(); + int64_t syntaxStart = taosGetTimestampUs(); pWrapper->pCatalogReq = taosMemoryCalloc(1, sizeof(SCatalogReq)); if (pWrapper->pCatalogReq == NULL) { @@ -967,19 +964,11 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) { code = qParseSqlSyntax(pWrapper->pParseCtx, &pRequest->pQuery, pWrapper->pCatalogReq); } - pRequest->metric.syntaxEnd = taosGetTimestampUs(); - } - - if (TSDB_CODE_SUCCESS == code && !updateMetaForce) { - SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary; - if (QUERY_NODE_INSERT_STMT == nodeType(pRequest->pQuery->pRoot)) { - atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertsReq, 1); - } else if (QUERY_NODE_SELECT_STMT == nodeType(pRequest->pQuery->pRoot)) { - atomic_add_fetch_64((int64_t *)&pActivity->numOfQueryReq, 1); - } + pRequest->metric.parseCostUs += taosGetTimestampUs() - syntaxStart; } if (TSDB_CODE_SUCCESS == code) { + pRequest->stmtType = pRequest->pQuery->pRoot->type; phaseAsyncQuery(pWrapper); } else { tscError("0x%" PRIx64 " error happens, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code, tstrerror(code), @@ -1006,7 +995,6 @@ static void fetchCallback(void *pResult, void *param, int32_t code) { SRequestObj *pRequest = (SRequestObj *)param; SReqResultInfo *pResultInfo = &pRequest->body.resInfo; - pRequest->metric.resultReady = taosGetTimestampUs(); tscDebug("0x%" PRIx64 " enter scheduler fetch cb, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code, tstrerror(code), pRequest->requestId); diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index 2191c54315..07624efe04 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -456,12 +456,13 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) { (*pRsp)->numOfCols = htonl(SHOW_VARIABLES_RESULT_COLS); int32_t len = blockEncode(pBlock, (*pRsp)->data, SHOW_VARIABLES_RESULT_COLS); + blockDataDestroy(pBlock); + if(len != rspSize - sizeof(SRetrieveTableRsp)){ uError("buildShowVariablesRsp error, len:%d != rspSize - sizeof(SRetrieveTableRsp):%" PRIu64, len, (uint64_t) (rspSize - sizeof(SRetrieveTableRsp))); return TSDB_CODE_TSC_INVALID_INPUT; } - blockDataDestroy(pBlock); return TSDB_CODE_SUCCESS; } diff --git a/source/client/src/clientSmlJson.c b/source/client/src/clientSmlJson.c index a437fd238c..e89227d412 100644 --- a/source/client/src/clientSmlJson.c +++ b/source/client/src/clientSmlJson.c @@ -21,217 +21,218 @@ #define OTD_JSON_SUB_FIELDS_NUM 2 -#define JUMP_JSON_SPACE(start) \ -while(*(start)){\ - if(unlikely(*(start) > 32))\ - break;\ - else\ - (start)++;\ +#define JUMP_JSON_SPACE(start) \ + while (*(start)) { \ + if (unlikely(*(start) > 32)) \ + break; \ + else \ + (start)++; \ } -//SArray *smlJsonParseTags(char *start, char *end){ -// SArray *tags = taosArrayInit(4, sizeof(SSmlKv)); -// while(start < end){ -// SSmlKv kv = {0}; -// kv.type = TSDB_DATA_TYPE_NCHAR; -// bool isInQuote = false; -// while(start < end){ -// if(unlikely(!isInQuote && *start == '"')){ -// start++; -// kv.key = start; -// isInQuote = true; -// continue; -// } -// if(unlikely(isInQuote && *start == '"')){ -// kv.keyLen = start - kv.key; -// start++; -// break; -// } -// start++; -// } -// bool hasColon = false; -// while(start < end){ -// if(unlikely(!hasColon && *start == ':')){ -// start++; -// hasColon = true; -// continue; -// } -// if(unlikely(hasColon && kv.value == NULL && (*start > 32 && *start != '"'))){ -// kv.value = start; -// start++; 
-// continue; -// } +// SArray *smlJsonParseTags(char *start, char *end){ +// SArray *tags = taosArrayInit(4, sizeof(SSmlKv)); +// while(start < end){ +// SSmlKv kv = {0}; +// kv.type = TSDB_DATA_TYPE_NCHAR; +// bool isInQuote = false; +// while(start < end){ +// if(unlikely(!isInQuote && *start == '"')){ +// start++; +// kv.key = start; +// isInQuote = true; +// continue; +// } +// if(unlikely(isInQuote && *start == '"')){ +// kv.keyLen = start - kv.key; +// start++; +// break; +// } +// start++; +// } +// bool hasColon = false; +// while(start < end){ +// if(unlikely(!hasColon && *start == ':')){ +// start++; +// hasColon = true; +// continue; +// } +// if(unlikely(hasColon && kv.value == NULL && (*start > 32 && *start != '"'))){ +// kv.value = start; +// start++; +// continue; +// } // -// if(unlikely(hasColon && kv.value != NULL && (*start == '"' || *start == ',' || *start == '}'))){ -// kv.length = start - kv.value; -// taosArrayPush(tags, &kv); -// start++; -// break; -// } -// start++; -// } -// } -// return tags; -//} +// if(unlikely(hasColon && kv.value != NULL && (*start == '"' || *start == ',' || *start == '}'))){ +// kv.length = start - kv.value; +// taosArrayPush(tags, &kv); +// start++; +// break; +// } +// start++; +// } +// } +// return tags; +// } -//static int32_t smlParseTagsFromJSON(SSmlHandle *info, SSmlLineInfo *elements) { -// int32_t ret = TSDB_CODE_SUCCESS; +// static int32_t smlParseTagsFromJSON(SSmlHandle *info, SSmlLineInfo *elements) { +// int32_t ret = TSDB_CODE_SUCCESS; // -// if(is_same_child_table_telnet(elements, &info->preLine) == 0){ -// return TSDB_CODE_SUCCESS; -// } +// if(is_same_child_table_telnet(elements, &info->preLine) == 0){ +// return TSDB_CODE_SUCCESS; +// } // -// bool isSameMeasure = IS_SAME_SUPER_TABLE; +// bool isSameMeasure = IS_SAME_SUPER_TABLE; // -// int cnt = 0; -// SArray *preLineKV = info->preLineTagKV; -// bool isSuperKVInit = true; -// SArray *superKV = NULL; -// if(info->dataFormat){ -// if(unlikely(!isSameMeasure)){ -// SSmlSTableMeta *sMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, elements->measureLen, NULL); +// int cnt = 0; +// SArray *preLineKV = info->preLineTagKV; +// bool isSuperKVInit = true; +// SArray *superKV = NULL; +// if(info->dataFormat){ +// if(unlikely(!isSameMeasure)){ +// SSmlSTableMeta *sMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, +// elements->measureLen, NULL); // -// if(unlikely(sMeta == NULL)){ -// sMeta = smlBuildSTableMeta(info->dataFormat); -// STableMeta * pTableMeta = smlGetMeta(info, elements->measure, elements->measureLen); -// sMeta->tableMeta = pTableMeta; -// if(pTableMeta == NULL){ -// info->dataFormat = false; -// info->reRun = true; -// return TSDB_CODE_SUCCESS; -// } -// nodeListSet(&info->superTables, elements->measure, elements->measureLen, sMeta, NULL); -// } -// info->currSTableMeta = sMeta->tableMeta; -// superKV = sMeta->tags; +// if(unlikely(sMeta == NULL)){ +// sMeta = smlBuildSTableMeta(info->dataFormat); +// STableMeta * pTableMeta = smlGetMeta(info, elements->measure, elements->measureLen); +// sMeta->tableMeta = pTableMeta; +// if(pTableMeta == NULL){ +// info->dataFormat = false; +// info->reRun = true; +// return TSDB_CODE_SUCCESS; +// } +// nodeListSet(&info->superTables, elements->measure, elements->measureLen, sMeta, NULL); +// } +// info->currSTableMeta = sMeta->tableMeta; +// superKV = sMeta->tags; // -// if(unlikely(taosArrayGetSize(superKV) == 0)){ -// isSuperKVInit = false; -// } -// 
taosArraySetSize(preLineKV, 0); -// } -// }else{ -// taosArraySetSize(preLineKV, 0); -// } +// if(unlikely(taosArrayGetSize(superKV) == 0)){ +// isSuperKVInit = false; +// } +// taosArraySetSize(preLineKV, 0); +// } +// }else{ +// taosArraySetSize(preLineKV, 0); +// } // -// SArray *tags = smlJsonParseTags(elements->tags, elements->tags + elements->tagsLen); -// int32_t tagNum = taosArrayGetSize(tags); -// if (tagNum == 0) { -// uError("SML:tag is empty:%s", elements->tags) -// taosArrayDestroy(tags); -// return TSDB_CODE_SML_INVALID_DATA; -// } -// for (int32_t i = 0; i < tagNum; ++i) { -// SSmlKv kv = *(SSmlKv*)taosArrayGet(tags, i); +// SArray *tags = smlJsonParseTags(elements->tags, elements->tags + elements->tagsLen); +// int32_t tagNum = taosArrayGetSize(tags); +// if (tagNum == 0) { +// uError("SML:tag is empty:%s", elements->tags) +// taosArrayDestroy(tags); +// return TSDB_CODE_SML_INVALID_DATA; +// } +// for (int32_t i = 0; i < tagNum; ++i) { +// SSmlKv kv = *(SSmlKv*)taosArrayGet(tags, i); // -// if(info->dataFormat){ -// if(unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)){ -// info->dataFormat = false; -// info->reRun = true; -// taosArrayDestroy(tags); -// return TSDB_CODE_SUCCESS; -// } +// if(info->dataFormat){ +// if(unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)){ +// info->dataFormat = false; +// info->reRun = true; +// taosArrayDestroy(tags); +// return TSDB_CODE_SUCCESS; +// } // -// if(isSameMeasure){ -// if(unlikely(cnt >= taosArrayGetSize(preLineKV))) { -// info->dataFormat = false; -// info->reRun = true; -// taosArrayDestroy(tags); -// return TSDB_CODE_SUCCESS; -// } -// SSmlKv *preKV = (SSmlKv *)taosArrayGet(preLineKV, cnt); -// if(unlikely(kv.length > preKV->length)){ -// preKV->length = kv.length; -// SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, elements->measureLen, NULL); -// if(unlikely(NULL == tableMeta)){ -// uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id); -// return TSDB_CODE_SML_INTERNAL_ERROR; +// if(isSameMeasure){ +// if(unlikely(cnt >= taosArrayGetSize(preLineKV))) { +// info->dataFormat = false; +// info->reRun = true; +// taosArrayDestroy(tags); +// return TSDB_CODE_SUCCESS; +// } +// SSmlKv *preKV = (SSmlKv *)taosArrayGet(preLineKV, cnt); +// if(unlikely(kv.length > preKV->length)){ +// preKV->length = kv.length; +// SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, +// elements->measureLen, NULL); +// if(unlikely(NULL == tableMeta)){ +// uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id); +// return TSDB_CODE_SML_INTERNAL_ERROR; +// } +// +// SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->tags, cnt); +// oldKV->length = kv.length; +// info->needModifySchema = true; +// } +// if(unlikely(!IS_SAME_KEY)){ +// info->dataFormat = false; +// info->reRun = true; +// taosArrayDestroy(tags); +// return TSDB_CODE_SUCCESS; +// } +// }else{ +// if(isSuperKVInit){ +// if(unlikely(cnt >= taosArrayGetSize(superKV))) { +// info->dataFormat = false; +// info->reRun = true; +// taosArrayDestroy(tags); +// return TSDB_CODE_SUCCESS; // } +// SSmlKv *preKV = (SSmlKv *)taosArrayGet(superKV, cnt); +// if(unlikely(kv.length > preKV->length)) { +// preKV->length = kv.length; +// }else{ +// kv.length = preKV->length; +// } +// info->needModifySchema = true; // -// SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->tags, cnt); -// oldKV->length = kv.length; -// info->needModifySchema = true; -// } -// if(unlikely(!IS_SAME_KEY)){ -// 
info->dataFormat = false; -// info->reRun = true; -// taosArrayDestroy(tags); -// return TSDB_CODE_SUCCESS; -// } -// }else{ -// if(isSuperKVInit){ -// if(unlikely(cnt >= taosArrayGetSize(superKV))) { -// info->dataFormat = false; -// info->reRun = true; -// taosArrayDestroy(tags); -// return TSDB_CODE_SUCCESS; -// } -// SSmlKv *preKV = (SSmlKv *)taosArrayGet(superKV, cnt); -// if(unlikely(kv.length > preKV->length)) { -// preKV->length = kv.length; -// }else{ -// kv.length = preKV->length; -// } -// info->needModifySchema = true; +// if(unlikely(!IS_SAME_KEY)){ +// info->dataFormat = false; +// info->reRun = true; +// taosArrayDestroy(tags); +// return TSDB_CODE_SUCCESS; +// } +// }else{ +// taosArrayPush(superKV, &kv); +// } +// taosArrayPush(preLineKV, &kv); +// } +// }else{ +// taosArrayPush(preLineKV, &kv); +// } +// cnt++; +// } +// taosArrayDestroy(tags); // -// if(unlikely(!IS_SAME_KEY)){ -// info->dataFormat = false; -// info->reRun = true; -// taosArrayDestroy(tags); -// return TSDB_CODE_SUCCESS; -// } -// }else{ -// taosArrayPush(superKV, &kv); -// } -// taosArrayPush(preLineKV, &kv); -// } -// }else{ -// taosArrayPush(preLineKV, &kv); -// } -// cnt++; -// } -// taosArrayDestroy(tags); +// SSmlTableInfo *tinfo = (SSmlTableInfo *)nodeListGet(info->childTables, elements, POINTER_BYTES, +// is_same_child_table_telnet); if (unlikely(tinfo == NULL)) { +// tinfo = smlBuildTableInfo(1, elements->measure, elements->measureLen); +// if (unlikely(!tinfo)) { +// return TSDB_CODE_OUT_OF_MEMORY; +// } +// tinfo->tags = taosArrayDup(preLineKV, NULL); // -// SSmlTableInfo *tinfo = (SSmlTableInfo *)nodeListGet(info->childTables, elements, POINTER_BYTES, is_same_child_table_telnet); -// if (unlikely(tinfo == NULL)) { -// tinfo = smlBuildTableInfo(1, elements->measure, elements->measureLen); -// if (unlikely(!tinfo)) { -// return TSDB_CODE_OUT_OF_MEMORY; -// } -// tinfo->tags = taosArrayDup(preLineKV, NULL); +// smlSetCTableName(tinfo); +// if (info->dataFormat) { +// info->currSTableMeta->uid = tinfo->uid; +// tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta); +// if (tinfo->tableDataCtx == NULL) { +// smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL); +// return TSDB_CODE_SML_INVALID_DATA; +// } +// } // -// smlSetCTableName(tinfo); -// if (info->dataFormat) { -// info->currSTableMeta->uid = tinfo->uid; -// tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta); -// if (tinfo->tableDataCtx == NULL) { -// smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL); -// return TSDB_CODE_SML_INVALID_DATA; -// } -// } +// SSmlLineInfo *key = (SSmlLineInfo *)taosMemoryMalloc(sizeof(SSmlLineInfo)); +// *key = *elements; +// tinfo->key = key; +// nodeListSet(&info->childTables, key, POINTER_BYTES, tinfo, is_same_child_table_telnet); +// } +// if (info->dataFormat) info->currTableDataCtx = tinfo->tableDataCtx; // -// SSmlLineInfo *key = (SSmlLineInfo *)taosMemoryMalloc(sizeof(SSmlLineInfo)); -// *key = *elements; -// tinfo->key = key; -// nodeListSet(&info->childTables, key, POINTER_BYTES, tinfo, is_same_child_table_telnet); -// } -// if (info->dataFormat) info->currTableDataCtx = tinfo->tableDataCtx; -// -// return ret; -//} +// return ret; +// } -static char* smlJsonGetObj(char *payload){ +static char *smlJsonGetObj(char *payload) { int leftBracketCnt = 0; bool isInQuote = false; - while(*payload) { - if(*payload == '"' && *(payload - 1) != '\\'){ + while (*payload) { + if (*payload == '"' && *(payload - 1) != 
'\\') { isInQuote = !isInQuote; - }else if (!isInQuote && unlikely(*payload == '{')) { + } else if (!isInQuote && unlikely(*payload == '{')) { leftBracketCnt++; payload++; continue; - } - else if (!isInQuote && unlikely(*payload == '}')) { + } else if (!isInQuote && unlikely(*payload == '}')) { leftBracketCnt--; payload++; if (leftBracketCnt == 0) { @@ -246,55 +247,52 @@ static char* smlJsonGetObj(char *payload){ return NULL; } -int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset){ +int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset) { int index = 0; - while(*(*start)){ - if((*start)[0] != '"'){ + while (*(*start)) { + if ((*start)[0] != '"') { (*start)++; continue; } - if(unlikely(index >= OTD_JSON_FIELDS_NUM)) { - uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start) - return -1; + if (unlikely(index >= OTD_JSON_FIELDS_NUM)) { + uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start) return -1; } char *sTmp = *start; - if((*start)[1] == 'm' && (*start)[2] == 'e' && (*start)[3] == 't' - && (*start)[4] == 'r' && (*start)[5] == 'i' && (*start)[6] == 'c' && (*start)[7] == '"'){ - + if ((*start)[1] == 'm' && (*start)[2] == 'e' && (*start)[3] == 't' && (*start)[4] == 'r' && (*start)[5] == 'i' && + (*start)[6] == 'c' && (*start)[7] == '"') { (*start) += 8; bool isInQuote = false; - while(*(*start)){ - if(unlikely(!isInQuote && *(*start) == '"')){ + while (*(*start)) { + if (unlikely(!isInQuote && *(*start) == '"')) { (*start)++; offset[index++] = *start - sTmp; element->measure = (*start); isInQuote = true; continue; } - if(unlikely(isInQuote && *(*start) == '"')){ + if (unlikely(isInQuote && *(*start) == '"')) { element->measureLen = (*start) - element->measure; (*start)++; break; } (*start)++; } - }else if((*start)[1] == 't' && (*start)[2] == 'i' && (*start)[3] == 'm' - && (*start)[4] == 'e' && (*start)[5] == 's' && (*start)[6] == 't' - && (*start)[7] == 'a' && (*start)[8] == 'm' && (*start)[9] == 'p' && (*start)[10] == '"'){ - + } else if ((*start)[1] == 't' && (*start)[2] == 'i' && (*start)[3] == 'm' && (*start)[4] == 'e' && + (*start)[5] == 's' && (*start)[6] == 't' && (*start)[7] == 'a' && (*start)[8] == 'm' && + (*start)[9] == 'p' && (*start)[10] == '"') { (*start) += 11; bool hasColon = false; - while(*(*start)){ - if(unlikely(!hasColon && *(*start) == ':')){ + while (*(*start)) { + if (unlikely(!hasColon && *(*start) == ':')) { (*start)++; JUMP_JSON_SPACE((*start)) offset[index++] = *start - sTmp; element->timestamp = (*start); - if(*(*start) == '{'){ - char* tmp = smlJsonGetObj((*start)); - if(tmp){ + if (*(*start) == '{') { + char *tmp = smlJsonGetObj((*start)); + if (tmp) { element->timestampLen = tmp - (*start); *start = tmp; } @@ -303,27 +301,26 @@ int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset){ hasColon = true; continue; } - if(unlikely(hasColon && (*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32))){ + if (unlikely(hasColon && (*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32))) { element->timestampLen = (*start) - element->timestamp; break; } (*start)++; } - }else if((*start)[1] == 'v' && (*start)[2] == 'a' && (*start)[3] == 'l' - && (*start)[4] == 'u' && (*start)[5] == 'e' && (*start)[6] == '"'){ - + } else if ((*start)[1] == 'v' && (*start)[2] == 'a' && (*start)[3] == 'l' && (*start)[4] == 'u' && + (*start)[5] == 'e' && (*start)[6] == '"') { (*start) += 7; bool hasColon = false; - while(*(*start)){ - if(unlikely(!hasColon && *(*start) == ':')){ + while (*(*start)) { + if 
(unlikely(!hasColon && *(*start) == ':')) { (*start)++; JUMP_JSON_SPACE((*start)) offset[index++] = *start - sTmp; element->cols = (*start); - if(*(*start) == '{'){ - char* tmp = smlJsonGetObj((*start)); - if(tmp){ + if (*(*start) == '{') { + char *tmp = smlJsonGetObj((*start)); + if (tmp) { element->colsLen = tmp - (*start); *start = tmp; } @@ -332,24 +329,24 @@ int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset){ hasColon = true; continue; } - if(unlikely(hasColon && (*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32))){ + if (unlikely(hasColon && (*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32))) { element->colsLen = (*start) - element->cols; break; } (*start)++; } - }else if((*start)[1] == 't' && (*start)[2] == 'a' && (*start)[3] == 'g' - && (*start)[4] == 's' && (*start)[5] == '"'){ + } else if ((*start)[1] == 't' && (*start)[2] == 'a' && (*start)[3] == 'g' && (*start)[4] == 's' && + (*start)[5] == '"') { (*start) += 6; - while(*(*start)){ - if(unlikely(*(*start) == ':')){ + while (*(*start)) { + if (unlikely(*(*start) == ':')) { (*start)++; JUMP_JSON_SPACE((*start)) offset[index++] = *start - sTmp; element->tags = (*start); - char* tmp = smlJsonGetObj((*start)); - if(tmp){ + char *tmp = smlJsonGetObj((*start)); + if (tmp) { element->tagsLen = tmp - (*start); *start = tmp; } @@ -358,102 +355,100 @@ int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset){ (*start)++; } } - if(*(*start) == '\0'){ + if (*(*start) == '\0') { break; } - if(*(*start) == '}'){ + if (*(*start) == '}') { (*start)++; break; } (*start)++; } - if(unlikely(index != OTD_JSON_FIELDS_NUM) || element->tags == NULL || element->cols == NULL || element->measure == NULL || element->timestamp == NULL) { - uError("elements != %d or element parse null", OTD_JSON_FIELDS_NUM) - return -1; + if (unlikely(index != OTD_JSON_FIELDS_NUM) || element->tags == NULL || element->cols == NULL || + element->measure == NULL || element->timestamp == NULL) { + uError("elements != %d or element parse null", OTD_JSON_FIELDS_NUM) return -1; } return 0; } -int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset){ +int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset) { int index = 0; - while(*(*start)){ - if((*start)[0] != '"'){ + while (*(*start)) { + if ((*start)[0] != '"') { (*start)++; continue; } - if(unlikely(index >= OTD_JSON_FIELDS_NUM)) { - uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start) - return -1; + if (unlikely(index >= OTD_JSON_FIELDS_NUM)) { + uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start) return -1; } - if((*start)[1] == 'm'){ + if ((*start)[1] == 'm') { (*start) += offset[index++]; element->measure = *start; - while(*(*start)){ - if(unlikely(*(*start) == '"')){ + while (*(*start)) { + if (unlikely(*(*start) == '"')) { element->measureLen = (*start) - element->measure; (*start)++; break; } (*start)++; } - }else if((*start)[1] == 't' && (*start)[2] == 'i'){ + } else if ((*start)[1] == 't' && (*start)[2] == 'i') { (*start) += offset[index++]; element->timestamp = *start; - if(*(*start) == '{'){ - char* tmp = smlJsonGetObj((*start)); - if(tmp){ + if (*(*start) == '{') { + char *tmp = smlJsonGetObj((*start)); + if (tmp) { element->timestampLen = tmp - (*start); *start = tmp; } - }else{ - while(*(*start)){ - if(unlikely(*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32)){ + } else { + while (*(*start)) { + if (unlikely(*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32)) { element->timestampLen = (*start) 
- element->timestamp; break; } (*start)++; } } - }else if((*start)[1] == 'v'){ + } else if ((*start)[1] == 'v') { (*start) += offset[index++]; element->cols = *start; - if(*(*start) == '{'){ - char* tmp = smlJsonGetObj((*start)); - if(tmp){ + if (*(*start) == '{') { + char *tmp = smlJsonGetObj((*start)); + if (tmp) { element->colsLen = tmp - (*start); *start = tmp; } - }else{ - while(*(*start)){ - if(unlikely( *(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32)){ + } else { + while (*(*start)) { + if (unlikely(*(*start) == ',' || *(*start) == '}' || (*(*start)) <= 32)) { element->colsLen = (*start) - element->cols; break; } (*start)++; } } - }else if((*start)[1] == 't' && (*start)[2] == 'a'){ + } else if ((*start)[1] == 't' && (*start)[2] == 'a') { (*start) += offset[index++]; element->tags = (*start); - char* tmp = smlJsonGetObj((*start)); - if(tmp){ + char *tmp = smlJsonGetObj((*start)); + if (tmp) { element->tagsLen = tmp - (*start); *start = tmp; } } - if(*(*start) == '}'){ + if (*(*start) == '}') { (*start)++; break; } (*start)++; } - if(unlikely(index != 0 && index != OTD_JSON_FIELDS_NUM)) { - uError("elements != %d", OTD_JSON_FIELDS_NUM) - return -1; + if (unlikely(index != 0 && index != OTD_JSON_FIELDS_NUM)) { + uError("elements != %d", OTD_JSON_FIELDS_NUM) return -1; } return 0; } @@ -469,19 +464,18 @@ static inline int32_t smlParseMetricFromJSON(SSmlHandle *info, cJSON *metric, SS return TSDB_CODE_SUCCESS; } -const char *jsonName[OTD_JSON_FIELDS_NUM] = {"metric", "timestamp", "value", "tags"}; -static int32_t smlGetJsonElements(cJSON *root, cJSON ***marks){ +const char *jsonName[OTD_JSON_FIELDS_NUM] = {"metric", "timestamp", "value", "tags"}; +static int32_t smlGetJsonElements(cJSON *root, cJSON ***marks) { for (int i = 0; i < OTD_JSON_FIELDS_NUM; ++i) { cJSON *child = root->child; - while(child != NULL) - { - if(strcasecmp(child->string, jsonName[i]) == 0){ + while (child != NULL) { + if (strcasecmp(child->string, jsonName[i]) == 0) { *marks[i] = child; break; } child = child->next; } - if(*marks[i] == NULL){ + if (*marks[i] == NULL) { uError("smlGetJsonElements error, not find mark:%d:%s", i, jsonName[i]); return -1; } @@ -692,15 +686,15 @@ static int32_t smlParseTagsFromJSON(SSmlHandle *info, cJSON *tags, SSmlLineInfo SArray *maxKVs = info->maxTagKVs; bool isSuperKVInit = true; SArray *superKV = NULL; - if(info->dataFormat){ - if(unlikely(!isSameMeasure)){ + if (info->dataFormat) { + if (unlikely(!isSameMeasure)) { SSmlSTableMeta **tmp = (SSmlSTableMeta **)taosHashGet(info->superTables, elements->measure, elements->measureLen); - SSmlSTableMeta *sMeta = NULL; - if(unlikely(tmp == NULL)){ - STableMeta * pTableMeta = smlGetMeta(info, elements->measure, elements->measureLen); - if(pTableMeta == NULL){ + SSmlSTableMeta *sMeta = NULL; + if (unlikely(tmp == NULL)) { + STableMeta *pTableMeta = smlGetMeta(info, elements->measure, elements->measureLen); + if (pTableMeta == NULL) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } sMeta = smlBuildSTableMeta(info->dataFormat); @@ -711,18 +705,18 @@ static int32_t smlParseTagsFromJSON(SSmlHandle *info, cJSON *tags, SSmlLineInfo info->currSTableMeta = (*tmp)->tableMeta; superKV = (*tmp)->tags; - if(unlikely(taosArrayGetSize(superKV) == 0)){ + if (unlikely(taosArrayGetSize(superKV) == 0)) { isSuperKVInit = false; } - taosArraySetSize(maxKVs, 0); + taosArrayClear(maxKVs); } - }else{ - taosArraySetSize(maxKVs, 0); + } else { + taosArrayClear(maxKVs); } - taosArraySetSize(preLineKV, 0); + 
taosArrayClear(preLineKV); int32_t tagNum = cJSON_GetArraySize(tags); - if(unlikely(tagNum == 0)){ + if (unlikely(tagNum == 0)) { uError("SML:Tag should not be empty"); return TSDB_CODE_TSC_INVALID_JSON; } @@ -731,7 +725,7 @@ static int32_t smlParseTagsFromJSON(SSmlHandle *info, cJSON *tags, SSmlLineInfo if (unlikely(tag == NULL)) { return TSDB_CODE_TSC_INVALID_JSON; } -// if(unlikely(tag == cMeasure)) continue; + // if(unlikely(tag == cMeasure)) continue; size_t keyLen = strlen(tag->string); if (unlikely(IS_INVALID_COL_LEN(keyLen))) { uError("OTD:Tag key length is 0 or too large than 64"); @@ -739,31 +733,32 @@ static int32_t smlParseTagsFromJSON(SSmlHandle *info, cJSON *tags, SSmlLineInfo } // add kv to SSmlKv - SSmlKv kv ={.key = tag->string, .keyLen = keyLen}; + SSmlKv kv = {.key = tag->string, .keyLen = keyLen}; // value ret = smlParseValueFromJSON(tag, &kv); if (unlikely(ret != TSDB_CODE_SUCCESS)) { return ret; } - if(info->dataFormat){ - if(unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)){ + if (info->dataFormat) { + if (unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } - if(isSameMeasure){ - if(unlikely(cnt >= taosArrayGetSize(maxKVs))) { + if (isSameMeasure) { + if (unlikely(cnt >= taosArrayGetSize(maxKVs))) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } SSmlKv *maxKV = (SSmlKv *)taosArrayGet(maxKVs, cnt); - if(unlikely(kv.length > maxKV->length)){ + if (unlikely(kv.length > maxKV->length)) { maxKV->length = kv.length; - SSmlSTableMeta **tableMeta = (SSmlSTableMeta **)taosHashGet(info->superTables, elements->measure, elements->measureLen); - if(unlikely(NULL == tableMeta)){ + SSmlSTableMeta **tableMeta = + (SSmlSTableMeta **)taosHashGet(info->superTables, elements->measure, elements->measureLen); + if (unlikely(NULL == tableMeta)) { uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id); return TSDB_CODE_SML_INTERNAL_ERROR; } @@ -772,49 +767,50 @@ static int32_t smlParseTagsFromJSON(SSmlHandle *info, cJSON *tags, SSmlLineInfo oldKV->length = kv.length; info->needModifySchema = true; } - if(unlikely(!IS_SAME_KEY)){ + if (unlikely(!IS_SAME_KEY)) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } - }else{ - if(isSuperKVInit){ - if(unlikely(cnt >= taosArrayGetSize(superKV))) { + } else { + if (isSuperKVInit) { + if (unlikely(cnt >= taosArrayGetSize(superKV))) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } SSmlKv *maxKV = (SSmlKv *)taosArrayGet(superKV, cnt); - if(unlikely(kv.length > maxKV->length)) { + if (unlikely(kv.length > maxKV->length)) { maxKV->length = kv.length; - }else{ + } else { kv.length = maxKV->length; } info->needModifySchema = true; - if(unlikely(!IS_SAME_KEY)){ + if (unlikely(!IS_SAME_KEY)) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } - }else{ + } else { taosArrayPush(superKV, &kv); } taosArrayPush(maxKVs, &kv); } - }else{ + } else { taosArrayPush(maxKVs, &kv); } taosArrayPush(preLineKV, &kv); cnt++; } - elements->measureTag = (char*)taosMemoryMalloc(elements->measureLen + elements->tagsLen); + elements->measureTag = (char *)taosMemoryMalloc(elements->measureLen + elements->tagsLen); memcpy(elements->measureTag, elements->measure, elements->measureLen); memcpy(elements->measureTag + elements->measureLen, elements->tags, 
elements->tagsLen); elements->measureTagsLen = elements->measureLen + elements->tagsLen; - SSmlTableInfo **tmp = (SSmlTableInfo **)taosHashGet(info->childTables, elements->measureTag, elements->measureLen + elements->tagsLen); + SSmlTableInfo **tmp = + (SSmlTableInfo **)taosHashGet(info->childTables, elements->measureTag, elements->measureLen + elements->tagsLen); SSmlTableInfo *tinfo = NULL; if (unlikely(tmp == NULL)) { tinfo = smlBuildTableInfo(1, elements->measure, elements->measureLen); @@ -835,15 +831,16 @@ static int32_t smlParseTagsFromJSON(SSmlHandle *info, cJSON *tags, SSmlLineInfo } } -// SSmlLineInfo *key = (SSmlLineInfo *)taosMemoryMalloc(sizeof(SSmlLineInfo)); -// *key = *elements; -// if(info->parseJsonByLib){ -// key->tags = taosMemoryMalloc(elements->tagsLen + 1); -// memcpy(key->tags, elements->tags, elements->tagsLen); -// key->tags[elements->tagsLen] = 0; -// } -// tinfo->key = key; - taosHashPut(info->childTables, elements->measureTag, elements->measureLen + elements->tagsLen, &tinfo, POINTER_BYTES); + // SSmlLineInfo *key = (SSmlLineInfo *)taosMemoryMalloc(sizeof(SSmlLineInfo)); + // *key = *elements; + // if(info->parseJsonByLib){ + // key->tags = taosMemoryMalloc(elements->tagsLen + 1); + // memcpy(key->tags, elements->tags, elements->tagsLen); + // key->tags[elements->tagsLen] = 0; + // } + // tinfo->key = key; + taosHashPut(info->childTables, elements->measureTag, elements->measureLen + elements->tagsLen, &tinfo, + POINTER_BYTES); tmp = &tinfo; } if (info->dataFormat) info->currTableDataCtx = (*tmp)->tableDataCtx; @@ -877,7 +874,7 @@ static int64_t smlParseTSFromJSONObj(SSmlHandle *info, cJSON *root, int32_t toPr } if (timeDouble == 0) { - return taosGetTimestampNs()/smlFactorNS[toPrecision]; + return taosGetTimestampNs() / smlFactorNS[toPrecision]; } if (timeDouble < 0) { @@ -885,11 +882,11 @@ static int64_t smlParseTSFromJSONObj(SSmlHandle *info, cJSON *root, int32_t toPr } int64_t tsInt64 = timeDouble; - size_t typeLen = strlen(type->valuestring); + size_t typeLen = strlen(type->valuestring); if (typeLen == 1 && (type->valuestring[0] == 's' || type->valuestring[0] == 'S')) { // seconds int8_t fromPrecision = TSDB_TIME_PRECISION_SECONDS; - if(smlFactorS[toPrecision] < INT64_MAX / tsInt64){ + if (smlFactorS[toPrecision] < INT64_MAX / tsInt64) { return tsInt64 * smlFactorS[toPrecision]; } return -1; @@ -938,11 +935,10 @@ static int64_t smlParseTSFromJSON(SSmlHandle *info, cJSON *timestamp) { } if (unlikely(timeDouble < 0)) { - smlBuildInvalidDataMsg(&info->msgBuf, - "timestamp is negative", NULL); + smlBuildInvalidDataMsg(&info->msgBuf, "timestamp is negative", NULL); return timeDouble; - }else if (unlikely(timeDouble == 0)) { - return taosGetTimestampNs()/smlFactorNS[toPrecision]; + } else if (unlikely(timeDouble == 0)) { + return taosGetTimestampNs() / smlFactorNS[toPrecision]; } uint8_t tsLen = smlGetTimestampLen((int64_t)timeDouble); @@ -954,19 +950,18 @@ static int64_t smlParseTSFromJSON(SSmlHandle *info, cJSON *timestamp) { return -1; } int64_t tsInt64 = timeDouble; - if(fromPrecision == TSDB_TIME_PRECISION_SECONDS){ - if(smlFactorS[toPrecision] < INT64_MAX / tsInt64){ + if (fromPrecision == TSDB_TIME_PRECISION_SECONDS) { + if (smlFactorS[toPrecision] < INT64_MAX / tsInt64) { return tsInt64 * smlFactorS[toPrecision]; } return -1; - }else{ + } else { return convertTimePrecision(timeDouble, fromPrecision, toPrecision); } } else if (cJSON_IsObject(timestamp)) { return smlParseTSFromJSONObj(info, timestamp, toPrecision); } else { - 
smlBuildInvalidDataMsg(&info->msgBuf, - "invalidate json", NULL); + smlBuildInvalidDataMsg(&info->msgBuf, "invalidate json", NULL); return -1; } } @@ -1011,7 +1006,7 @@ static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo bool needFree = info->dataFormat; elements->tags = cJSON_PrintUnformatted(tagsJson); elements->tagsLen = strlen(elements->tags); - if(is_same_child_table_telnet(elements, &info->preLine) != 0) { + if (is_same_child_table_telnet(elements, &info->preLine) != 0) { ret = smlParseTagsFromJSON(info, tagsJson, elements); if (unlikely(ret)) { uError("OTD:0x%" PRIx64 " Unable to parse tags from JSON payload", info->id); @@ -1019,16 +1014,16 @@ static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo elements->tags = NULL; return ret; } - }else{ + } else { elements->measureTag = info->preLine.measureTag; } - if(needFree){ + if (needFree) { taosMemoryFree(elements->tags); elements->tags = NULL; } - if(unlikely(info->reRun)){ + if (unlikely(info->reRun)) { return TSDB_CODE_SUCCESS; } @@ -1039,14 +1034,18 @@ static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo uError("OTD:0x%" PRIx64 " Unable to parse timestamp from JSON payload", info->id); return TSDB_CODE_INVALID_TIMESTAMP; } - SSmlKv kvTs = { .key = TS, .keyLen = TS_LEN, .type = TSDB_DATA_TYPE_TIMESTAMP, .i = ts, .length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes}; + SSmlKv kvTs = {.key = TS, + .keyLen = TS_LEN, + .type = TSDB_DATA_TYPE_TIMESTAMP, + .i = ts, + .length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes}; - if(info->dataFormat){ + if (info->dataFormat) { ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kvTs, 0); - if(ret == TSDB_CODE_SUCCESS){ + if (ret == TSDB_CODE_SUCCESS) { ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 1); } - if(ret == TSDB_CODE_SUCCESS){ + if (ret == TSDB_CODE_SUCCESS) { ret = smlBuildRow(info->currTableDataCtx); } clearColValArray(info->currTableDataCtx->pValues); @@ -1054,8 +1053,8 @@ static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL); return ret; } - }else{ - if(elements->colArray == NULL){ + } else { + if (elements->colArray == NULL) { elements->colArray = taosArrayInit(16, sizeof(SSmlKv)); } taosArrayPush(elements->colArray, &kvTs); @@ -1098,20 +1097,20 @@ static int32_t smlParseJSONExt(SSmlHandle *info, char *payload) { info->lines = NULL; } ret = smlClearForRerun(info); - if(ret != TSDB_CODE_SUCCESS){ + if (ret != TSDB_CODE_SUCCESS) { return ret; } info->parseJsonByLib = true; cJSON *head = (payloadNum == 1 && cJSON_IsObject(info->root)) ? 
info->root : info->root->child; - int cnt = 0; + int cnt = 0; cJSON *dataPoint = head; while (dataPoint) { - if(info->dataFormat) { + if (info->dataFormat) { SSmlLineInfo element = {0}; ret = smlParseJSONStringExt(info, dataPoint, &element); - }else{ + } else { ret = smlParseJSONStringExt(info, dataPoint, info->lines + cnt); } if (unlikely(ret != TSDB_CODE_SUCCESS)) { @@ -1119,12 +1118,12 @@ static int32_t smlParseJSONExt(SSmlHandle *info, char *payload) { return ret; } - if(unlikely(info->reRun)){ + if (unlikely(info->reRun)) { cnt = 0; dataPoint = head; info->lineNum = payloadNum; ret = smlClearForRerun(info); - if(ret != TSDB_CODE_SUCCESS){ + if (ret != TSDB_CODE_SUCCESS) { return ret; } continue; @@ -1139,9 +1138,9 @@ static int32_t smlParseJSONExt(SSmlHandle *info, char *payload) { static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo *elements) { int32_t ret = TSDB_CODE_SUCCESS; - if(info->offset[0] == 0){ + if (info->offset[0] == 0) { ret = smlJsonParseObjFirst(start, elements, info->offset); - }else{ + } else { ret = smlJsonParseObj(start, elements, info->offset); } @@ -1149,7 +1148,7 @@ static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo * return TSDB_CODE_TSC_INVALID_VALUE; } - if(unlikely(**start == '\0' && elements->measure == NULL)) return TSDB_CODE_SUCCESS; + if (unlikely(**start == '\0' && elements->measure == NULL)) return TSDB_CODE_SUCCESS; if (unlikely(IS_INVALID_TABLE_LEN(elements->measureLen))) { smlBuildInvalidDataMsg(&info->msgBuf, "measure is empty or too large than 192", NULL); @@ -1161,10 +1160,10 @@ static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo * if (unlikely(elements->colsLen == 0)) { uError("SML:colsLen == 0"); return TSDB_CODE_TSC_INVALID_VALUE; - }else if(unlikely(elements->cols[0] == '{')){ + } else if (unlikely(elements->cols[0] == '{')) { char tmp = elements->cols[elements->colsLen]; elements->cols[elements->colsLen] = '\0'; - cJSON* valueJson = cJSON_Parse(elements->cols); + cJSON *valueJson = cJSON_Parse(elements->cols); if (unlikely(valueJson == NULL)) { uError("SML:0x%" PRIx64 " parse json cols failed:%s", info->id, elements->cols); return TSDB_CODE_TSC_INVALID_JSON; @@ -1177,16 +1176,16 @@ static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo * return TSDB_CODE_TSC_INVALID_VALUE; } elements->cols[elements->colsLen] = tmp; - }else if(smlParseValue(&kv, &info->msgBuf) != TSDB_CODE_SUCCESS){ + } else if (smlParseValue(&kv, &info->msgBuf) != TSDB_CODE_SUCCESS) { uError("SML:cols invalidate:%s", elements->cols); return TSDB_CODE_TSC_INVALID_VALUE; } // Parse tags - if(is_same_child_table_telnet(elements, &info->preLine) != 0){ + if (is_same_child_table_telnet(elements, &info->preLine) != 0) { char tmp = *(elements->tags + elements->tagsLen); *(elements->tags + elements->tagsLen) = 0; - cJSON* tagsJson = cJSON_Parse(elements->tags); + cJSON *tagsJson = cJSON_Parse(elements->tags); *(elements->tags + elements->tagsLen) = tmp; if (unlikely(tagsJson == NULL)) { uError("SML:0x%" PRIx64 " parse json tag failed:%s", info->id, elements->tags); @@ -1199,24 +1198,24 @@ static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo * uError("OTD:0x%" PRIx64 " Unable to parse tags from JSON payload", info->id); return ret; } - }else{ + } else { elements->measureTag = info->preLine.measureTag; } - if(unlikely(info->reRun)){ + if (unlikely(info->reRun)) { return TSDB_CODE_SUCCESS; } // Parse timestamp // notice!!! 
put ts back to tag to ensure get meta->precision int64_t ts = 0; - if(unlikely(elements->timestampLen == 0)){ + if (unlikely(elements->timestampLen == 0)) { uError("OTD:0x%" PRIx64 " elements->timestampLen == 0", info->id); return TSDB_CODE_INVALID_TIMESTAMP; - }else if(elements->timestamp[0] == '{'){ + } else if (elements->timestamp[0] == '{') { char tmp = elements->timestamp[elements->timestampLen]; elements->cols[elements->timestampLen] = '\0'; - cJSON* tsJson = cJSON_Parse(elements->timestamp); + cJSON *tsJson = cJSON_Parse(elements->timestamp); ts = smlParseTSFromJSON(info, tsJson); if (unlikely(ts < 0)) { uError("SML:0x%" PRIx64 " Unable to parse timestamp from JSON payload:%s", info->id, elements->timestamp); @@ -1226,21 +1225,25 @@ static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo * } elements->timestamp[elements->timestampLen] = tmp; cJSON_Delete(tsJson); - }else{ + } else { ts = smlParseOpenTsdbTime(info, elements->timestamp, elements->timestampLen); if (unlikely(ts < 0)) { uError("OTD:0x%" PRIx64 " Unable to parse timestamp from JSON payload", info->id); return TSDB_CODE_INVALID_TIMESTAMP; } } - SSmlKv kvTs = { .key = TS, .keyLen = TS_LEN, .type = TSDB_DATA_TYPE_TIMESTAMP, .i = ts, .length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes}; + SSmlKv kvTs = {.key = TS, + .keyLen = TS_LEN, + .type = TSDB_DATA_TYPE_TIMESTAMP, + .i = ts, + .length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes}; - if(info->dataFormat){ + if (info->dataFormat) { ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kvTs, 0); - if(ret == TSDB_CODE_SUCCESS){ + if (ret == TSDB_CODE_SUCCESS) { ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 1); } - if(ret == TSDB_CODE_SUCCESS){ + if (ret == TSDB_CODE_SUCCESS) { ret = smlBuildRow(info->currTableDataCtx); } clearColValArray(info->currTableDataCtx->pValues); @@ -1248,8 +1251,8 @@ static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo * smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL); return ret; } - }else{ - if(elements->colArray == NULL){ + } else { + if (elements->colArray == NULL) { elements->colArray = taosArrayInit(16, sizeof(SSmlKv)); } taosArrayPush(elements->colArray, &kvTs); @@ -1265,43 +1268,43 @@ int32_t smlParseJSON(SSmlHandle *info, char *payload) { int32_t ret = TSDB_CODE_SUCCESS; uDebug("SML:0x%" PRIx64 "json:%s", info->id, payload); - int cnt = 0; + int cnt = 0; char *dataPointStart = payload; while (1) { - if(info->dataFormat) { + if (info->dataFormat) { SSmlLineInfo element = {0}; ret = smlParseJSONString(info, &dataPointStart, &element); - if(element.measureTagsLen != 0) taosMemoryFree(element.measureTag); - }else{ - if(cnt >= payloadNum){ + if (element.measureTagsLen != 0) taosMemoryFree(element.measureTag); + } else { + if (cnt >= payloadNum) { payloadNum = payloadNum << 1; - void* tmp = taosMemoryRealloc(info->lines, payloadNum * sizeof(SSmlLineInfo)); - if(tmp != NULL){ - info->lines = (SSmlLineInfo*)tmp; + void *tmp = taosMemoryRealloc(info->lines, payloadNum * sizeof(SSmlLineInfo)); + if (tmp != NULL) { + info->lines = (SSmlLineInfo *)tmp; memset(info->lines + cnt, 0, (payloadNum - cnt) * sizeof(SSmlLineInfo)); } } ret = smlParseJSONString(info, &dataPointStart, info->lines + cnt); - if((info->lines + cnt)->measure == NULL) break; + if ((info->lines + cnt)->measure == NULL) break; } if (unlikely(ret != TSDB_CODE_SUCCESS)) { uError("SML:0x%" PRIx64 " Invalid JSON Payload 1:%s", info->id, payload); return 
smlParseJSONExt(info, payload); } - if(unlikely(info->reRun)){ + if (unlikely(info->reRun)) { cnt = 0; dataPointStart = payload; info->lineNum = payloadNum; ret = smlClearForRerun(info); - if(ret != TSDB_CODE_SUCCESS){ + if (ret != TSDB_CODE_SUCCESS) { return ret; } continue; } cnt++; - if(*dataPointStart == '\0') break; + if (*dataPointStart == '\0') break; } info->lineNum = cnt; diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index e1fd8c2a81..f2c212928e 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -21,23 +21,24 @@ #include "clientSml.h" // comma , -//#define IS_SLASH_COMMA(sql) (*(sql) == COMMA && *((sql)-1) == SLASH) -#define IS_COMMA(sql) (*(sql) == COMMA && *((sql)-1) != SLASH) +// #define IS_SLASH_COMMA(sql) (*(sql) == COMMA && *((sql)-1) == SLASH) +#define IS_COMMA(sql) (*(sql) == COMMA && *((sql)-1) != SLASH) // space -//#define IS_SLASH_SPACE(sql) (*(sql) == SPACE && *((sql)-1) == SLASH) -#define IS_SPACE(sql) (*(sql) == SPACE && *((sql)-1) != SLASH) +// #define IS_SLASH_SPACE(sql) (*(sql) == SPACE && *((sql)-1) == SLASH) +#define IS_SPACE(sql) (*(sql) == SPACE && *((sql)-1) != SLASH) // equal = -//#define IS_SLASH_EQUAL(sql) (*(sql) == EQUAL && *((sql)-1) == SLASH) -#define IS_EQUAL(sql) (*(sql) == EQUAL && *((sql)-1) != SLASH) +// #define IS_SLASH_EQUAL(sql) (*(sql) == EQUAL && *((sql)-1) == SLASH) +#define IS_EQUAL(sql) (*(sql) == EQUAL && *((sql)-1) != SLASH) // quote " -//#define IS_SLASH_QUOTE(sql) (*(sql) == QUOTE && *((sql)-1) == SLASH) -#define IS_QUOTE(sql) (*(sql) == QUOTE && *((sql)-1) != SLASH) +// #define IS_SLASH_QUOTE(sql) (*(sql) == QUOTE && *((sql)-1) == SLASH) +#define IS_QUOTE(sql) (*(sql) == QUOTE && *((sql)-1) != SLASH) // SLASH -//#define IS_SLASH_SLASH(sql) (*(sql) == SLASH && *((sql)-1) == SLASH) +// #define IS_SLASH_SLASH(sql) (*(sql) == SLASH && *((sql)-1) == SLASH) -#define IS_SLASH_LETTER(sql) \ - (*((sql)-1) == SLASH && (*(sql) == COMMA || *(sql) == SPACE || *(sql) == EQUAL || *(sql) == QUOTE || *(sql) == SLASH)) \ -// (IS_SLASH_COMMA(sql) || IS_SLASH_SPACE(sql) || IS_SLASH_EQUAL(sql) || IS_SLASH_QUOTE(sql) || IS_SLASH_SLASH(sql)) +#define IS_SLASH_LETTER(sql) \ + (*((sql)-1) == SLASH && (*(sql) == COMMA || *(sql) == SPACE || *(sql) == EQUAL || *(sql) == QUOTE || \ + *(sql) == SLASH)) // (IS_SLASH_COMMA(sql) || IS_SLASH_SPACE(sql) || IS_SLASH_EQUAL(sql) || + // IS_SLASH_QUOTE(sql) || IS_SLASH_SLASH(sql)) #define MOVE_FORWARD_ONE(sql, len) (memmove((void *)((sql)-1), (sql), len)) @@ -53,15 +54,15 @@ #define BINARY_ADD_LEN 2 // "binary" 2 means " " #define NCHAR_ADD_LEN 3 // L"nchar" 3 means L" " -uint8_t smlPrecisionConvert[7] = {TSDB_TIME_PRECISION_NANO, TSDB_TIME_PRECISION_HOURS, TSDB_TIME_PRECISION_MINUTES, +uint8_t smlPrecisionConvert[7] = {TSDB_TIME_PRECISION_NANO, TSDB_TIME_PRECISION_HOURS, TSDB_TIME_PRECISION_MINUTES, TSDB_TIME_PRECISION_SECONDS, TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_MICRO, TSDB_TIME_PRECISION_NANO}; static int64_t smlParseInfluxTime(SSmlHandle *info, const char *data, int32_t len) { uint8_t toPrecision = info->currSTableMeta ? 
info->currSTableMeta->tableInfo.precision : TSDB_TIME_PRECISION_NANO; - if(unlikely(len == 0 || (len == 1 && data[0] == '0'))){ - return taosGetTimestampNs()/smlFactorNS[toPrecision]; + if (unlikely(len == 0 || (len == 1 && data[0] == '0'))) { + return taosGetTimestampNs() / smlFactorNS[toPrecision]; } uint8_t fromPrecision = smlPrecisionConvert[info->precision]; @@ -75,7 +76,7 @@ static int64_t smlParseInfluxTime(SSmlHandle *info, const char *data, int32_t le } int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { - if (pVal->value[0] == '"'){ // binary + if (pVal->value[0] == '"') { // binary if (pVal->length >= 2 && pVal->value[pVal->length - 1] == '"') { pVal->type = TSDB_DATA_TYPE_BINARY; pVal->length -= BINARY_ADD_LEN; @@ -88,8 +89,8 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { return TSDB_CODE_TSC_INVALID_VALUE; } - if(pVal->value[0] == 'l' || pVal->value[0] == 'L'){ // nchar - if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= 3){ + if (pVal->value[0] == 'l' || pVal->value[0] == 'L') { // nchar + if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= 3) { pVal->type = TSDB_DATA_TYPE_NCHAR; pVal->length -= NCHAR_ADD_LEN; if (pVal->length > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { @@ -101,10 +102,10 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { return TSDB_CODE_TSC_INVALID_VALUE; } - if (pVal->value[0] == 't' || pVal->value[0] == 'T'){ - if(pVal->length == 1 || (pVal->length == 4 && (pVal->value[1] == 'r' || pVal->value[1] == 'R') - && (pVal->value[2] == 'u' || pVal->value[2] == 'U') - && (pVal->value[3] == 'e' || pVal->value[3] == 'E'))){ + if (pVal->value[0] == 't' || pVal->value[0] == 'T') { + if (pVal->length == 1 || + (pVal->length == 4 && (pVal->value[1] == 'r' || pVal->value[1] == 'R') && + (pVal->value[2] == 'u' || pVal->value[2] == 'U') && (pVal->value[3] == 'e' || pVal->value[3] == 'E'))) { pVal->i = TSDB_TRUE; pVal->type = TSDB_DATA_TYPE_BOOL; pVal->length = (int16_t)tDataTypes[pVal->type].bytes; @@ -113,11 +114,11 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { return TSDB_CODE_TSC_INVALID_VALUE; } - if (pVal->value[0] == 'f' || pVal->value[0] == 'F'){ - if(pVal->length == 1 || (pVal->length == 5 && (pVal->value[1] == 'a' || pVal->value[1] == 'A') - && (pVal->value[2] == 'l' || pVal->value[2] == 'L') - && (pVal->value[3] == 's' || pVal->value[3] == 'S') - && (pVal->value[4] == 'e' || pVal->value[4] == 'E'))){ + if (pVal->value[0] == 'f' || pVal->value[0] == 'F') { + if (pVal->length == 1 || + (pVal->length == 5 && (pVal->value[1] == 'a' || pVal->value[1] == 'A') && + (pVal->value[2] == 'l' || pVal->value[2] == 'L') && (pVal->value[3] == 's' || pVal->value[3] == 'S') && + (pVal->value[4] == 'e' || pVal->value[4] == 'E'))) { pVal->i = TSDB_FALSE; pVal->type = TSDB_DATA_TYPE_BOOL; pVal->length = (int16_t)tDataTypes[pVal->type].bytes; @@ -135,9 +136,9 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { return TSDB_CODE_TSC_INVALID_VALUE; } -static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, - SSmlLineInfo* currElement, bool isSameMeasure, bool isSameCTable){ - if(isSameCTable){ +static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLineInfo *currElement, bool isSameMeasure, + bool isSameCTable) { + if (isSameCTable) { return TSDB_CODE_SUCCESS; } @@ -146,15 +147,16 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, SArray *maxKVs = info->maxTagKVs; bool isSuperKVInit = true; SArray 
*superKV = NULL; - if(info->dataFormat){ - if(unlikely(!isSameMeasure)){ - SSmlSTableMeta **tmp = (SSmlSTableMeta **)taosHashGet(info->superTables, currElement->measure, currElement->measureLen); + if (info->dataFormat) { + if (unlikely(!isSameMeasure)) { + SSmlSTableMeta **tmp = + (SSmlSTableMeta **)taosHashGet(info->superTables, currElement->measure, currElement->measureLen); SSmlSTableMeta *sMeta = NULL; - if(unlikely(tmp == NULL)){ - STableMeta * pTableMeta = smlGetMeta(info, currElement->measure, currElement->measureLen); - if(pTableMeta == NULL){ + if (unlikely(tmp == NULL)) { + STableMeta *pTableMeta = smlGetMeta(info, currElement->measure, currElement->measureLen); + if (pTableMeta == NULL) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } sMeta = smlBuildSTableMeta(info->dataFormat); @@ -165,15 +167,15 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, info->currSTableMeta = (*tmp)->tableMeta; superKV = (*tmp)->tags; - if(unlikely(taosArrayGetSize(superKV) == 0)){ + if (unlikely(taosArrayGetSize(superKV) == 0)) { isSuperKVInit = false; } - taosArraySetSize(maxKVs, 0); + taosArrayClear(maxKVs); } - }else{ - taosArraySetSize(maxKVs, 0); + } else { + taosArrayClear(maxKVs); } - taosArraySetSize(preLineKV, 0); + taosArrayClear(preLineKV); while (*sql < sqlEnd) { if (unlikely(IS_SPACE(*sql))) { @@ -183,7 +185,7 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, bool hasSlash = false; // parse key const char *key = *sql; - size_t keyLen = 0; + size_t keyLen = 0; while (*sql < sqlEnd) { if (unlikely(IS_COMMA(*sql))) { smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql); @@ -194,12 +196,12 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, (*sql)++; break; } - if(!hasSlash){ + if (!hasSlash) { hasSlash = (*(*sql) == SLASH); } (*sql)++; } - if(unlikely(hasSlash)) { + if (unlikely(hasSlash)) { PROCESS_SLASH(key, keyLen) } @@ -210,18 +212,18 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, // parse value const char *value = *sql; - size_t valueLen = 0; + size_t valueLen = 0; hasSlash = false; while (*sql < sqlEnd) { // parse value if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) { break; - }else if (unlikely(IS_EQUAL(*sql))) { + } else if (unlikely(IS_EQUAL(*sql))) { smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql); return TSDB_CODE_SML_INVALID_DATA; } - if(!hasSlash){ + if (!hasSlash) { hasSlash = (*(*sql) == SLASH); } @@ -234,7 +236,7 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, return TSDB_CODE_SML_INVALID_DATA; } - if(unlikely(hasSlash)) { + if (unlikely(hasSlash)) { PROCESS_SLASH(value, valueLen) } @@ -243,24 +245,25 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, } SSmlKv kv = {.key = key, .keyLen = keyLen, .type = TSDB_DATA_TYPE_NCHAR, .value = value, .length = valueLen}; - if(info->dataFormat){ - if(unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)){ + if (info->dataFormat) { + if (unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } - if(isSameMeasure){ - if(unlikely(cnt >= taosArrayGetSize(maxKVs))) { + if (isSameMeasure) { + if (unlikely(cnt >= taosArrayGetSize(maxKVs))) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } SSmlKv *maxKV = (SSmlKv *)taosArrayGet(maxKVs, cnt); - 
if(unlikely(kv.length > maxKV->length)){ + if (unlikely(kv.length > maxKV->length)) { maxKV->length = kv.length; - SSmlSTableMeta **tableMeta = (SSmlSTableMeta **)taosHashGet(info->superTables, currElement->measure, currElement->measureLen); - if(unlikely(NULL == tableMeta)){ + SSmlSTableMeta **tableMeta = + (SSmlSTableMeta **)taosHashGet(info->superTables, currElement->measure, currElement->measureLen); + if (unlikely(NULL == tableMeta)) { uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id); return TSDB_CODE_SML_INTERNAL_ERROR; } @@ -269,49 +272,49 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, oldKV->length = kv.length; info->needModifySchema = true; } - if(unlikely(!IS_SAME_KEY)){ + if (unlikely(!IS_SAME_KEY)) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } - }else{ - if(isSuperKVInit){ - if(unlikely(cnt >= taosArrayGetSize(superKV))) { + } else { + if (isSuperKVInit) { + if (unlikely(cnt >= taosArrayGetSize(superKV))) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } SSmlKv *maxKV = (SSmlKv *)taosArrayGet(superKV, cnt); - if(unlikely(kv.length > maxKV->length)) { + if (unlikely(kv.length > maxKV->length)) { maxKV->length = kv.length; - }else{ + } else { kv.length = maxKV->length; } info->needModifySchema = true; - if(unlikely(!IS_SAME_KEY)){ + if (unlikely(!IS_SAME_KEY)) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } - }else{ + } else { taosArrayPush(superKV, &kv); } taosArrayPush(maxKVs, &kv); } - }else{ + } else { taosArrayPush(maxKVs, &kv); } taosArrayPush(preLineKV, &kv); cnt++; - if(IS_SPACE(*sql)){ + if (IS_SPACE(*sql)) { break; } (*sql)++; } - void* oneTable = taosHashGet(info->childTables, currElement->measure, currElement->measureTagsLen); + void *oneTable = taosHashGet(info->childTables, currElement->measure, currElement->measureTagsLen); if ((oneTable != NULL)) { return TSDB_CODE_SUCCESS; } @@ -324,10 +327,10 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, smlSetCTableName(tinfo); tinfo->uid = info->uid++; - if(info->dataFormat) { + if (info->dataFormat) { info->currSTableMeta->uid = tinfo->uid; tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta); - if(tinfo->tableDataCtx == NULL){ + if (tinfo->tableDataCtx == NULL) { smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL); return TSDB_CODE_SML_INVALID_DATA; } @@ -338,15 +341,16 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, return TSDB_CODE_SUCCESS; } -static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, - SSmlLineInfo* currElement, bool isSameMeasure, bool isSameCTable){ +static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLineInfo *currElement, bool isSameMeasure, + bool isSameCTable) { int cnt = 0; SArray *preLineKV = info->preLineColKV; bool isSuperKVInit = true; SArray *superKV = NULL; - if(info->dataFormat){ - if(unlikely(!isSameCTable)){ - SSmlTableInfo **oneTable = (SSmlTableInfo **)taosHashGet(info->childTables, currElement->measure, currElement->measureTagsLen); + if (info->dataFormat) { + if (unlikely(!isSameCTable)) { + SSmlTableInfo **oneTable = + (SSmlTableInfo **)taosHashGet(info->childTables, currElement->measure, currElement->measureTagsLen); if (unlikely(oneTable == NULL)) { smlBuildInvalidDataMsg(&info->msgBuf, "child table should inside", currElement->measure); return 
TSDB_CODE_SML_INVALID_DATA; @@ -354,14 +358,15 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, info->currTableDataCtx = (*oneTable)->tableDataCtx; } - if(unlikely(!isSameMeasure)){ - SSmlSTableMeta **tmp = (SSmlSTableMeta **)taosHashGet(info->superTables, currElement->measure, currElement->measureLen); + if (unlikely(!isSameMeasure)) { + SSmlSTableMeta **tmp = + (SSmlSTableMeta **)taosHashGet(info->superTables, currElement->measure, currElement->measureLen); SSmlSTableMeta *sMeta = NULL; - if(unlikely(tmp == NULL)){ - STableMeta * pTableMeta = smlGetMeta(info, currElement->measure, currElement->measureLen); - if(pTableMeta == NULL){ + if (unlikely(tmp == NULL)) { + STableMeta *pTableMeta = smlGetMeta(info, currElement->measure, currElement->measureLen); + if (pTableMeta == NULL) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } sMeta = smlBuildSTableMeta(info->dataFormat); @@ -371,10 +376,10 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, } info->currSTableMeta = (*tmp)->tableMeta; superKV = (*tmp)->cols; - if(unlikely(taosArrayGetSize(superKV) == 0)){ + if (unlikely(taosArrayGetSize(superKV) == 0)) { isSuperKVInit = false; } - taosArraySetSize(preLineKV, 0); + taosArrayClear(preLineKV); } } @@ -386,7 +391,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, bool hasSlash = false; // parse key const char *key = *sql; - size_t keyLen = 0; + size_t keyLen = 0; while (*sql < sqlEnd) { if (unlikely(IS_COMMA(*sql))) { smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql); @@ -397,12 +402,12 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, (*sql)++; break; } - if(!hasSlash){ + if (!hasSlash) { hasSlash = (*(*sql) == SLASH); } (*sql)++; } - if(unlikely(hasSlash)) { + if (unlikely(hasSlash)) { PROCESS_SLASH(key, keyLen) } @@ -413,9 +418,9 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, // parse value const char *value = *sql; - size_t valueLen = 0; - hasSlash = false; - bool isInQuote = false; + size_t valueLen = 0; + hasSlash = false; + bool isInQuote = false; while (*sql < sqlEnd) { // parse value if (unlikely(IS_QUOTE(*sql))) { @@ -423,7 +428,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, (*sql)++; continue; } - if (!isInQuote){ + if (!isInQuote) { if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) { break; } else if (unlikely(IS_EQUAL(*sql))) { @@ -431,7 +436,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, return TSDB_CODE_SML_INVALID_DATA; } } - if(!hasSlash){ + if (!hasSlash) { hasSlash = (*(*sql) == SLASH); } @@ -447,22 +452,22 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", value); return TSDB_CODE_SML_INVALID_DATA; } - if(unlikely(hasSlash)) { + if (unlikely(hasSlash)) { PROCESS_SLASH(value, valueLen) } - SSmlKv kv = {.key = key, .keyLen = keyLen, .value = value, .length = valueLen}; + SSmlKv kv = {.key = key, .keyLen = keyLen, .value = value, .length = valueLen}; int32_t ret = smlParseValue(&kv, &info->msgBuf); if (ret != TSDB_CODE_SUCCESS) { smlBuildInvalidDataMsg(&info->msgBuf, "smlParseValue error", value); return ret; } - if(info->dataFormat){ - //cnt begin 0, add ts so + 2 - if(unlikely(cnt + 2 > info->currSTableMeta->tableInfo.numOfColumns)){ + if (info->dataFormat) { + // cnt begin 0, add ts so + 2 + if (unlikely(cnt + 2 > 
info->currSTableMeta->tableInfo.numOfColumns)) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } // bind data @@ -470,27 +475,28 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, if (unlikely(ret != TSDB_CODE_SUCCESS)) { uError("smlBuildCol error, retry"); info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } - if(isSameMeasure){ - if(cnt >= taosArrayGetSize(preLineKV)) { + if (isSameMeasure) { + if (cnt >= taosArrayGetSize(preLineKV)) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } SSmlKv *maxKV = (SSmlKv *)taosArrayGet(preLineKV, cnt); - if(kv.type != maxKV->type){ + if (kv.type != maxKV->type) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } - if(unlikely(IS_VAR_DATA_TYPE(kv.type) && kv.length > maxKV->length)){ + if (unlikely(IS_VAR_DATA_TYPE(kv.type) && kv.length > maxKV->length)) { maxKV->length = kv.length; - SSmlSTableMeta **tableMeta = (SSmlSTableMeta **)taosHashGet(info->superTables, currElement->measure, currElement->measureLen); - if(unlikely(NULL == tableMeta)){ + SSmlSTableMeta **tableMeta = + (SSmlSTableMeta **)taosHashGet(info->superTables, currElement->measure, currElement->measureLen); + if (unlikely(NULL == tableMeta)) { uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id); return TSDB_CODE_SML_INTERNAL_ERROR; } @@ -499,53 +505,52 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, oldKV->length = kv.length; info->needModifySchema = true; } - if(unlikely(!IS_SAME_KEY)){ + if (unlikely(!IS_SAME_KEY)) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } - }else{ - if(isSuperKVInit){ - if(unlikely(cnt >= taosArrayGetSize(superKV))) { + } else { + if (isSuperKVInit) { + if (unlikely(cnt >= taosArrayGetSize(superKV))) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } SSmlKv *maxKV = (SSmlKv *)taosArrayGet(superKV, cnt); - if(unlikely(kv.type != maxKV->type)){ + if (unlikely(kv.type != maxKV->type)) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } - if(IS_VAR_DATA_TYPE(kv.type)){ - if(kv.length > maxKV->length) { + if (IS_VAR_DATA_TYPE(kv.type)) { + if (kv.length > maxKV->length) { maxKV->length = kv.length; - }else{ + } else { kv.length = maxKV->length; } info->needModifySchema = true; } - if(unlikely(!IS_SAME_KEY)){ + if (unlikely(!IS_SAME_KEY)) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } - }else{ + } else { taosArrayPush(superKV, &kv); } taosArrayPush(preLineKV, &kv); } - }else{ - if(currElement->colArray == NULL){ - currElement->colArray = taosArrayInit(16, sizeof(SSmlKv)); - taosArraySetSize(currElement->colArray, 1); + } else { + if (currElement->colArray == NULL) { + currElement->colArray = taosArrayInit_s(16, sizeof(SSmlKv), 1); } - taosArrayPush(currElement->colArray, &kv); //reserve for timestamp + taosArrayPush(currElement->colArray, &kv); // reserve for timestamp } cnt++; - if(IS_SPACE(*sql)){ + if (IS_SPACE(*sql)) { break; } (*sql)++; @@ -583,8 +588,8 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine } // to get measureTagsLen before - const char* tmp = sql; - while (tmp < sqlEnd){ + const char *tmp = sql; + while (tmp < sqlEnd) { if (unlikely(IS_SPACE(tmp))) { break; } @@ -594,10 
+599,10 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine bool isSameCTable = false; bool isSameMeasure = false; - if(IS_SAME_CHILD_TABLE){ + if (IS_SAME_CHILD_TABLE) { isSameCTable = true; isSameMeasure = true; - }else if(info->dataFormat) { + } else if (info->dataFormat) { isSameMeasure = IS_SAME_SUPER_TABLE; } // parse tag @@ -605,10 +610,10 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine elements->tags = sql; int ret = smlParseTagKv(info, &sql, sqlEnd, elements, isSameMeasure, isSameCTable); - if(unlikely(ret != TSDB_CODE_SUCCESS)){ + if (unlikely(ret != TSDB_CODE_SUCCESS)) { return ret; } - if(unlikely(info->reRun)){ + if (unlikely(info->reRun)) { return TSDB_CODE_SUCCESS; } @@ -620,11 +625,11 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine elements->cols = sql; ret = smlParseColKv(info, &sql, sqlEnd, elements, isSameMeasure, isSameCTable); - if(unlikely(ret != TSDB_CODE_SUCCESS)){ + if (unlikely(ret != TSDB_CODE_SUCCESS)) { return ret; } - if(unlikely(info->reRun)){ + if (unlikely(info->reRun)) { return TSDB_CODE_SUCCESS; } @@ -651,16 +656,19 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine return TSDB_CODE_INVALID_TIMESTAMP; } // add ts to - SSmlKv kv = { .key = TS, .keyLen = TS_LEN, .type = TSDB_DATA_TYPE_TIMESTAMP, .i = ts, .length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes}; - if(info->dataFormat){ + SSmlKv kv = {.key = TS, + .keyLen = TS_LEN, + .type = TSDB_DATA_TYPE_TIMESTAMP, + .i = ts, + .length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes}; + if (info->dataFormat) { smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 0); smlBuildRow(info->currTableDataCtx); clearColValArray(info->currTableDataCtx->pValues); - }else{ + } else { taosArraySet(elements->colArray, 0, &kv); } info->preLine = *elements; return ret; } - diff --git a/source/client/src/clientSmlTelnet.c b/source/client/src/clientSmlTelnet.c index 7f669ffead..ab071305fa 100644 --- a/source/client/src/clientSmlTelnet.c +++ b/source/client/src/clientSmlTelnet.c @@ -20,16 +20,17 @@ #include "clientSml.h" -int32_t is_same_child_table_telnet(const void *a, const void *b){ +int32_t is_same_child_table_telnet(const void *a, const void *b) { SSmlLineInfo *t1 = (SSmlLineInfo *)a; SSmlLineInfo *t2 = (SSmlLineInfo *)b; -// uError("is_same_child_table_telnet len:%d,%d %s,%s @@@ len:%d,%d %s,%s", t1->measureLen, t2->measureLen, -// t1->measure, t2->measure, t1->tagsLen, t2->tagsLen, t1->tags, t2->tags); - if(t1 == NULL || t2 == NULL || t1->measure == NULL || t2->measure == NULL - || t1->tags == NULL || t2->tags == NULL) + // uError("is_same_child_table_telnet len:%d,%d %s,%s @@@ len:%d,%d %s,%s", t1->measureLen, t2->measureLen, + // t1->measure, t2->measure, t1->tagsLen, t2->tagsLen, t1->tags, t2->tags); + if (t1 == NULL || t2 == NULL || t1->measure == NULL || t2->measure == NULL || t1->tags == NULL || t2->tags == NULL) return 1; - return (((t1->measureLen == t2->measureLen) && memcmp(t1->measure, t2->measure, t1->measureLen) == 0) - && ((t1->tagsLen == t2->tagsLen) && memcmp(t1->tags, t2->tags, t1->tagsLen) == 0)) ? 0 : 1; + return (((t1->measureLen == t2->measureLen) && memcmp(t1->measure, t2->measure, t1->measureLen) == 0) && + ((t1->tagsLen == t2->tagsLen) && memcmp(t1->tags, t2->tags, t1->tagsLen) == 0)) + ? 
0 + : 1; } int64_t smlParseOpenTsdbTime(SSmlHandle *info, const char *data, int32_t len) { @@ -40,7 +41,7 @@ int64_t smlParseOpenTsdbTime(SSmlHandle *info, const char *data, int32_t len) { return -1; } if (unlikely(len == 1 && data[0] == '0')) { - return taosGetTimestampNs()/smlFactorNS[toPrecision]; + return taosGetTimestampNs() / smlFactorNS[toPrecision]; } int8_t fromPrecision = smlGetTsTypeByLen(len); if (unlikely(fromPrecision == -1)) { @@ -56,7 +57,6 @@ int64_t smlParseOpenTsdbTime(SSmlHandle *info, const char *data, int32_t len) { return ts; } - static void smlParseTelnetElement(char **sql, char *sqlEnd, char **data, int32_t *len) { while (*sql < sqlEnd) { if (unlikely((**sql != SPACE && !(*data)))) { @@ -70,7 +70,7 @@ static void smlParseTelnetElement(char **sql, char *sqlEnd, char **data, int32_t } static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SSmlLineInfo *elements, SSmlMsgBuf *msg) { - if(is_same_child_table_telnet(elements, &info->preLine) == 0){ + if (is_same_child_table_telnet(elements, &info->preLine) == 0) { elements->measureTag = info->preLine.measureTag; return TSDB_CODE_SUCCESS; } @@ -82,15 +82,15 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS SArray *maxKVs = info->maxTagKVs; bool isSuperKVInit = true; SArray *superKV = NULL; - if(info->dataFormat){ - if(!isSameMeasure){ + if (info->dataFormat) { + if (!isSameMeasure) { SSmlSTableMeta **tmp = (SSmlSTableMeta **)taosHashGet(info->superTables, elements->measure, elements->measureLen); - SSmlSTableMeta *sMeta = NULL; - if(unlikely(tmp == NULL)){ - STableMeta * pTableMeta = smlGetMeta(info, elements->measure, elements->measureLen); - if(pTableMeta == NULL){ + SSmlSTableMeta *sMeta = NULL; + if (unlikely(tmp == NULL)) { + STableMeta *pTableMeta = smlGetMeta(info, elements->measure, elements->measureLen); + if (pTableMeta == NULL) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } sMeta = smlBuildSTableMeta(info->dataFormat); @@ -101,23 +101,23 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS info->currSTableMeta = (*tmp)->tableMeta; superKV = (*tmp)->tags; - if(unlikely(taosArrayGetSize(superKV) == 0)){ + if (unlikely(taosArrayGetSize(superKV) == 0)) { isSuperKVInit = false; } - taosArraySetSize(maxKVs, 0); + taosArrayClear(maxKVs); } - }else{ - taosArraySetSize(maxKVs, 0); + } else { + taosArrayClear(maxKVs); } - taosArraySetSize(preLineKV, 0); + taosArrayClear(preLineKV); const char *sql = data; while (sql < sqlEnd) { JUMP_SPACE(sql, sqlEnd) if (unlikely(*sql == '\0')) break; const char *key = sql; - size_t keyLen = 0; + size_t keyLen = 0; // parse key while (sql < sqlEnd) { @@ -137,14 +137,14 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS smlBuildInvalidDataMsg(msg, "invalid key or key is too long than 64", key); return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH; } -// if (smlCheckDuplicateKey(key, keyLen, dumplicateKey)) { -// smlBuildInvalidDataMsg(msg, "dumplicate key", key); -// return TSDB_CODE_TSC_DUP_NAMES; -// } + // if (smlCheckDuplicateKey(key, keyLen, dumplicateKey)) { + // smlBuildInvalidDataMsg(msg, "dumplicate key", key); + // return TSDB_CODE_TSC_DUP_NAMES; + // } // parse value const char *value = sql; - size_t valueLen = 0; + size_t valueLen = 0; while (sql < sqlEnd) { // parse value if (unlikely(*sql == SPACE)) { @@ -169,24 +169,25 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS SSmlKv kv 
= {.key = key, .keyLen = keyLen, .type = TSDB_DATA_TYPE_NCHAR, .value = value, .length = valueLen}; - if(info->dataFormat){ - if(unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)){ + if (info->dataFormat) { + if (unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } - if(isSameMeasure){ - if(unlikely(cnt >= taosArrayGetSize(maxKVs))) { + if (isSameMeasure) { + if (unlikely(cnt >= taosArrayGetSize(maxKVs))) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } SSmlKv *maxKV = (SSmlKv *)taosArrayGet(maxKVs, cnt); - if(unlikely(kv.length > maxKV->length)){ + if (unlikely(kv.length > maxKV->length)) { maxKV->length = kv.length; - SSmlSTableMeta **tableMeta = (SSmlSTableMeta **)taosHashGet(info->superTables, elements->measure, elements->measureLen); - if(unlikely(NULL == tableMeta)){ + SSmlSTableMeta **tableMeta = + (SSmlSTableMeta **)taosHashGet(info->superTables, elements->measure, elements->measureLen); + if (unlikely(NULL == tableMeta)) { uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id); return TSDB_CODE_SML_INTERNAL_ERROR; } @@ -195,49 +196,50 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS oldKV->length = kv.length; info->needModifySchema = true; } - if(unlikely(!IS_SAME_KEY)){ + if (unlikely(!IS_SAME_KEY)) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } - }else{ - if(isSuperKVInit){ - if(unlikely(cnt >= taosArrayGetSize(superKV))) { + } else { + if (isSuperKVInit) { + if (unlikely(cnt >= taosArrayGetSize(superKV))) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } SSmlKv *maxKV = (SSmlKv *)taosArrayGet(superKV, cnt); - if(unlikely(kv.length > maxKV->length)) { + if (unlikely(kv.length > maxKV->length)) { maxKV->length = kv.length; - }else{ + } else { kv.length = maxKV->length; } info->needModifySchema = true; - if(unlikely(!IS_SAME_KEY)){ + if (unlikely(!IS_SAME_KEY)) { info->dataFormat = false; - info->reRun = true; + info->reRun = true; return TSDB_CODE_SUCCESS; } - }else{ + } else { taosArrayPush(superKV, &kv); } taosArrayPush(maxKVs, &kv); } - }else{ + } else { taosArrayPush(maxKVs, &kv); } taosArrayPush(preLineKV, &kv); cnt++; } - elements->measureTag = (char*)taosMemoryMalloc(elements->measureLen + elements->tagsLen); + elements->measureTag = (char *)taosMemoryMalloc(elements->measureLen + elements->tagsLen); memcpy(elements->measureTag, elements->measure, elements->measureLen); memcpy(elements->measureTag + elements->measureLen, elements->tags, elements->tagsLen); elements->measureTagsLen = elements->measureLen + elements->tagsLen; - SSmlTableInfo **tmp = (SSmlTableInfo **)taosHashGet(info->childTables, elements->measureTag, elements->measureLen + elements->tagsLen); + SSmlTableInfo **tmp = + (SSmlTableInfo **)taosHashGet(info->childTables, elements->measureTag, elements->measureLen + elements->tagsLen); SSmlTableInfo *tinfo = NULL; if (unlikely(tmp == NULL)) { tinfo = smlBuildTableInfo(1, elements->measure, elements->measureLen); @@ -258,10 +260,11 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS } } -// SSmlLineInfo *key = (SSmlLineInfo *)taosMemoryMalloc(sizeof(SSmlLineInfo)); -// *key = *elements; -// tinfo->key = key; - taosHashPut(info->childTables, elements->measureTag, elements->measureLen + elements->tagsLen, &tinfo, 
POINTER_BYTES); + // SSmlLineInfo *key = (SSmlLineInfo *)taosMemoryMalloc(sizeof(SSmlLineInfo)); + // *key = *elements; + // tinfo->key = key; + taosHashPut(info->childTables, elements->measureTag, elements->measureLen + elements->tagsLen, &tinfo, + POINTER_BYTES); tmp = &tinfo; } if (info->dataFormat) info->currTableDataCtx = (*tmp)->tableDataCtx; @@ -288,7 +291,7 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine } bool needConverTime = false; // get TS before parse tag(get meta), so need conver time - if(info->dataFormat && info->currSTableMeta == NULL){ + if (info->dataFormat && info->currSTableMeta == NULL) { needConverTime = true; } int64_t ts = smlParseOpenTsdbTime(info, elements->timestamp, elements->timestampLen); @@ -296,7 +299,11 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", sql); return TSDB_CODE_INVALID_TIMESTAMP; } - SSmlKv kvTs = { .key = TS, .keyLen = TS_LEN, .type = TSDB_DATA_TYPE_TIMESTAMP, .i = ts, .length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes}; + SSmlKv kvTs = {.key = TS, + .keyLen = TS_LEN, + .type = TSDB_DATA_TYPE_TIMESTAMP, + .i = ts, + .length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes}; // parse value smlParseTelnetElement(&sql, sqlEnd, &elements->cols, &elements->colsLen); @@ -324,19 +331,19 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine return ret; } - if(unlikely(info->reRun)){ + if (unlikely(info->reRun)) { return TSDB_CODE_SUCCESS; } - if(info->dataFormat){ - if(needConverTime) { + if (info->dataFormat) { + if (needConverTime) { kvTs.i = convertTimePrecision(kvTs.i, TSDB_TIME_PRECISION_NANO, info->currSTableMeta->tableInfo.precision); } ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kvTs, 0); - if(ret == TSDB_CODE_SUCCESS){ + if (ret == TSDB_CODE_SUCCESS) { ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 1); } - if(ret == TSDB_CODE_SUCCESS){ + if (ret == TSDB_CODE_SUCCESS) { ret = smlBuildRow(info->currTableDataCtx); } clearColValArray(info->currTableDataCtx->pValues); @@ -344,8 +351,8 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL); return ret; } - }else{ - if(elements->colArray == NULL){ + } else { + if (elements->colArray == NULL) { elements->colArray = taosArrayInit(16, sizeof(SSmlKv)); } taosArrayPush(elements->colArray, &kvTs); diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 920705c297..540cec1de3 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -32,15 +32,15 @@ sem_post(x) #endif -int32_t tmqAskEp(tmq_t* tmq, bool async); - -typedef struct { +struct SMqMgmt { int8_t inited; tmr_h timer; int32_t rsetId; -} SMqMgmt; +}; -static SMqMgmt tmqMgmt = {0}; +static TdThreadOnce tmqInit = PTHREAD_ONCE_INIT; // initialize only once +volatile int32_t tmqInitRes = 0; // initialize rsp code +static struct SMqMgmt tmqMgmt = {0}; typedef struct { int8_t tmqRspType; @@ -65,8 +65,7 @@ struct tmq_conf_t { int8_t withTbName; int8_t snapEnable; int32_t snapBatchSize; - - bool hbBgEnable; + bool hbBgEnable; uint16_t port; int32_t autoCommitInterval; @@ -80,16 +79,15 @@ struct tmq_conf_t { struct tmq_t { int64_t refId; // conf - char groupId[TSDB_CGROUP_LEN]; - char clientId[256]; - int8_t withTbName; - int8_t useSnapshot; - int8_t autoCommit; - int32_t 
autoCommitInterval; - int32_t resetOffsetCfg; - int64_t consumerId; - - bool hbBgEnable; + char groupId[TSDB_CGROUP_LEN]; + char clientId[256]; + int8_t withTbName; + int8_t useSnapshot; + int8_t autoCommit; + int32_t autoCommitInterval; + int32_t resetOffsetCfg; + uint64_t consumerId; + bool hbBgEnable; tmq_commit_cb* commitCb; void* commitCbUserParam; @@ -155,11 +153,9 @@ typedef struct { typedef struct { // subscribe info - char topicName[TSDB_TOPIC_FNAME_LEN]; - char db[TSDB_DB_FNAME_LEN]; - - SArray* vgs; // SArray - + char topicName[TSDB_TOPIC_FNAME_LEN]; + char db[TSDB_DB_FNAME_LEN]; + SArray* vgs; // SArray SSchemaWrapper schema; } SMqClientTopic; @@ -221,13 +217,21 @@ typedef struct { /*int32_t vgId;*/ } SMqCommitCbParam; +static int32_t tmqAskEp(tmq_t* tmq, bool async); + tmq_conf_t* tmq_conf_new() { tmq_conf_t* conf = taosMemoryCalloc(1, sizeof(tmq_conf_t)); + if (conf == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return conf; + } + conf->withTbName = false; conf->autoCommit = true; conf->autoCommitInterval = 5000; conf->resetOffset = TMQ_CONF__RESET_OFFSET__EARLIEAST; conf->hbBgEnable = true; + return conf; } @@ -508,8 +512,8 @@ static int32_t tmqSendCommitReq(tmq_t* tmq, SMqClientVg* pVg, SMqClientTopic* pT .handle = NULL, }; - tscDebug("consumer:%" PRId64 ", commit offset of %s on vgId:%d, offset is %" PRId64, tmq->consumerId, pOffset->subKey, - pVg->vgId, pOffset->val.version); + tscDebug("consumer:0x%" PRIx64 " topic:%s on vgId:%d offset:%" PRId64, tmq->consumerId, pOffset->subKey, pVg->vgId, + pOffset->val.version); // TODO: put into cb pVg->committedOffset = pVg->currentOffset; @@ -638,21 +642,18 @@ static int32_t tmqCommitConsumerImpl(tmq_t* tmq, int8_t automatic, int8_t async, for (int32_t i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) { SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i); - tscDebug("consumer:%" PRId64 ", begin commit for topic %s, vgNum %d", tmq->consumerId, pTopic->topicName, - (int32_t)taosArrayGetSize(pTopic->vgs)); - - for (int32_t j = 0; j < taosArrayGetSize(pTopic->vgs); j++) { + int32_t numOfVgroups = taosArrayGetSize(pTopic->vgs); + for (int32_t j = 0; j < numOfVgroups; j++) { SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j); - - tscDebug("consumer:%" PRId64 ", begin commit for topic %s, vgId:%d", tmq->consumerId, pTopic->topicName, - pVg->vgId); - if (pVg->currentOffset.type > 0 && !tOffsetEqual(&pVg->currentOffset, &pVg->committedOffset)) { - tscDebug("consumer: %" PRId64 ", vg:%d, current %" PRId64 ", committed %" PRId64 "", tmq->consumerId, pVg->vgId, - pVg->currentOffset.version, pVg->committedOffset.version); + tscDebug("consumer:0x%" PRIx64 " topic:%s vgId:%d, current %" PRId64 ", committed %" PRId64, tmq->consumerId, + pTopic->topicName, pVg->vgId, pVg->currentOffset.version, pVg->committedOffset.version); if (tmqSendCommitReq(tmq, pVg, pTopic, pParamSet) < 0) { continue; } + } else { + tscDebug("consumer:0x%" PRIx64 " topic:%s vgId:%d, not commit, current:%" PRId64 ", ordinal:%d/%d", + tmq->consumerId, pTopic->topicName, pVg->vgId, pVg->currentOffset.version, j + 1, numOfVgroups); } } } @@ -788,32 +789,44 @@ OVER: taosTmrReset(tmqSendHbReq, 1000, param, tmqMgmt.timer, &tmq->hbLiveTimer); } -int32_t tmqHandleAllDelayedTask(tmq_t* tmq) { +int32_t tmqHandleAllDelayedTask(tmq_t* pTmq) { STaosQall* qall = taosAllocateQall(); - taosReadAllQitems(tmq->delayedTask, qall); - while (1) { - int8_t* pTaskType = NULL; - taosGetQitem(qall, (void**)&pTaskType); - if (pTaskType == NULL) break; + taosReadAllQitems(pTmq->delayedTask, 
qall); + if (qall->numOfItems == 0) { + taosFreeQall(qall); + return TSDB_CODE_SUCCESS; + } + + tscDebug("consumer:0x%" PRIx64 " handle delayed %d tasks before poll data", pTmq->consumerId, qall->numOfItems); + int8_t* pTaskType = NULL; + taosGetQitem(qall, (void**)&pTaskType); + + while (pTaskType != NULL) { if (*pTaskType == TMQ_DELAYED_TASK__ASK_EP) { - tmqAskEp(tmq, true); + tmqAskEp(pTmq, true); int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t)); - *pRefId = tmq->refId; + *pRefId = pTmq->refId; - taosTmrReset(tmqAssignAskEpTask, 1000, pRefId, tmqMgmt.timer, &tmq->epTimer); + tscDebug("consumer:0x%" PRIx64 " next retrieve ep from mnode in 1s", pTmq->consumerId); + taosTmrReset(tmqAssignAskEpTask, 1000, pRefId, tmqMgmt.timer, &pTmq->epTimer); } else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) { - tmqCommitInner(tmq, NULL, 1, 1, tmq->commitCb, tmq->commitCbUserParam); + tmqCommitInner(pTmq, NULL, 1, 1, pTmq->commitCb, pTmq->commitCbUserParam); int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t)); - *pRefId = tmq->refId; + *pRefId = pTmq->refId; - taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, pRefId, tmqMgmt.timer, &tmq->commitTimer); + tscDebug("consumer:0x%" PRIx64 " next commit to mnode in %.2fs", pTmq->consumerId, + pTmq->autoCommitInterval / 1000.0); + taosTmrReset(tmqAssignDelayedCommitTask, pTmq->autoCommitInterval, pRefId, tmqMgmt.timer, &pTmq->commitTimer); } else if (*pTaskType == TMQ_DELAYED_TASK__REPORT) { } + taosFreeQitem(pTaskType); + taosGetQitem(qall, (void**)&pTaskType); } + taosFreeQall(qall); return 0; } @@ -932,23 +945,31 @@ void tmqFreeImpl(void* handle) { taosMemoryFree(tmq); } +static void tmqMgmtInit(void) { + tmqInitRes = 0; + tmqMgmt.timer = taosTmrInit(1000, 100, 360000, "TMQ"); + + if (tmqMgmt.timer == NULL) { + tmqInitRes = TSDB_CODE_OUT_OF_MEMORY; + } + + tmqMgmt.rsetId = taosOpenRef(10000, tmqFreeImpl); + if (tmqMgmt.rsetId < 0) { + tmqInitRes = terrno; + } +} + tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { - // init timer - int8_t inited = atomic_val_compare_exchange_8(&tmqMgmt.inited, 0, 1); - if (inited == 0) { - tmqMgmt.timer = taosTmrInit(1000, 100, 360000, "TMQ"); - if (tmqMgmt.timer == NULL) { - atomic_store_8(&tmqMgmt.inited, 0); - terrno = TSDB_CODE_OUT_OF_MEMORY; - return NULL; - } - tmqMgmt.rsetId = taosOpenRef(10000, tmqFreeImpl); + taosThreadOnce(&tmqInit, tmqMgmtInit); + if (tmqInitRes != 0) { + terrno = tmqInitRes; + return NULL; } tmq_t* pTmq = taosMemoryCalloc(1, sizeof(tmq_t)); if (pTmq == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - tscError("setting up new consumer failed since %s, consumer group %s", terrstr(), conf->groupId); + tscError("failed to create consumer, consumer group %s, code:%s", conf->groupId, terrstr()); return NULL; } @@ -962,7 +983,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { if (pTmq->clientTopics == NULL || pTmq->mqueue == NULL || pTmq->qall == NULL || pTmq->delayedTask == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - tscError("consumer %" PRId64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), + tscError("consumer:0x%" PRIx64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), pTmq->groupId); goto FAIL; } @@ -992,7 +1013,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { // init semaphore if (tsem_init(&pTmq->rspSem, 0, 0) != 0) { - tscError("consumer %" PRId64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), + 
tscError("consumer:0x %" PRIx64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), pTmq->groupId); goto FAIL; } @@ -1000,7 +1021,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { // init connection pTmq->pTscObj = taos_connect_internal(conf->ip, user, pass, NULL, NULL, conf->port, CONN_TYPE__TMQ); if (pTmq->pTscObj == NULL) { - tscError("consumer %" PRId64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), + tscError("consumer:0x %" PRIx64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(), pTmq->groupId); tsem_destroy(&pTmq->rspSem); goto FAIL; @@ -1018,8 +1039,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, 1000, pRefId, tmqMgmt.timer); } - tscInfo("consumer %" PRId64 " is setup, consumer group %s", pTmq->consumerId, pTmq->groupId); - + tscInfo("consumer:0x%" PRIx64 " is setup, consumer groupId %s", pTmq->consumerId, pTmq->groupId); return pTmq; FAIL: @@ -1028,6 +1048,7 @@ FAIL: if (pTmq->delayedTask) taosCloseQueue(pTmq->delayedTask); if (pTmq->qall) taosFreeQall(pTmq->qall); taosMemoryFree(pTmq); + return NULL; } @@ -1037,44 +1058,52 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { void* buf = NULL; SMsgSendInfo* sendInfo = NULL; SCMSubscribeReq req = {0}; - int32_t code = -1; + int32_t code = 0; - tscDebug("tmq subscribe, consumer: %" PRId64 ", topic num %d", tmq->consumerId, sz); + tscDebug("consumer:0x%" PRIx64 " tmq subscribe start, numOfTopic %d", tmq->consumerId, sz); req.consumerId = tmq->consumerId; tstrncpy(req.clientId, tmq->clientId, 256); tstrncpy(req.cgroup, tmq->groupId, TSDB_CGROUP_LEN); req.topicNames = taosArrayInit(sz, sizeof(void*)); - if (req.topicNames == NULL) goto FAIL; - tscDebug("tmq subscribe, consumer: %" PRId64 ", topic num %d", tmq->consumerId, sz); + if (req.topicNames == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto FAIL; + } for (int32_t i = 0; i < sz; i++) { char* topic = taosArrayGetP(container, i); SName name = {0}; tNameSetDbName(&name, tmq->pTscObj->acctId, topic, strlen(topic)); - char* topicFName = taosMemoryCalloc(1, TSDB_TOPIC_FNAME_LEN); if (topicFName == NULL) { goto FAIL; } - tNameExtractFullName(&name, topicFName); - tscDebug("subscribe topic: %s", topicFName); + tNameExtractFullName(&name, topicFName); + tscDebug("consumer:0x%" PRIx64 ", subscribe topic: %s", tmq->consumerId, topicFName); taosArrayPush(req.topicNames, &topicFName); } int32_t tlen = tSerializeSCMSubscribeReq(NULL, &req); + buf = taosMemoryMalloc(tlen); - if (buf == NULL) goto FAIL; + if (buf == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto FAIL; + } void* abuf = buf; tSerializeSCMSubscribeReq(&abuf, &req); sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); - if (sendInfo == NULL) goto FAIL; + if (sendInfo == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto FAIL; + } SMqSubscribeCbParam param = { .rspErr = 0, @@ -1082,7 +1111,9 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { .epoch = tmq->epoch, }; - if (tsem_init(¶m.rspSem, 0, 0) != 0) goto FAIL; + if (tsem_init(¶m.rspSem, 0, 0) != 0) { + goto FAIL; + } sendInfo->msgInfo = (SDataBuf){ .pData = buf, @@ -1108,15 +1139,18 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { tsem_wait(¶m.rspSem); tsem_destroy(¶m.rspSem); - code = param.rspErr; - if (code != 0) goto FAIL; + if (param.rspErr != 0) { + code = param.rspErr; + goto FAIL; + } int32_t retryCnt = 0; while 
(TSDB_CODE_MND_CONSUMER_NOT_READY == tmqAskEp(tmq, false)) { if (retryCnt++ > 10) { goto FAIL; } - tscDebug("consumer not ready, retry"); + + tscDebug("consumer:0x%" PRIx64 ", mnd not ready for subscribe, retry:%d in 500ms", tmq->consumerId, retryCnt); taosMsleep(500); } @@ -1134,7 +1168,6 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) { tmq->commitTimer = taosTmrStart(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, pRefId2, tmqMgmt.timer); } - code = 0; FAIL: taosArrayDestroyP(req.topicNames, taosMemoryFree); taosMemoryFree(buf); @@ -1229,7 +1262,7 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { tDecoderClear(&decoder); memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead)); - tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d", + tscDebug("consumer:0x%" PRIx64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d", tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version, rspType); @@ -1250,7 +1283,7 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) { taosMemoryFree(pMsg->pData); taosMemoryFree(pMsg->pEpSet); - tscDebug("consumer:%" PRId64 ", put poll res into mqueue %p", tmq->consumerId, pRspWrapper); + tscDebug("consumer:0x%" PRIx64 ", put poll res into mqueue %p", tmq->consumerId, pRspWrapper); taosWriteQitem(tmq->mqueue, pRspWrapper); tsem_post(&tmq->rspSem); @@ -1267,10 +1300,12 @@ CREATE_MSG_FAIL: bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) { bool set = false; + int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics); int32_t topicNumGet = taosArrayGetSize(pRsp->topics); - char vgKey[TSDB_TOPIC_FNAME_LEN + 22]; - tscDebug("consumer:%" PRId64 ", update ep epoch %d to epoch %d, topic num:%d", tmq->consumerId, tmq->epoch, epoch, - topicNumGet); + + char vgKey[TSDB_TOPIC_FNAME_LEN + 22]; + tscDebug("consumer:0x%" PRIx64 " update ep epoch from %d to epoch %d, incoming topics:%d, existed topics:%d", + tmq->consumerId, tmq->epoch, epoch, topicNumGet, topicNumCur); SArray* newTopics = taosArrayInit(topicNumGet, sizeof(SMqClientTopic)); if (newTopics == NULL) { @@ -1282,19 +1317,19 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) { taosArrayDestroy(newTopics); return false; } - int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics); + for (int32_t i = 0; i < topicNumCur; i++) { // find old topic SMqClientTopic* pTopicCur = taosArrayGet(tmq->clientTopics, i); if (pTopicCur->vgs) { int32_t vgNumCur = taosArrayGetSize(pTopicCur->vgs); - tscDebug("consumer:%" PRId64 ", new vg num: %d", tmq->consumerId, vgNumCur); + tscDebug("consumer:0x%" PRIx64 ", new vg num: %d", tmq->consumerId, vgNumCur); for (int32_t j = 0; j < vgNumCur; j++) { SMqClientVg* pVgCur = taosArrayGet(pTopicCur->vgs, j); sprintf(vgKey, "%s:%d", pTopicCur->topicName, pVgCur->vgId); char buf[80]; tFormatOffset(buf, 80, &pVgCur->currentOffset); - tscDebug("consumer:%" PRId64 ", epoch %d vgId:%d vgKey is %s, offset is %s", tmq->consumerId, epoch, + tscDebug("consumer:0x%" PRIx64 ", epoch %d vgId:%d vgKey is %s, offset is %s", tmq->consumerId, epoch, pVgCur->vgId, vgKey, buf); taosHashPut(pHash, vgKey, strlen(vgKey), &pVgCur->currentOffset, sizeof(STqOffsetVal)); } @@ -1310,7 +1345,7 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) { tstrncpy(topic.topicName, pTopicEp->topic, TSDB_TOPIC_FNAME_LEN); tstrncpy(topic.db, pTopicEp->db, TSDB_DB_FNAME_LEN); - 
tscDebug("consumer:%" PRId64 ", update topic: %s", tmq->consumerId, topic.topicName); + tscDebug("consumer:0x%" PRIx64 ", update topic: %s", tmq->consumerId, topic.topicName); int32_t vgNumGet = taosArrayGetSize(pTopicEp->vgs); topic.vgs = taosArrayInit(vgNumGet, sizeof(SMqClientVg)); @@ -1336,6 +1371,8 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) { } taosArrayPush(newTopics, &topic); } + + // destroy current buffered existed topics info if (tmq->clientTopics) { int32_t sz = taosArrayGetSize(tmq->clientTopics); for (int32_t i = 0; i < sz; i++) { @@ -1343,17 +1380,21 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) { if (pTopic->schema.nCols) taosMemoryFreeClear(pTopic->schema.pSchema); taosArrayDestroy(pTopic->vgs); } + taosArrayDestroy(tmq->clientTopics); } + taosHashCleanup(pHash); tmq->clientTopics = newTopics; - if (taosArrayGetSize(tmq->clientTopics) == 0) + if (taosArrayGetSize(tmq->clientTopics) == 0) { atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__NO_TOPIC); - else + } else { atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__READY); + } atomic_store_32(&tmq->epoch, epoch); + tscDebug("consumer:0x%" PRIx64 ", update topic info completed", tmq->consumerId); return set; } @@ -1376,8 +1417,8 @@ int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) { pParam->code = code; if (code != 0) { - tscError("consumer:%" PRId64 ", get topic endpoint error, not ready, wait:%d, code %x", tmq->consumerId, - pParam->async, code); + tscError("consumer:0x%" PRIx64 ", get topic endpoint error, async:%d, code:%s", tmq->consumerId, pParam->async, + tstrerror(code)); goto END; } @@ -1386,11 +1427,15 @@ int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) { // Epoch will only increase when received newer epoch ep msg SMqRspHead* head = pMsg->pData; int32_t epoch = atomic_load_32(&tmq->epoch); - tscDebug("consumer:%" PRId64 ", recv ep, msg epoch %d, current epoch %d", tmq->consumerId, head->epoch, epoch); if (head->epoch <= epoch) { + tscDebug("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d, no need to update local ep", + tmq->consumerId, head->epoch, epoch); goto END; } + tscDebug("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d, update local ep", tmq->consumerId, + head->epoch, epoch); + if (!async) { SMqAskEpRsp rsp; tDecodeSMqAskEpRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &rsp); @@ -1405,6 +1450,7 @@ int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) { code = -1; goto END; } + pWrapper->tmqRspType = TMQ_MSG_TYPE__EP_RSP; pWrapper->epoch = head->epoch; memcpy(&pWrapper->msg, pMsg->pData, sizeof(SMqRspHead)); @@ -1428,16 +1474,17 @@ END: } int32_t tmqAskEp(tmq_t* tmq, bool async) { - int32_t code = 0; + int32_t code = TSDB_CODE_SUCCESS; #if 0 int8_t epStatus = atomic_val_compare_exchange_8(&tmq->epStatus, 0, 1); if (epStatus == 1) { int32_t epSkipCnt = atomic_add_fetch_32(&tmq->epSkipCnt, 1); - tscTrace("consumer:%" PRId64 ", skip ask ep cnt %d", tmq->consumerId, epSkipCnt); + tscTrace("consumer:0x%" PRIx64 ", skip ask ep cnt %d", tmq->consumerId, epSkipCnt); if (epSkipCnt < 5000) return 0; } atomic_store_32(&tmq->epSkipCnt, 0); #endif + SMqAskEpReq req = {0}; req.consumerId = tmq->consumerId; req.epoch = tmq->epoch; @@ -1445,27 +1492,31 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) { int32_t tlen = tSerializeSMqAskEpReq(NULL, 0, &req); if (tlen < 0) { - tscError("tSerializeSMqAskEpReq failed"); + tscError("consumer:0x%" PRIx64 ", tSerializeSMqAskEpReq failed", tmq->consumerId); return 
-1; } + void* pReq = taosMemoryCalloc(1, tlen); if (pReq == NULL) { - tscError("failed to malloc askEpReq msg, size:%d", tlen); + tscError("consumer:0x%" PRIx64 ", failed to malloc askEpReq msg, size:%d", tmq->consumerId, tlen); + terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } + if (tSerializeSMqAskEpReq(pReq, tlen, &req) < 0) { - tscError("tSerializeSMqAskEpReq %d failed", tlen); + tscError("consumer:0x%" PRIx64 ", tSerializeSMqAskEpReq %d failed", tmq->consumerId, tlen); taosMemoryFree(pReq); return -1; } SMqAskEpCbParam* pParam = taosMemoryCalloc(1, sizeof(SMqAskEpCbParam)); if (pParam == NULL) { - tscError("failed to malloc subscribe param"); + tscError("consumer:0x%" PRIx64 ", failed to malloc subscribe param", tmq->consumerId); taosMemoryFree(pReq); /*atomic_store_8(&tmq->epStatus, 0);*/ return -1; } + pParam->refId = tmq->refId; pParam->epoch = tmq->epoch; pParam->async = async; @@ -1486,15 +1537,14 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) { .handle = NULL, }; - sendInfo->requestId = generateRequestId(); + sendInfo->requestId = tmq->consumerId; sendInfo->requestObjRefId = 0; sendInfo->param = pParam; sendInfo->fp = tmqAskEpCb; sendInfo->msgType = TDMT_MND_TMQ_ASK_EP; SEpSet epSet = getEpSet_s(&tmq->pTscObj->pAppInfo->mgmtEp); - - tscDebug("consumer:%" PRId64 ", ask ep", tmq->consumerId); + tscDebug("consumer:0x%" PRIx64 " ask ep from mnode, async:%d", tmq->consumerId, async); int64_t transporterId = 0; asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo); @@ -1504,6 +1554,7 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) { code = pParam->code; taosMemoryFree(pParam); } + return code; } @@ -1576,26 +1627,31 @@ SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) { return pRspObj; } +// broadcast the poll request to all related vnodes int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { - for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) { + int32_t numOfTopics = taosArrayGetSize(tmq->clientTopics); + tscDebug("consumer:0x%" PRIx64 " start to poll data, numOfTopics:%d", tmq->consumerId, numOfTopics); + + for (int i = 0; i < numOfTopics; i++) { SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i); for (int j = 0; j < taosArrayGetSize(pTopic->vgs); j++) { SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j); int32_t vgStatus = atomic_val_compare_exchange_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE, TMQ_VG_STATUS__WAIT); - if (vgStatus != TMQ_VG_STATUS__IDLE) { + if (vgStatus == TMQ_VG_STATUS__WAIT) { int32_t vgSkipCnt = atomic_add_fetch_32(&pVg->vgSkipCnt, 1); - tscTrace("consumer:%" PRId64 ", epoch %d skip vgId:%d skip cnt %d", tmq->consumerId, tmq->epoch, pVg->vgId, - vgSkipCnt); + tscDebug("consumer:0x%" PRIx64 " epoch %d wait poll-rsp, skip vgId:%d skip cnt %d", tmq->consumerId, tmq->epoch, + pVg->vgId, vgSkipCnt); continue; /*if (vgSkipCnt < 10000) continue;*/ #if 0 if (skipCnt < 30000) { continue; } else { - tscDebug("consumer:%" PRId64 ",skip vgId:%d skip too much reset", tmq->consumerId, pVg->vgId); + tscDebug("consumer:0x%" PRIx64 ",skip vgId:%d skip too much reset", tmq->consumerId, pVg->vgId); } #endif } + atomic_store_32(&pVg->vgSkipCnt, 0); SMqPollReq req = {0}; @@ -1606,6 +1662,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { tsem_post(&tmq->rspSem); return -1; } + char* msg = taosMemoryCalloc(1, msgSize); if (NULL == msg) { atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE); @@ -1627,6 +1684,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { tsem_post(&tmq->rspSem); return -1; } + 
pParam->refId = tmq->refId; pParam->epoch = tmq->epoch; @@ -1648,6 +1706,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { .len = msgSize, .handle = NULL, }; + sendInfo->requestId = req.reqId; sendInfo->requestObjRefId = 0; sendInfo->param = pParam; @@ -1655,18 +1714,19 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) { sendInfo->msgType = TDMT_VND_TMQ_CONSUME; int64_t transporterId = 0; - /*printf("send poll\n");*/ char offsetFormatBuf[80]; tFormatOffset(offsetFormatBuf, 80, &pVg->currentOffset); - tscDebug("consumer:%" PRId64 ", send poll to %s vgId:%d, epoch %d, req offset:%s, reqId:%" PRIu64, + + tscDebug("consumer:0x%" PRIx64 ", send poll to %s vgId:%d, epoch %d, req offset:%s, reqId:0x%" PRIx64, tmq->consumerId, pTopic->topicName, pVg->vgId, tmq->epoch, offsetFormatBuf, req.reqId); - /*printf("send vgId:%d %" PRId64 "\n", pVg->vgId, pVg->currentOffset);*/ asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pVg->epSet, &transporterId, sendInfo); + pVg->pollCnt++; tmq->pollCnt++; } } + return 0; } @@ -1704,7 +1764,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { } } - tscDebug("consumer:%" PRId64 " handle rsp %p", tmq->consumerId, rspWrapper); + tscDebug("consumer:0x%" PRIx64 " handle rsp %p", tmq->consumerId, rspWrapper); if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__END_RSP) { taosFreeQitem(rspWrapper); @@ -1712,7 +1772,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { return NULL; } else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__POLL_RSP) { SMqPollRspWrapper* pollRspWrapper = (SMqPollRspWrapper*)rspWrapper; - tscDebug("consumer %" PRId64 " actual process poll rsp", tmq->consumerId); + tscDebug("consumer:0x%" PRIx64 " actual process poll rsp", tmq->consumerId); /*atomic_sub_fetch_32(&tmq->readyRequest, 1);*/ int32_t consumerEpoch = atomic_load_32(&tmq->epoch); if (pollRspWrapper->dataRsp.head.epoch == consumerEpoch) { @@ -1731,8 +1791,8 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { taosFreeQitem(pollRspWrapper); return pRsp; } else { - tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", - pollRspWrapper->dataRsp.head.epoch, consumerEpoch); + tscDebug("consumer:0x%" PRIx64 " msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", + tmq->consumerId, pollRspWrapper->dataRsp.head.epoch, consumerEpoch); tmqFreeRspWrapper(rspWrapper); taosFreeQitem(pollRspWrapper); } @@ -1750,8 +1810,8 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { taosFreeQitem(pollRspWrapper); return pRsp; } else { - tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", - pollRspWrapper->metaRsp.head.epoch, consumerEpoch); + tscDebug("consumer:0x%" PRIx64 " msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", + tmq->consumerId, pollRspWrapper->metaRsp.head.epoch, consumerEpoch); tmqFreeRspWrapper(rspWrapper); taosFreeQitem(pollRspWrapper); } @@ -1781,8 +1841,8 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { taosFreeQitem(pollRspWrapper); return pRsp; } else { - tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", - pollRspWrapper->taosxRsp.head.epoch, consumerEpoch); + tscDebug("consumer:0x%" PRIx64 " msg discard since epoch mismatch: msg epoch %d, consumer epoch %d", + tmq->consumerId, pollRspWrapper->taosxRsp.head.epoch, consumerEpoch); tmqFreeRspWrapper(rspWrapper); taosFreeQitem(pollRspWrapper); } @@ -1792,7 +1852,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, 
bool pollIfReset) { tmqHandleNoPollRsp(tmq, rspWrapper, &reset); taosFreeQitem(rspWrapper); if (pollIfReset && reset) { - tscDebug("consumer:%" PRId64 ", reset and repoll", tmq->consumerId); + tscDebug("consumer:0x%" PRIx64 ", reset and repoll", tmq->consumerId); tmqPollImpl(tmq, timeout); } } @@ -1803,7 +1863,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { void* rspObj; int64_t startTime = taosGetTimestampMs(); - tscDebug("consumer:%" PRId64 ", start poll at %" PRId64, tmq->consumerId, startTime); + tscDebug("consumer:0x%" PRIx64 ", start poll at %" PRId64, tmq->consumerId, startTime); #if 0 tmqHandleAllDelayedTask(tmq); @@ -1816,7 +1876,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { // in no topic status, delayed task also need to be processed if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__INIT) { - tscDebug("consumer:%" PRId64 ", poll return since consumer status is init", tmq->consumerId); + tscDebug("consumer:0x%" PRIx64 ", poll return since consumer status is init", tmq->consumerId); return NULL; } @@ -1826,35 +1886,38 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) { if (retryCnt++ > 10) { return NULL; } - tscDebug("consumer not ready, retry"); + + tscDebug("consumer:0x%" PRIx64 " not ready, retry:%d/10 in 500ms", tmq->consumerId, retryCnt); taosMsleep(500); } } while (1) { tmqHandleAllDelayedTask(tmq); + if (tmqPollImpl(tmq, timeout) < 0) { - tscDebug("consumer:%" PRId64 " return since poll err", tmq->consumerId); + tscDebug("consumer:0x%" PRIx64 " return due to poll error", tmq->consumerId); /*return NULL;*/ } rspObj = tmqHandleAllRsp(tmq, timeout, false); if (rspObj) { - tscDebug("consumer:%" PRId64 ", return rsp %p", tmq->consumerId, rspObj); + tscDebug("consumer:0x%" PRIx64 ", return rsp %p", tmq->consumerId, rspObj); return (TAOS_RES*)rspObj; } else if (terrno == TSDB_CODE_TQ_NO_COMMITTED_OFFSET) { - tscDebug("consumer:%" PRId64 ", return null since no committed offset", tmq->consumerId); + tscDebug("consumer:0x%" PRIx64 ", return null since no committed offset", tmq->consumerId); return NULL; } + if (timeout != -1) { int64_t currentTime = taosGetTimestampMs(); int64_t passedTime = currentTime - startTime; if (passedTime > timeout) { - tscDebug("consumer:%" PRId64 ", (epoch %d) timeout, no rsp, start time %" PRId64 ", current time %" PRId64, + tscDebug("consumer:0x%" PRIx64 ", (epoch %d) timeout, no rsp, start time %" PRId64 ", current time %" PRId64, tmq->consumerId, tmq->epoch, startTime, currentTime); return NULL; } - /*tscInfo("consumer:%" PRId64 ", (epoch %d) wait, start time %" PRId64 ", current time %" PRId64*/ + /*tscInfo("consumer:0x%" PRIx64 ", (epoch %d) wait, start time %" PRId64 ", current time %" PRId64*/ /*", left time %" PRId64,*/ /*tmq->consumerId, tmq->epoch, startTime, currentTime, (timeout - passedTime));*/ tsem_timewait(&tmq->rspSem, (timeout - passedTime)); diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp index a75411a854..cb3c2f8c68 100644 --- a/source/client/test/clientTests.cpp +++ b/source/client/test/clientTests.cpp @@ -162,6 +162,11 @@ void *queryThread(void *arg) { } static int32_t numOfThreads = 1; + +void tmq_commit_cb_print(tmq_t *pTmq, int32_t code, void *param) { + printf("success, code:%d\n", code); +} + } // namespace int main(int argc, char** argv) { @@ -176,12 +181,12 @@ int main(int argc, char** argv) { return RUN_ALL_TESTS(); } -TEST(testCase, driverInit_Test) { +TEST(clientCase, driverInit_Test) { // taosInitGlobalCfg(); // taos_init(); } 
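The tmq_consumer_poll hunk above keeps the client's bounded-wait structure: every pass handles delayed tasks, broadcasts the poll, drains any queued responses, and then blocks on the response semaphore only for whatever remains of the caller's timeout. A minimal standalone sketch of that pattern is shown below, using plain POSIX sem_timedwait/clock_gettime instead of the internal tsem_timewait and taosGetTimestampMs helpers; the step callback and the bounded_poll name are placeholders for illustration, not TDengine APIs.

```c
#include <semaphore.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <time.h>

// Stand-in for one pass of tmqHandleAllDelayedTask + tmqPollImpl + tmqHandleAllRsp:
// returns true and fills *result when a response is ready, false otherwise.
typedef bool (*poll_step_fn)(void *ctx, void **result);

static int64_t now_ms(void) {
  struct timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

// Retry until a result arrives or the caller's timeout (ms, -1 = wait forever) is spent;
// between attempts, sleep on the response semaphore only for the time still remaining.
static void *bounded_poll(sem_t *rspSem, poll_step_fn step, void *ctx, int64_t timeoutMs) {
  int64_t start = now_ms();
  for (;;) {
    void *result = NULL;
    if (step(ctx, &result) && result != NULL) {
      return result;                    // a response wrapper is ready, hand it back
    }
    if (timeoutMs == -1) {
      sem_wait(rspSem);                 // no deadline: block until a response is queued
      continue;
    }
    int64_t passed = now_ms() - start;
    if (passed > timeoutMs) {
      return NULL;                      // mirrors the "timeout, no rsp" branch above
    }
    int64_t left = timeoutMs - passed;
    struct timespec abs;
    clock_gettime(CLOCK_REALTIME, &abs);
    abs.tv_sec += left / 1000;
    abs.tv_nsec += (left % 1000) * 1000000;
    if (abs.tv_nsec >= 1000000000L) {   // normalize the timespec
      abs.tv_sec += 1;
      abs.tv_nsec -= 1000000000L;
    }
    sem_timedwait(rspSem, &abs);        // woken early when a poll response lands in the queue
  }
}
```

Waiting only for the remaining slice of the timeout keeps the loop responsive to incoming poll responses without busy-spinning, which is the same intent as the tsem_timewait(&tmq->rspSem, (timeout - passedTime)) call in the hunk above.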
-TEST(testCase, connect_Test) { +TEST(clientCase, connect_Test) { taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg"); TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); @@ -190,8 +195,8 @@ TEST(testCase, connect_Test) { } taos_close(pConn); } -#if 0 -TEST(testCase, create_user_Test) { + +TEST(clientCase, create_user_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -204,7 +209,7 @@ TEST(testCase, create_user_Test) { taos_close(pConn); } -TEST(testCase, create_account_Test) { +TEST(clientCase, create_account_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -217,7 +222,7 @@ TEST(testCase, create_account_Test) { taos_close(pConn); } -TEST(testCase, drop_account_Test) { +TEST(clientCase, drop_account_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -230,7 +235,7 @@ TEST(testCase, drop_account_Test) { taos_close(pConn); } -TEST(testCase, show_user_Test) { +TEST(clientCase, show_user_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -250,7 +255,7 @@ TEST(testCase, show_user_Test) { taos_close(pConn); } -TEST(testCase, drop_user_Test) { +TEST(clientCase, drop_user_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -263,7 +268,7 @@ TEST(testCase, drop_user_Test) { taos_close(pConn); } -TEST(testCase, show_db_Test) { +TEST(clientCase, show_db_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -282,7 +287,7 @@ TEST(testCase, show_db_Test) { taos_close(pConn); } -TEST(testCase, create_db_Test) { +TEST(clientCase, create_db_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -306,7 +311,7 @@ TEST(testCase, create_db_Test) { taos_close(pConn); } -TEST(testCase, create_dnode_Test) { +TEST(clientCase, create_dnode_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -325,7 +330,7 @@ TEST(testCase, create_dnode_Test) { taos_close(pConn); } -TEST(testCase, drop_dnode_Test) { +TEST(clientCase, drop_dnode_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -349,7 +354,7 @@ TEST(testCase, drop_dnode_Test) { taos_close(pConn); } -TEST(testCase, use_db_test) { +TEST(clientCase, use_db_test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -367,7 +372,7 @@ TEST(testCase, use_db_test) { taos_close(pConn); } -// TEST(testCase, drop_db_test) { +// TEST(clientCase, drop_db_test) { // TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); // assert(pConn != NULL); // @@ -389,7 +394,7 @@ TEST(testCase, use_db_test) { // taos_close(pConn); //} -TEST(testCase, create_stable_Test) { +TEST(clientCase, create_stable_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -428,7 +433,7 @@ TEST(testCase, create_stable_Test) { taos_close(pConn); } -TEST(testCase, create_table_Test) { +TEST(clientCase, create_table_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -447,7 +452,7 @@ TEST(testCase, create_table_Test) { taos_close(pConn); } -TEST(testCase, create_ctable_Test) { +TEST(clientCase, create_ctable_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", 
NULL, 0); assert(pConn != NULL); @@ -472,7 +477,7 @@ TEST(testCase, create_ctable_Test) { taos_close(pConn); } -TEST(testCase, show_stable_Test) { +TEST(clientCase, show_stable_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != nullptr); @@ -497,7 +502,7 @@ TEST(testCase, show_stable_Test) { taos_close(pConn); } -TEST(testCase, show_vgroup_Test) { +TEST(clientCase, show_vgroup_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -529,7 +534,7 @@ TEST(testCase, show_vgroup_Test) { taos_close(pConn); } -TEST(testCase, create_multiple_tables) { +TEST(clientCase, create_multiple_tables) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -600,7 +605,7 @@ TEST(testCase, create_multiple_tables) { taos_close(pConn); } -TEST(testCase, show_table_Test) { +TEST(clientCase, show_table_Test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); assert(pConn != NULL); @@ -634,7 +639,7 @@ TEST(testCase, show_table_Test) { taos_close(pConn); } -//TEST(testCase, drop_stable_Test) { +//TEST(clientCase, drop_stable_Test) { // TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); // assert(pConn != nullptr); // @@ -659,14 +664,14 @@ TEST(testCase, show_table_Test) { // taos_close(pConn); //} -TEST(testCase, generated_request_id_test) { +TEST(clientCase, generated_request_id_test) { SHashObj* phash = taosHashInit(10000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK); for (int32_t i = 0; i < 50000; ++i) { uint64_t v = generateRequestId(); void* result = taosHashGet(phash, &v, sizeof(v)); if (result != nullptr) { - printf("0x%lx, index:%d\n", v, i); +// printf("0x%llx, index:%d\n", v, i); } assert(result == nullptr); taosHashPut(phash, &v, sizeof(v), NULL, 0); @@ -675,7 +680,7 @@ TEST(testCase, generated_request_id_test) { taosHashCleanup(phash); } -TEST(testCase, insert_test) { +TEST(clientCase, insert_test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -692,9 +697,8 @@ TEST(testCase, insert_test) { taos_free_result(pRes); taos_close(pConn); } -#endif -TEST(testCase, projection_query_tables) { +TEST(clientCase, projection_query_tables) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -752,8 +756,7 @@ TEST(testCase, projection_query_tables) { taos_close(pConn); } -#if 0 -TEST(testCase, tsbs_perf_test) { +TEST(clientCase, tsbs_perf_test) { TdThread qid[20] = {0}; for(int32_t i = 0; i < numOfThreads; ++i) { @@ -762,7 +765,7 @@ TEST(testCase, tsbs_perf_test) { getchar(); } -TEST(testCase, projection_query_stables) { +TEST(clientCase, projection_query_stables) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -790,7 +793,7 @@ TEST(testCase, projection_query_stables) { taos_close(pConn); } -TEST(testCase, agg_query_tables) { +TEST(clientCase, agg_query_tables) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -825,7 +828,7 @@ create table tm1 using m1 tags(2); insert into tm0 values('2021-1-1 1:1:1.120', 1) ('2021-1-1 1:1:2.9', 2) tm1 values('2021-1-1 1:1:1.120', 11) ('2021-1-1 1:1:2.99', 22); */ -TEST(testCase, async_api_test) { +TEST(clientCase, async_api_test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -859,7 +862,7 @@ TEST(testCase, 
async_api_test) { taos_close(pConn); } -TEST(testCase, update_test) { +TEST(clientCase, update_test) { TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(pConn, nullptr); @@ -895,6 +898,76 @@ TEST(testCase, update_test) { } } -#endif +TEST(clientCase, subscription_test) { + TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT_NE(pConn, nullptr); + + // TAOS_RES* pRes = taos_query(pConn, "create topic topic_t1 as select * from t1"); + // if (taos_errno(pRes) != TSDB_CODE_SUCCESS) { + // printf("failed to create topic, code:%s", taos_errstr(pRes)); + // taos_free_result(pRes); + // return; + // } + + tmq_conf_t* conf = tmq_conf_new(); + tmq_conf_set(conf, "enable.auto.commit", "true"); + tmq_conf_set(conf, "auto.commit.interval.ms", "1000"); + tmq_conf_set(conf, "group.id", "newabcdefgjhijlm__"); + tmq_conf_set(conf, "td.connect.user", "root"); + tmq_conf_set(conf, "td.connect.pass", "taosdata"); + tmq_conf_set(conf, "auto.offset.reset", "earliest"); + tmq_conf_set(conf, "experimental.snapshot.enable", "true"); + tmq_conf_set(conf, "msg.with.table.name", "true"); + tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); + + tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); + tmq_conf_destroy(conf); + + // build the list of topics to subscribe + tmq_list_t* topicList = tmq_list_new(); + tmq_list_append(topicList, "topic_t1"); + + // start the subscription + tmq_subscribe(tmq, topicList); + tmq_list_destroy(topicList); + + TAOS_FIELD* fields = NULL; + int32_t numOfFields = 0; + int32_t precision = 0; + int32_t totalRows = 0; + int32_t msgCnt = 0; + int32_t timeout = 5000; + + while (1) { + TAOS_RES* pRes = tmq_consumer_poll(tmq, timeout); + if (pRes) { + char buf[1024]; + int32_t rows = 0; + + const char* topicName = tmq_get_topic_name(pRes); + const char* dbName = tmq_get_db_name(pRes); + int32_t vgroupId = tmq_get_vgroup_id(pRes); + + printf("topic: %s\n", topicName); + printf("db: %s\n", dbName); + printf("vgroup id: %d\n", vgroupId); + + while (1) { + TAOS_ROW row = taos_fetch_row(pRes); + if (row == NULL) break; + + fields = taos_fetch_fields(pRes); + numOfFields = taos_field_count(pRes); + precision = taos_result_precision(pRes); + rows++; + taos_print_row(buf, row, fields, numOfFields); + printf("precision: %d, row content: %s\n", precision, buf); + } + } +// return rows; + } + + fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); +} #pragma GCC diagnostic pop diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 2d4c571d31..e866203372 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1546,7 +1546,10 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) { } void colDataDestroy(SColumnInfoData* pColData) { - if (!pColData) return; + if (!pColData) { + return; + } + if (IS_VAR_DATA_TYPE(pColData->info.type)) { taosMemoryFreeClear(pColData->varmeta.offset); } else { @@ -2525,8 +2528,7 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) { pStart += sizeof(uint64_t); if (pBlock->pDataBlock == NULL) { - pBlock->pDataBlock = taosArrayInit(numOfCols, sizeof(SColumnInfoData)); - taosArraySetSize(pBlock->pDataBlock, numOfCols); + pBlock->pDataBlock = taosArrayInit_s(numOfCols, sizeof(SColumnInfoData), numOfCols); } for (int32_t i = 0; i < numOfCols; ++i) { diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c index 7c0de3d6f0..4cac0cbe14 100644 --- a/source/common/src/tdataformat.c +++ b/source/common/src/tdataformat.c @@ -139,7
+139,10 @@ int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow) { nkv += tPutI16v(NULL, -pTColumn->colId); nIdx++; } else { - ASSERT(0); + if (ASSERTS(0, "invalid input")) { + code = TSDB_CODE_INVALID_PARA; + goto _exit; + } } pTColumn = (++iTColumn < pTSchema->numOfCols) ? pTSchema->columns + iTColumn : NULL; @@ -176,8 +179,10 @@ int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow) { ntp = sizeof(SRow) + BIT2_SIZE(pTSchema->numOfCols - 1) + pTSchema->flen + ntp; break; default: - ASSERT(0); - break; + if (ASSERTS(0, "impossible")) { + code = TSDB_CODE_INVALID_PARA; + goto _exit; + } } if (maxIdx <= UINT8_MAX) { nkv = sizeof(SRow) + sizeof(SKVIdx) + nIdx + nkv; @@ -306,8 +311,10 @@ int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow) { pv = pf + pTSchema->flen; break; default: - ASSERT(0); - break; + if (ASSERTS(0, "impossible")) { + code = TSDB_CODE_INVALID_PARA; + goto _exit; + } } if (pb) { @@ -370,7 +377,7 @@ _exit: return code; } -void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) { +int32_t tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) { ASSERT(iCol < pTSchema->numOfCols); ASSERT(pRow->sver == pTSchema->version); @@ -381,17 +388,17 @@ void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) { pColVal->type = pTColumn->type; pColVal->flag = CV_FLAG_VALUE; memcpy(&pColVal->value.val, &pRow->ts, sizeof(TSKEY)); - return; + return 0; } if (pRow->flag == HAS_NONE) { *pColVal = COL_VAL_NONE(pTColumn->colId, pTColumn->type); - return; + return 0; } if (pRow->flag == HAS_NULL) { *pColVal = COL_VAL_NULL(pTColumn->colId, pTColumn->type); - return; + return 0; } if (pRow->flag >> 4) { // KV Row @@ -440,7 +447,7 @@ void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) { memcpy(&pColVal->value.val, pData, pTColumn->bytes); } } - return; + return 0; } else if (TABS(cid) < pTColumn->colId) { lidx = mid + 1; } else { @@ -492,16 +499,16 @@ void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) { pv = pf + pTSchema->flen; break; default: - ASSERT(0); - break; + ASSERTS(0, "invalid row format"); + return TSDB_CODE_IVLD_DATA_FMT; } if (bv == BIT_FLG_NONE) { *pColVal = COL_VAL_NONE(pTColumn->colId, pTColumn->type); - return; + return 0; } else if (bv == BIT_FLG_NULL) { *pColVal = COL_VAL_NULL(pTColumn->colId, pTColumn->type); - return; + return 0; } pColVal->cid = pTColumn->colId; @@ -520,6 +527,8 @@ void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) { } } } + + return 0; } void tRowDestroy(SRow *pRow) { @@ -710,7 +719,6 @@ int32_t tRowIterOpen(SRow *pRow, STSchema *pTSchema, SRowIter **ppIter) { _exit: if (code) { *ppIter = NULL; - if (pIter) taosMemoryFree(pIter); } else { *ppIter = pIter; } @@ -929,8 +937,8 @@ static int32_t tRowTupleUpsertColData(SRow *pRow, STSchema *pTSchema, SColData * pv = pf + pTSchema->flen; break; default: - ASSERT(0); - break; + ASSERTS(0, "Invalid row flag"); + return TSDB_CODE_IVLD_DATA_FMT; } while (pColData) { @@ -954,8 +962,8 @@ static int32_t tRowTupleUpsertColData(SRow *pRow, STSchema *pTSchema, SColData * bv = GET_BIT2(pb, iTColumn - 1); break; default: - ASSERT(0); - break; + ASSERTS(0, "Invalid row flag"); + return TSDB_CODE_IVLD_DATA_FMT; } if (bv == BIT_FLG_NONE) { @@ -1045,7 +1053,8 @@ static int32_t tRowKVUpsertColData(SRow *pRow, STSchema *pTSchema, SColData *aCo } else if (pRow->flag & KV_FLG_BIG) { pData = pv + ((uint32_t *)pKVIdx->idx)[iCol]; } else { - 
ASSERT(0); + ASSERTS(0, "Invalid KV row format"); + return TSDB_CODE_IVLD_DATA_FMT; } int16_t cid; @@ -1579,7 +1588,7 @@ static FORCE_INLINE int32_t tColDataPutValue(SColData *pColData, uint8_t *pData, int32_t code = 0; if (IS_VAR_DATA_TYPE(pColData->type)) { - code = tRealloc((uint8_t **)(&pColData->aOffset), (pColData->nVal + 1) << 2); + code = tRealloc((uint8_t **)(&pColData->aOffset), ((int64_t)(pColData->nVal + 1)) << 2); if (code) goto _exit; pColData->aOffset[pColData->nVal] = pColData->nData; @@ -2312,35 +2321,25 @@ void tColDataGetValue(SColData *pColData, int32_t iVal, SColVal *pColVal) { } uint8_t tColDataGetBitValue(const SColData *pColData, int32_t iVal) { - uint8_t v; switch (pColData->flag) { case HAS_NONE: - v = 0; - break; + return 0; case HAS_NULL: - v = 1; - break; + return 1; case (HAS_NULL | HAS_NONE): - v = GET_BIT1(pColData->pBitMap, iVal); - break; + return GET_BIT1(pColData->pBitMap, iVal); case HAS_VALUE: - v = 2; - break; + return 2; case (HAS_VALUE | HAS_NONE): - v = GET_BIT1(pColData->pBitMap, iVal); - if (v) v = 2; - break; + return (GET_BIT1(pColData->pBitMap, iVal)) ? 2 : 0; case (HAS_VALUE | HAS_NULL): - v = GET_BIT1(pColData->pBitMap, iVal) + 1; - break; + return GET_BIT1(pColData->pBitMap, iVal) + 1; case (HAS_VALUE | HAS_NULL | HAS_NONE): - v = GET_BIT2(pColData->pBitMap, iVal); - break; + return GET_BIT2(pColData->pBitMap, iVal); default: - ASSERT(0); - break; + ASSERTS(0, "not possible"); + return 0; } - return v; } int32_t tColDataCopy(SColData *pColDataFrom, SColData *pColData, xMallocFn xMalloc, void *arg) { diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index d08110becd..e64646dd03 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -41,6 +41,7 @@ bool tsPrintAuth = false; // queue & threads int32_t tsNumOfRpcThreads = 1; +int32_t tsNumOfRpcSessions = 2000; int32_t tsNumOfCommitThreads = 2; int32_t tsNumOfTaskQueueThreads = 4; int32_t tsNumOfMnodeQueryThreads = 4; @@ -54,7 +55,6 @@ int32_t tsNumOfQnodeQueryThreads = 4; int32_t tsNumOfQnodeFetchThreads = 1; int32_t tsNumOfSnodeStreamThreads = 4; int32_t tsNumOfSnodeWriteThreads = 1; - // sync raft int32_t tsElectInterval = 25 * 1000; int32_t tsHeartbeatInterval = 1000; @@ -140,6 +140,7 @@ int32_t tsMaxMemUsedByInsert = 1024; float tsSelectivityRatio = 1.0; int32_t tsTagFilterResCacheSize = 1024 * 10; +char tsTagFilterCache = 0; // the maximum allowed query buffer size during query processing for each data node. 
// -1 no limit (default) @@ -188,6 +189,7 @@ int32_t tsGrantHBInterval = 60; int32_t tsUptimeInterval = 300; // seconds char tsUdfdResFuncs[512] = ""; // udfd resident funcs that teardown when udfd exits char tsUdfdLdLibPath[512] = ""; +bool tsDisableStream = false; #ifndef _STORAGE int32_t taosSetTfsCfg(SConfig *pCfg) { @@ -349,6 +351,7 @@ static int32_t taosAddSystemCfg(SConfig *pCfg) { if (cfgAddBool(pCfg, "AVX2", tsAVX2Enable, 0) != 0) return -1; if (cfgAddBool(pCfg, "FMA", tsFMAEnable, 0) != 0) return -1; if (cfgAddBool(pCfg, "SIMD-builtins", tsSIMDBuiltins, 0) != 0) return -1; + if (cfgAddBool(pCfg, "tagFilterCache", tsTagFilterCache, 0) != 0) return -1; if (cfgAddInt64(pCfg, "openMax", tsOpenMax, 0, INT64_MAX, 1) != 0) return -1; #if !defined(_ALPINE) @@ -388,9 +391,12 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "queryRspPolicy", tsQueryRspPolicy, 0, 1, 0) != 0) return -1; tsNumOfRpcThreads = tsNumOfCores / 2; - tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, TSDB_MAX_RPC_THREADS); + tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 2, TSDB_MAX_RPC_THREADS); if (cfgAddInt32(pCfg, "numOfRpcThreads", tsNumOfRpcThreads, 1, 1024, 0) != 0) return -1; + tsNumOfRpcSessions = TRANGE(tsNumOfRpcSessions, 100, 10000); + if (cfgAddInt32(pCfg, "numOfRpcSessions", tsNumOfRpcSessions, 1, 100000, 0) != 0) return -1; + tsNumOfCommitThreads = tsNumOfCores / 2; tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4); if (cfgAddInt32(pCfg, "numOfCommitThreads", tsNumOfCommitThreads, 1, 1024, 0) != 0) return -1; @@ -467,6 +473,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddString(pCfg, "udfdResFuncs", tsUdfdResFuncs, 0) != 0) return -1; if (cfgAddString(pCfg, "udfdLdLibPath", tsUdfdLdLibPath, 0) != 0) return -1; + if (cfgAddBool(pCfg, "disableStream", tsDisableStream, 0) != 0) return -1; + GRANT_CFG_ADD; return 0; } @@ -496,11 +504,19 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) { pItem = cfgGetItem(tsCfg, "numOfRpcThreads"); if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { tsNumOfRpcThreads = numOfCores / 2; - tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, 4); + tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 2, TSDB_MAX_RPC_THREADS); pItem->i32 = tsNumOfRpcThreads; pItem->stype = stype; } + pItem = cfgGetItem(tsCfg, "numOfRpcSessions"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsNumOfRpcSessions = 2000; + tsNumOfRpcSessions = TRANGE(tsNumOfRpcSessions, 100, 10000); + pItem->i32 = tsNumOfRpcSessions; + pItem->stype = stype; + } + pItem = cfgGetItem(tsCfg, "numOfCommitThreads"); if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { tsNumOfCommitThreads = numOfCores / 2; @@ -718,6 +734,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsPrintAuth = cfgGetItem(pCfg, "printAuth")->bval; tsNumOfRpcThreads = cfgGetItem(pCfg, "numOfRpcThreads")->i32; + tsNumOfRpcSessions = cfgGetItem(pCfg, "numOfRpcSessions")->i32; tsNumOfCommitThreads = cfgGetItem(pCfg, "numOfCommitThreads")->i32; tsNumOfMnodeReadThreads = cfgGetItem(pCfg, "numOfMnodeReadThreads")->i32; tsNumOfVnodeQueryThreads = cfgGetItem(pCfg, "numOfVnodeQueryThreads")->i32; @@ -731,6 +748,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsRpcQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64; tsSIMDBuiltins = (bool)cfgGetItem(pCfg, "SIMD-builtins")->bval; + tsTagFilterCache = (bool)cfgGetItem(pCfg, "tagFilterCache")->bval; tsEnableMonitor = cfgGetItem(pCfg, "monitor")->bval; tsMonitorInterval = cfgGetItem(pCfg, "monitorInterval")->i32; @@ 
-767,6 +785,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { if (tsQueryBufferSize >= 0) { tsQueryBufferSizeBytes = tsQueryBufferSize * 1048576UL; } + + tsDisableStream = cfgGetItem(pCfg, "disableStream")->bval; + GRANT_CFG_GET; return 0; } @@ -973,6 +994,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) { tsNumOfTaskQueueThreads = cfgGetItem(pCfg, "numOfTaskQueueThreads")->i32; } else if (strcasecmp("numOfRpcThreads", name) == 0) { tsNumOfRpcThreads = cfgGetItem(pCfg, "numOfRpcThreads")->i32; + } else if (strcasecmp("numOfRpcSessions", name) == 0) { + tsNumOfRpcSessions = cfgGetItem(pCfg, "numOfRpcSessions")->i32; } else if (strcasecmp("numOfCommitThreads", name) == 0) { tsNumOfCommitThreads = cfgGetItem(pCfg, "numOfCommitThreads")->i32; } else if (strcasecmp("numOfMnodeReadThreads", name) == 0) { diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index d84c807fb1..e180959d1e 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -5592,6 +5592,7 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS if (tEncodeI16(&encoder, pCol->colId) < 0) return -1; if (tEncodeI8(&encoder, pCol->type) < 0) return -1; } + if (tEncodeI64(&encoder, pReq->deleteMark) < 0) return -1; if (tEncodeI8(&encoder, pReq->igUpdate) < 0) return -1; tEndEncode(&encoder); @@ -5676,6 +5677,7 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea } } + if (tDecodeI64(&decoder, &pReq->deleteMark) < 0) return -1; if (tDecodeI8(&decoder, &pReq->igUpdate) < 0) return -1; tEndDecode(&decoder); diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index d2b9618c60..8abce50cf0 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -280,10 +280,19 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.retryMaxInterval = tsRedirectMaxPeriod; rpcInit.retryMaxTimouet = tsMaxRetryWaitTime; - rpcInit.failFastInterval = 1000; // interval threshold(ms) + rpcInit.failFastInterval = 5000; // interval threshold(ms) rpcInit.failFastThreshold = 3; // failed threshold rpcInit.ffp = dmFailFastFp; + int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3); + connLimitNum = TMAX(connLimitNum, 10); + connLimitNum = TMIN(connLimitNum, 500); + + rpcInit.connLimitNum = connLimitNum; + rpcInit.connLimitLock = 1; + rpcInit.supportBatch = 1; + rpcInit.batchSize = 8 * 1024; + pTrans->clientRpc = rpcOpen(&rpcInit); if (pTrans->clientRpc == NULL) { dError("failed to init dnode rpc client"); diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c index a7a5b8b999..e9ab8a0460 100644 --- a/source/dnode/mgmt/node_util/src/dmEps.c +++ b/source/dnode/mgmt/node_util/src/dmEps.c @@ -402,7 +402,7 @@ static int32_t dmDecodeEpPairs(SJson *pJson, SDnodeData *pData) { int32_t code = 0; SJson *dnodes = tjsonGetObjectItem(pJson, "dnodes"); - if (dnodes == NULL) return 0; + if (dnodes == NULL) return -1; int32_t numOfDnodes = tjsonGetArraySize(dnodes); for (int32_t i = 0; i < numOfDnodes; ++i) { diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index da2ad158e3..38001a97bb 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ b/source/dnode/mnode/impl/src/mndDef.c @@ -146,7 +146,9 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj, int32_t sver) { // 3.0.20 if (sver >= 2) { if (tDecodeI64(pDecoder, &pObj->checkpointFreq) < 0) return -1; - if (tDecodeI8(pDecoder, 
&pObj->igCheckUpdate) < 0) return -1; + if (!tDecodeIsEnd(pDecoder)) { + if (tDecodeI8(pDecoder, &pObj->igCheckUpdate) < 0) return -1; + } } tEndDecode(pDecoder); return 0; @@ -415,19 +417,21 @@ void *tDecodeSMqConsumerEp(const void *buf, SMqConsumerEp *pConsumerEp) { return (void *)buf; } -SMqSubscribeObj *tNewSubscribeObj(const char key[TSDB_SUBSCRIBE_KEY_LEN]) { - SMqSubscribeObj *pSubNew = taosMemoryCalloc(1, sizeof(SMqSubscribeObj)); - if (pSubNew == NULL) return NULL; - memcpy(pSubNew->key, key, TSDB_SUBSCRIBE_KEY_LEN); - taosInitRWLatch(&pSubNew->lock); - pSubNew->vgNum = 0; - pSubNew->consumerHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); +SMqSubscribeObj *tNewSubscribeObj(const char* key) { + SMqSubscribeObj *pSubObj = taosMemoryCalloc(1, sizeof(SMqSubscribeObj)); + if (pSubObj == NULL) { + return NULL; + } + + memcpy(pSubObj->key, key, TSDB_SUBSCRIBE_KEY_LEN); + taosInitRWLatch(&pSubObj->lock); + pSubObj->vgNum = 0; + pSubObj->consumerHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + // TODO set hash free fp - /*taosHashSetFreeFp(pSubNew->consumerHash, tDeleteSMqConsumerEp);*/ - - pSubNew->unassignedVgs = taosArrayInit(0, sizeof(void *)); - - return pSubNew; + /*taosHashSetFreeFp(pSubObj->consumerHash, tDeleteSMqConsumerEp);*/ + pSubObj->unassignedVgs = taosArrayInit(0, POINTER_BYTES); + return pSubObj; } SMqSubscribeObj *tCloneSubscribeObj(const SMqSubscribeObj *pSub) { diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 97490beb3c..7f95774ea0 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -882,6 +882,12 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { if (strcasecmp(cfgReq.config, "resetlog") == 0) { strcpy(dcfgReq.config, "resetlog"); } else if (strncasecmp(cfgReq.config, "monitor", 7) == 0) { + if (' ' != cfgReq.config[7] && 0 != cfgReq.config[7]) { + mError("dnode:%d, failed to config monitor since invalid conf:%s", cfgReq.dnodeId, cfgReq.config); + terrno = TSDB_CODE_INVALID_CFG; + return -1; + } + const char *value = cfgReq.value; int32_t flag = atoi(value); if (flag <= 0) { @@ -902,12 +908,18 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) { int32_t optLen = strlen(optName); if (strncasecmp(cfgReq.config, optName, optLen) != 0) continue; + if (' ' != cfgReq.config[optLen] && 0 != cfgReq.config[optLen]) { + mError("dnode:%d, failed to config since invalid conf:%s", cfgReq.dnodeId, cfgReq.config); + terrno = TSDB_CODE_INVALID_CFG; + return -1; + } + const char *value = cfgReq.value; int32_t flag = atoi(value); if (flag <= 0) { flag = atoi(cfgReq.config + optLen + 1); } - if (flag <= 0 || flag > 255) { + if (flag < 0 || flag > 255) { mError("dnode:%d, failed to config %s since value:%d", cfgReq.dnodeId, optName, flag); terrno = TSDB_CODE_INVALID_CFG; return -1; diff --git a/source/dnode/mnode/impl/src/mndIndex.c b/source/dnode/mnode/impl/src/mndIndex.c index 016f01b032..8782fd823f 100644 --- a/source/dnode/mnode/impl/src/mndIndex.c +++ b/source/dnode/mnode/impl/src/mndIndex.c @@ -638,7 +638,7 @@ static int32_t mndSetUpdateIdxStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb } int32_t mndAddIndexImpl(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb, SIdxObj *pIdx) { // impl later - int32_t code = 0; + int32_t code = -1; SStbObj newStb = {0}; STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq, "create-stb-index"); 
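  // code is initialized to -1 above, so any early jump to _OVER before the
  // transaction work completes reports a failure to the caller instead of success.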
if (pTrans == NULL) goto _OVER; @@ -670,6 +670,7 @@ _OVER: } static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *req, SDbObj *pDb, SStbObj *pStb) { + int32_t code = -1; SIdxObj idxObj = {0}; memcpy(idxObj.name, req->idxName, TSDB_TABLE_FNAME_LEN); memcpy(idxObj.stb, pStb->name, TSDB_TABLE_FNAME_LEN); @@ -681,21 +682,6 @@ static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *re idxObj.stbUid = pStb->uid; idxObj.dbUid = pStb->dbUid; - int32_t code = -1; - // SField *pField0 = NULL; - - // SStbObj stbObj = {0}; - // SStbObj *pNew = &stbObj; - - // taosRLockLatch(&pOld->lock); - // memcpy(&stbObj, pOld, sizeof(SStbObj)); - // taosRUnLockLatch(&pOld->lock); - - // stbObj.pColumns = NULL; - // stbObj.pTags = NULL; - // stbObj.updateTime = taosGetTimestampMs(); - // stbObj.lock = 0; - int32_t tag = mndFindSuperTableTagId(pStb, req->colName); if (tag < 0) { terrno = TSDB_CODE_MND_TAG_NOT_EXIST; diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index 75411f6089..70ba7ed4ef 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -133,6 +133,7 @@ static void mndCalMqRebalance(SMnode *pMnode) { } } +#if 0 static void mndStreamCheckpointTick(SMnode *pMnode, int64_t sec) { int32_t contLen = 0; void *pReq = mndBuildCheckpointTickMsg(&contLen, sec); @@ -145,6 +146,7 @@ static void mndStreamCheckpointTick(SMnode *pMnode, int64_t sec) { tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg); } } +#endif static void mndPullupTelem(SMnode *pMnode) { mTrace("pullup telem msg"); diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 98811cde11..47ebdd706d 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -39,7 +39,7 @@ static int32_t mndStreamActionUpdate(SSdb *pSdb, SStreamObj *pStream, SStreamObj static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq); static int32_t mndProcessDropStreamReq(SRpcMsg *pReq); static int32_t mndProcessStreamCheckpointTmr(SRpcMsg *pReq); -static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq); +// static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq); /*static int32_t mndProcessRecoverStreamReq(SRpcMsg *pReq);*/ static int32_t mndProcessStreamMetaReq(SRpcMsg *pReq); static int32_t mndGetStreamMeta(SRpcMsg *pReq, SShowObj *pShow, STableMetaRsp *pMeta); @@ -66,8 +66,8 @@ int32_t mndInitStream(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_STREAM_TASK_DEPLOY_RSP, mndTransProcessRsp); mndSetMsgHandle(pMnode, TDMT_STREAM_TASK_DROP_RSP, mndTransProcessRsp); - mndSetMsgHandle(pMnode, TDMT_MND_STREAM_CHECKPOINT_TIMER, mndProcessStreamCheckpointTmr); - mndSetMsgHandle(pMnode, TDMT_MND_STREAM_BEGIN_CHECKPOINT, mndProcessStreamDoCheckpoint); + // mndSetMsgHandle(pMnode, TDMT_MND_STREAM_CHECKPOINT_TIMER, mndProcessStreamCheckpointTmr); + // mndSetMsgHandle(pMnode, TDMT_MND_STREAM_BEGIN_CHECKPOINT, mndProcessStreamDoCheckpoint); mndSetMsgHandle(pMnode, TDMT_STREAM_TASK_REPORT_CHECKPOINT, mndTransProcessRsp); mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_STREAMS, mndRetrieveStream); @@ -297,6 +297,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj, pObj->triggerParam = pCreate->maxDelay; pObj->watermark = pCreate->watermark; pObj->fillHistory = pCreate->fillHistory; + pObj->deleteMark = pCreate->deleteMark; pObj->igCheckUpdate = pCreate->igUpdate; memcpy(pObj->sourceDb, pCreate->sourceDB, TSDB_DB_FNAME_LEN); @@ -342,9 +343,9 
@@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj, } int32_t numOfNULL = taosArrayGetSize(pCreate->fillNullCols); - if(numOfNULL > 0) { + if (numOfNULL > 0) { pObj->outputSchema.nCols += numOfNULL; - SSchema* pFullSchema = taosMemoryCalloc(pObj->outputSchema.nCols, sizeof(SSchema)); + SSchema *pFullSchema = taosMemoryCalloc(pObj->outputSchema.nCols, sizeof(SSchema)); if (!pFullSchema) { goto FAIL; } @@ -352,10 +353,10 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj, int32_t nullIndex = 0; int32_t dataIndex = 0; for (int16_t i = 0; i < pObj->outputSchema.nCols; i++) { - SColLocation* pos = taosArrayGet(pCreate->fillNullCols, nullIndex); + SColLocation *pos = taosArrayGet(pCreate->fillNullCols, nullIndex); if (i < pos->slotId) { pFullSchema[i].bytes = pObj->outputSchema.pSchema[dataIndex].bytes; - pFullSchema[i].colId = i + 1; // pObj->outputSchema.pSchema[dataIndex].colId; + pFullSchema[i].colId = i + 1; // pObj->outputSchema.pSchema[dataIndex].colId; pFullSchema[i].flags = pObj->outputSchema.pSchema[dataIndex].flags; strcpy(pFullSchema[i].name, pObj->outputSchema.pSchema[dataIndex].name); pFullSchema[i].type = pObj->outputSchema.pSchema[dataIndex].type; @@ -380,6 +381,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj, .triggerType = pObj->trigger == STREAM_TRIGGER_MAX_DELAY ? STREAM_TRIGGER_WINDOW_CLOSE : pObj->trigger, .watermark = pObj->watermark, .igExpired = pObj->igExpired, + .deleteMark = pObj->deleteMark, .igCheckUpdate = pObj->igCheckUpdate, }; @@ -505,9 +507,8 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre SMCreateStbReq createReq = {0}; tstrncpy(createReq.name, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN); createReq.numOfColumns = pStream->outputSchema.nCols; - createReq.pColumns = taosArrayInit(createReq.numOfColumns, sizeof(SField)); + createReq.pColumns = taosArrayInit_s(createReq.numOfColumns, sizeof(SField), createReq.numOfColumns); // build fields - taosArraySetSize(createReq.pColumns, createReq.numOfColumns); for (int32_t i = 0; i < createReq.numOfColumns; i++) { SField *pField = taosArrayGet(createReq.pColumns, i); tstrncpy(pField->name, pStream->outputSchema.pSchema[i].name, TSDB_COL_NAME_LEN); @@ -518,8 +519,7 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre if (pStream->tagSchema.nCols == 0) { createReq.numOfTags = 1; - createReq.pTags = taosArrayInit(createReq.numOfTags, sizeof(SField)); - taosArraySetSize(createReq.pTags, createReq.numOfTags); + createReq.pTags = taosArrayInit_s(createReq.numOfTags, sizeof(SField), 1); // build tags SField *pField = taosArrayGet(createReq.pTags, 0); strcpy(pField->name, "group_id"); @@ -528,8 +528,7 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre pField->bytes = 8; } else { createReq.numOfTags = pStream->tagSchema.nCols; - createReq.pTags = taosArrayInit(createReq.numOfTags, sizeof(SField)); - taosArraySetSize(createReq.pTags, createReq.numOfTags); + createReq.pTags = taosArrayInit_s(createReq.numOfTags, sizeof(SField), createReq.numOfTags); for (int32_t i = 0; i < createReq.numOfTags; i++) { SField *pField = taosArrayGet(createReq.pTags, i); pField->bytes = pStream->tagSchema.pSchema[i].bytes; @@ -726,7 +725,8 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER; // create stb for stream - if (createStreamReq.createStb == STREAM_CREATE_STABLE_TRUE && 
mndCreateStbForStream(pMnode, pTrans, &streamObj, pReq->info.conn.user) < 0) { + if (createStreamReq.createStb == STREAM_CREATE_STABLE_TRUE && + mndCreateStbForStream(pMnode, pTrans, &streamObj, pReq->info.conn.user) < 0) { mError("trans:%d, failed to create stb for stream %s since %s", pTrans->id, createStreamReq.name, terrstr()); mndTransDrop(pTrans); goto _OVER; @@ -778,6 +778,9 @@ _OVER: tFreeStreamObj(&streamObj); return code; } + +#if 0 + static int32_t mndProcessStreamCheckpointTmr(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SSdb *pSdb = pMnode->pSdb; @@ -942,6 +945,8 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) { return 0; } +#endif + static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SStreamObj *pStream = NULL; diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 153bb8bd04..d127ceacf5 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -966,7 +966,9 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock while (numOfRows < rowsCapacity) { pShow->pIter = sdbFetch(pSdb, SDB_SUBSCRIBE, pShow->pIter, (void **)&pSub); - if (pShow->pIter == NULL) break; + if (pShow->pIter == NULL) { + break; + } taosRLockLatch(&pSub->lock); @@ -1075,6 +1077,9 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock } #endif + + pBlock->info.rows = numOfRows; + taosRUnLockLatch(&pSub->lock); sdbRelease(pSdb, pSub); } diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 53764e1fdf..acfaccafe2 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -107,8 +107,8 @@ void metaReaderClear(SMetaReader *pReader); int32_t metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid); int32_t metaGetTableEntryByUidCache(SMetaReader *pReader, tb_uid_t uid); int metaGetTableEntryByName(SMetaReader *pReader, const char *name); -int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags); -int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList, SHashObj *tags); +int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList); +int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList); int32_t metaReadNext(SMetaReader *pReader); const void *metaGetTableTagVal(void *tag, int16_t type, STagVal *tagVal); int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName); diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index 5e97e75025..44b7e95f9e 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -244,6 +244,7 @@ void tsdbHeadFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SHeadFile *pHeadF, void tsdbDataFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SDataFile *pDataF, char fname[]); void tsdbSttFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSttFile *pSttF, char fname[]); void tsdbSmaFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSmaFile *pSmaF, char fname[]); + // SDelFile void tsdbDelFileName(STsdb *pTsdb, SDelFile *pFile, char fname[]); // tsdbFS.c ============================================================================================== @@ -687,6 +688,7 @@ typedef struct SSttBlockLoadInfo { int16_t *colIds; int32_t numOfCols; bool sttBlockLoaded; + int32_t numOfStt; // keep the last access position, this position may be used to reduce the binary times for // starting last block data 
for a new table @@ -752,7 +754,7 @@ bool tMergeTreeNext(SMergeTree *pMTree); TSDBROW tMergeTreeGetRow(SMergeTree *pMTree); void tMergeTreeClose(SMergeTree *pMTree); -SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols); +SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols, int32_t numOfStt); void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo); void getLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo, int64_t *blocks, double *el); void *destroyLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo); diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 6cc63e1a6a..eda47bf5de 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -122,7 +122,7 @@ typedef struct STbUidStore STbUidStore; #define META_BEGIN_HEAP_NIL 2 int metaOpen(SVnode* pVnode, SMeta** ppMeta, int8_t rollback); -int metaClose(SMeta* pMeta); +int metaClose(SMeta** pMeta); int metaBegin(SMeta* pMeta, int8_t fromSys); TXN* metaGetTxn(SMeta* pMeta); int metaCommit(SMeta* pMeta, TXN* txn); diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 85d8f031fb..05889e4767 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -32,9 +32,9 @@ typedef struct SMetaStbStatsEntry { } SMetaStbStatsEntry; typedef struct STagFilterResEntry { - uint64_t suid; // uid for super table - SList list; // the linked list of md5 digest, extracted from the serialized tag query condition - uint32_t qTimes; // queried times for current super table + SList list; // the linked list of md5 digest, extracted from the serialized tag query condition + uint32_t hitTimes; // queried times for current super table + uint32_t accTime; } STagFilterResEntry; struct SMetaCache { @@ -55,6 +55,7 @@ struct SMetaCache { // query cache struct STagFilterResCache { TdThreadMutex lock; + uint32_t accTimes; SHashObj* pTableEntry; SLRUCache* pUidResCache; } sTagFilterResCache; @@ -132,6 +133,7 @@ int32_t metaCacheOpen(SMeta* pMeta) { goto _err2; } + pCache->sTagFilterResCache.accTimes = 0; pCache->sTagFilterResCache.pTableEntry = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), false, HASH_NO_LOCK); if (pCache->sTagFilterResCache.pTableEntry == NULL) { @@ -159,9 +161,9 @@ void metaCacheClose(SMeta* pMeta) { entryCacheClose(pMeta); statsCacheClose(pMeta); - taosHashCleanup(pMeta->pCache->sTagFilterResCache.pTableEntry); taosLRUCacheCleanup(pMeta->pCache->sTagFilterResCache.pUidResCache); taosThreadMutexDestroy(&pMeta->pCache->sTagFilterResCache.lock); + taosHashCleanup(pMeta->pCache->sTagFilterResCache.pTableEntry); taosMemoryFree(pMeta->pCache); pMeta->pCache = NULL; @@ -427,6 +429,32 @@ int32_t metaStatsCacheGet(SMeta* pMeta, int64_t uid, SMetaStbStats* pInfo) { return code; } +static int checkAllEntriesInCache(const STagFilterResEntry* pEntry, SArray* pInvalidRes, int32_t keyLen, + SLRUCache* pCache, uint64_t suid) { + SListIter iter = {0}; + tdListInitIter((SList*)&(pEntry->list), &iter, TD_LIST_FORWARD); + + SListNode* pNode = NULL; + uint64_t buf[3]; + buf[0] = suid; + + int32_t len = sizeof(uint64_t) * tListLen(buf); + + while ((pNode = tdListNext(&iter)) != NULL) { + memcpy(&buf[1], pNode->data, keyLen); + + // check whether it is existed in LRU cache, and remove it from linked list if not. 
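    // buf[0] holds the super table uid and the digest from the current list node is
    // copied in right after it; the full lookup key length is
    // sizeof(uint64_t) * tListLen(buf) = 24 bytes.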
+ LRUHandle* pRes = taosLRUCacheLookup(pCache, buf, len); + if (pRes == NULL) { // remove the item in the linked list + taosArrayPush(pInvalidRes, &pNode); + } else { + taosLRUCacheRelease(pCache, pRes, false); + } + } + + return 0; +} + int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1, bool* acquireRes) { // generate the composed key for LRU cache @@ -434,16 +462,18 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK SHashObj* pTableMap = pMeta->pCache->sTagFilterResCache.pTableEntry; TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock; - uint64_t buf[3] = {0}; - uint32_t times = 0; + uint64_t buf[4]; *acquireRes = 0; - buf[0] = suid; - memcpy(&buf[1], pKey, keyLen); + + buf[0] = (uint64_t)pTableMap; + buf[1] = suid; + memcpy(&buf[2], pKey, keyLen); taosThreadMutexLock(pLock); + pMeta->pCache->sTagFilterResCache.accTimes += 1; - int32_t len = keyLen + sizeof(uint64_t); + int32_t len = keyLen + sizeof(uint64_t) * 2; LRUHandle* pHandle = taosLRUCacheLookup(pCache, buf, len); if (pHandle == NULL) { taosThreadMutexUnlock(pLock); @@ -465,48 +495,17 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK // set the result into the buffer taosArrayAddBatch(pList1, p + sizeof(int32_t), size); - times = atomic_add_fetch_32(&(*pEntry)->qTimes, 1); + (*pEntry)->hitTimes += 1; + + uint32_t acc = pMeta->pCache->sTagFilterResCache.accTimes; + if ((*pEntry)->hitTimes % 5000 == 0 && (*pEntry)->hitTimes > 0) { + metaInfo("cache hit:%d, total acc:%d, rate:%.2f", (*pEntry)->hitTimes, acc, ((double)(*pEntry)->hitTimes) / acc); + } + taosLRUCacheRelease(pCache, pHandle, false); // unlock meta taosThreadMutexUnlock(pLock); - - // check if scanning all items are necessary or not - if (times >= 5000 && TD_DLIST_NELES(&(*pEntry)->list) > 10) { - taosThreadMutexLock(pLock); - - SArray* pInvalidRes = taosArrayInit(64, POINTER_BYTES); - - SListIter iter = {0}; - tdListInitIter(&(*pEntry)->list, &iter, TD_LIST_FORWARD); - - SListNode* pNode = NULL; - while ((pNode = tdListNext(&iter)) != NULL) { - memcpy(&buf[1], pNode->data, keyLen); - - // check whether it is existed in LRU cache, and remove it from linked list if not. - LRUHandle* pRes = taosLRUCacheLookup(pCache, buf, len); - if (pRes == NULL) { // remove the item in the linked list - taosArrayPush(pInvalidRes, &pNode); - } else { - taosLRUCacheRelease(pCache, pRes, false); - } - } - - // remove the keys, of which query uid lists have been replaced already. 
- size_t s = taosArrayGetSize(pInvalidRes); - for (int32_t i = 0; i < s; ++i) { - SListNode** p1 = taosArrayGet(pInvalidRes, i); - tdListPopNode(&(*pEntry)->list, *p1); - taosMemoryFree(*p1); - } - - atomic_store_32(&(*pEntry)->qTimes, 0); // reset the query times - taosArrayDestroy(pInvalidRes); - - taosThreadMutexUnlock(pLock); - } - return TSDB_CODE_SUCCESS; } @@ -514,9 +513,53 @@ static void freePayload(const void* key, size_t keyLen, void* value) { if (value == NULL) { return; } + + const uint64_t* p = key; + if (keyLen != sizeof(int64_t) * 4) { + metaError("key length is invalid, length:%d, expect:%d", (int32_t)keyLen, (int32_t)sizeof(uint64_t) * 2); + return; + } + + SHashObj* pHashObj = (SHashObj*)p[0]; + STagFilterResEntry** pEntry = taosHashGet(pHashObj, &p[1], sizeof(uint64_t)); + + { + int64_t st = taosGetTimestampUs(); + + SListIter iter = {0}; + tdListInitIter((SList*)&((*pEntry)->list), &iter, TD_LIST_FORWARD); + + SListNode* pNode = NULL; + while ((pNode = tdListNext(&iter)) != NULL) { + uint64_t* digest = (uint64_t*)pNode->data; + if (digest[0] == p[2] && digest[1] == p[3]) { + void* tmp = tdListPopNode(&((*pEntry)->list), pNode); + taosMemoryFree(tmp); + + int64_t et = taosGetTimestampUs(); + metaInfo("clear items in cache, remain cached item:%d, elapsed time:%.2fms", listNEles(&((*pEntry)->list)), + (et - st) / 1000.0); + break; + } + } + } + taosMemoryFree(value); } +static int32_t addNewEntry(SHashObj* pTableEntry, const void* pKey, int32_t keyLen, uint64_t suid) { + STagFilterResEntry* p = taosMemoryMalloc(sizeof(STagFilterResEntry)); + if (p == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + p->hitTimes = 0; + tdListInit(&p->list, keyLen); + taosHashPut(pTableEntry, &suid, sizeof(uint64_t), &p, POINTER_BYTES); + tdListAppend(&p->list, pKey); + return 0; +} + // check both the payload size and selectivity ratio int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload, int32_t payloadLen, double selectivityRatio) { @@ -540,45 +583,61 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int SHashObj* pTableEntry = pMeta->pCache->sTagFilterResCache.pTableEntry; TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock; + // the format of key: + // hash table address(8bytes) + suid(8bytes) + MD5 digest(16bytes) + + uint64_t buf[4] = {0}; + buf[0] = (uint64_t)pTableEntry; + buf[1] = suid; + memcpy(&buf[2], pKey, keyLen); + ASSERT(keyLen == 16); + + int32_t code = 0; taosThreadMutexLock(pLock); STagFilterResEntry** pEntry = taosHashGet(pTableEntry, &suid, sizeof(uint64_t)); if (pEntry == NULL) { - STagFilterResEntry* p = taosMemoryMalloc(sizeof(STagFilterResEntry)); - p->qTimes = 0; - tdListInit(&p->list, keyLen); - taosHashPut(pTableEntry, &suid, sizeof(uint64_t), &p, POINTER_BYTES); - tdListAppend(&p->list, pKey); + code = addNewEntry(pTableEntry, pKey, keyLen, suid); + if (code != TSDB_CODE_SUCCESS) { + goto _end; + } } else { - tdListAppend(&(*pEntry)->list, pKey); - } - - uint64_t buf[3] = {0}; - buf[0] = suid; - - memcpy(&buf[1], pKey, keyLen); - if (sizeof(uint64_t) + keyLen != 24) { - metaError("meta/cache: incorrect keyLen:%" PRId32 " length.", keyLen); - return TSDB_CODE_FAILED; + // check if it exists or not + size_t size = listNEles(&(*pEntry)->list); + if (size == 0) { + tdListAppend(&(*pEntry)->list, pKey); + } else { + SListNode* pNode = listHead(&(*pEntry)->list); + uint64_t* p = (uint64_t*)pNode->data; + if (p[1] == ((uint64_t*)pKey)[1] && p[0] == ((uint64_t*)pKey)[0]) { + 
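        // p points at the 16-byte digest stored in the head node of this suid's
        // list; its two 8-byte halves are compared against the incoming key.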
// we have already found the existed items, no need to added to cache anymore. + taosThreadMutexUnlock(pLock); + return TSDB_CODE_SUCCESS; + } else { // not equal, append it + tdListAppend(&(*pEntry)->list, pKey); + } + } } // add to cache. - int32_t ret = taosLRUCacheInsert(pCache, buf, sizeof(uint64_t) + keyLen, pPayload, payloadLen, freePayload, NULL, - TAOS_LRU_PRIORITY_LOW); - + taosLRUCacheInsert(pCache, buf, sizeof(uint64_t) * 2 + keyLen, pPayload, payloadLen, freePayload, NULL, + TAOS_LRU_PRIORITY_LOW); +_end: taosThreadMutexUnlock(pLock); - metaDebug("vgId:%d, suid:%" PRIu64 " list cache added into cache, total:%d, tables:%d, ret:%d", TD_VID(pMeta->pVnode), - suid, (int32_t)taosLRUCacheGetUsage(pCache), taosHashGetSize(pTableEntry), ret); + metaDebug("vgId:%d, suid:%" PRIu64 " list cache added into cache, total:%d, tables:%d", TD_VID(pMeta->pVnode), suid, + (int32_t)taosLRUCacheGetUsage(pCache), taosHashGetSize(pTableEntry)); - return TSDB_CODE_SUCCESS; + return code; } // remove the lru cache that are expired due to the tags value update, or creating, or dropping, of child tables int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) { int32_t keyLen = sizeof(uint64_t) * 3; - uint64_t p[3] = {0}; - p[0] = suid; + uint64_t p[4] = {0}; + + p[0] = (uint64_t)pMeta->pCache->sTagFilterResCache.pTableEntry; + p[1] = suid; TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock; @@ -594,11 +653,11 @@ int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) { SListNode* pNode = NULL; while ((pNode = tdListNext(&iter)) != NULL) { - memcpy(&p[1], pNode->data, 16); + memcpy(&p[2], pNode->data, 16); taosLRUCacheErase(pMeta->pCache->sTagFilterResCache.pUidResCache, p, keyLen); } - (*pEntry)->qTimes = 0; + (*pEntry)->hitTimes = 0; tdListEmpty(&(*pEntry)->list); taosThreadMutexUnlock(pLock); diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index e5bc301fa1..1d0b11e26a 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -201,7 +201,8 @@ _err: return -1; } -int metaClose(SMeta *pMeta) { +int metaClose(SMeta **ppMeta) { + SMeta *pMeta = *ppMeta; if (pMeta) { if (pMeta->pEnv) metaAbort(pMeta); if (pMeta->pCache) metaCacheClose(pMeta); @@ -221,7 +222,8 @@ int metaClose(SMeta *pMeta) { if (pMeta->pTbDb) tdbTbClose(pMeta->pTbDb); if (pMeta->pEnv) tdbClose(pMeta->pEnv); metaDestroyLock(pMeta); - taosMemoryFree(pMeta); + + taosMemoryFreeClear(*ppMeta); } return 0; diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index a4201117d6..91518f7a0b 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -1375,13 +1375,14 @@ static int32_t metaGetTableTagByUid(SMeta *pMeta, int64_t suid, int64_t uid, voi return ret; } -int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList, SHashObj *tags) { - const int32_t LIMIT = 4096; + +int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList) { + const int32_t LIMIT = 128; int32_t isLock = false; int32_t sz = uidList ? 
taosArrayGetSize(uidList) : 0; for (int i = 0; i < sz; i++) { - tb_uid_t *id = taosArrayGet(uidList, i); + STUidTagInfo *p = taosArrayGet(uidList, i); if (i % LIMIT == 0) { if (isLock) metaULock(pMeta); @@ -1390,52 +1391,73 @@ int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList, SHas isLock = true; } - if (taosHashGet(tags, id, sizeof(tb_uid_t)) == NULL) { - void *val = NULL; - int32_t len = 0; - if (metaGetTableTagByUid(pMeta, suid, *id, &val, &len, false) == 0) { - taosHashPut(tags, id, sizeof(tb_uid_t), val, len); - tdbFree(val); - } else { - metaError("vgId:%d, failed to table IDs, suid: %" PRId64 ", uid: %" PRId64 "", TD_VID(pMeta->pVnode), suid, - *id); - } + // if (taosHashGet(tags, &p->uid, sizeof(tb_uid_t)) == NULL) { + void *val = NULL; + int32_t len = 0; + if (metaGetTableTagByUid(pMeta, suid, p->uid, &val, &len, false) == 0) { + p->pTagVal = taosMemoryMalloc(len); + memcpy(p->pTagVal, val, len); + tdbFree(val); + } else { + metaError("vgId:%d, failed to table tags, suid: %" PRId64 ", uid: %" PRId64 "", TD_VID(pMeta->pVnode), suid, + p->uid); } } + // } if (isLock) metaULock(pMeta); - return 0; } -int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags) { +int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *pUidTagInfo) { SMCtbCursor *pCur = metaOpenCtbCursor(pMeta, suid, 1); - SHashObj *uHash = NULL; - size_t len = taosArrayGetSize(uidList); // len > 0 means there already have uids - if (len > 0) { - uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); - for (int i = 0; i < len; i++) { - int64_t *uid = taosArrayGet(uidList, i); - taosHashPut(uHash, uid, sizeof(int64_t), &i, sizeof(i)); + // If len > 0 means there already have uids, and we only want the + // tags of the specified tables, of which uid in the uid list. 
Otherwise, all table tags are retrieved and kept + // in the hash map, that may require a lot of memory + SHashObj *pSepecifiedUidMap = NULL; + size_t numOfElems = taosArrayGetSize(pUidTagInfo); + if (numOfElems > 0) { + pSepecifiedUidMap = + taosHashInit(numOfElems / 0.7, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + for (int i = 0; i < numOfElems; i++) { + STUidTagInfo *pTagInfo = taosArrayGet(pUidTagInfo, i); + taosHashPut(pSepecifiedUidMap, &pTagInfo->uid, sizeof(uint64_t), &i, sizeof(int32_t)); } } - while (1) { - tb_uid_t id = metaCtbCursorNext(pCur); - if (id == 0) { - break; - } + if (numOfElems == 0) { // all data needs to be added into the pUidTagInfo list + while (1) { + tb_uid_t uid = metaCtbCursorNext(pCur); + if (uid == 0) { + break; + } - if (len > 0 && taosHashGet(uHash, &id, sizeof(int64_t)) == NULL) { - continue; - } else if (len == 0) { - taosArrayPush(uidList, &id); + STUidTagInfo info = {.uid = uid, .pTagVal = pCur->pVal}; + info.pTagVal = taosMemoryMalloc(pCur->vLen); + memcpy(info.pTagVal, pCur->pVal, pCur->vLen); + taosArrayPush(pUidTagInfo, &info); } + } else { // only the specified tables need to be added + while (1) { + tb_uid_t uid = metaCtbCursorNext(pCur); + if (uid == 0) { + break; + } - taosHashPut(tags, &id, sizeof(int64_t), pCur->pVal, pCur->vLen); + int32_t *index = taosHashGet(pSepecifiedUidMap, &uid, sizeof(uint64_t)); + if (index == NULL) { + continue; + } + + STUidTagInfo *pTagInfo = taosArrayGet(pUidTagInfo, *index); + if (pTagInfo->pTagVal == NULL) { + pTagInfo->pTagVal = taosMemoryMalloc(pCur->vLen); + memcpy(pTagInfo->pTagVal, pCur->pVal, pCur->vLen); + } + } } - taosHashCleanup(uHash); + taosHashCleanup(pSepecifiedUidMap); metaCloseCtbCursor(pCur, 1); return TSDB_CODE_SUCCESS; } diff --git a/source/dnode/vnode/src/sma/smaFS.c b/source/dnode/vnode/src/sma/smaFS.c index ef872d055e..5dbe91f836 100644 --- a/source/dnode/vnode/src/sma/smaFS.c +++ b/source/dnode/vnode/src/sma/smaFS.c @@ -325,7 +325,7 @@ _exit: } static int32_t tdRSmaFSScanAndTryFix(SSma *pSma) { - int32_t code = 0; + int32_t code = 0; #if 0 int32_t lino = 0; SVnode *pVnode = pSma->pVnode; @@ -559,7 +559,7 @@ int32_t tdRSmaFSRef(SSma *pSma, SRSmaFS *pFS) { SRSmaFS *qFS = RSMA_FS(pStat); int32_t size = taosArrayGetSize(qFS->aQTaskInf); - pFS->aQTaskInf = taosArrayInit(size, sizeof(SQTaskFile)); + pFS->aQTaskInf = taosArrayInit_s(size, sizeof(SQTaskFile), size); if (pFS->aQTaskInf == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; TSDB_CHECK_CODE(code, lino, _exit); @@ -574,7 +574,6 @@ int32_t tdRSmaFSRef(SSma *pSma, SRSmaFS *pFS) { } } - taosArraySetSize(pFS->aQTaskInf, size); memcpy(pFS->aQTaskInf->pData, qFS->aQTaskInf->pData, size * sizeof(SQTaskFile)); _exit: @@ -640,9 +639,7 @@ int32_t tdRSmaFSCopy(SSma *pSma, SRSmaFS *pFS) { code = tdRSmaFSCreate(pFS, size); TSDB_CHECK_CODE(code, lino, _exit); - - taosArraySetSize(pFS->aQTaskInf, size); - memcpy(pFS->aQTaskInf->pData, qFS->aQTaskInf->pData, size * sizeof(SQTaskFile)); + taosArrayAddBatch(pFS->aQTaskInf, qFS->aQTaskInf->pData, size); _exit: if (code) { diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index e85dfc66de..9bdd8f4bdf 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -106,7 +106,7 @@ STQ* tqOpen(const char* path, SVnode* pVnode) { return NULL; } - if (streamLoadTasks(pTq->pStreamMeta) < 0) { + if (streamLoadTasks(pTq->pStreamMeta, walGetCommittedVer(pVnode->pWal)) < 0) { return NULL; } @@ -849,12 +849,9 @@ int32_t tqProcessSubscribeReq(STQ* 
pTq, int64_t version, char* msg, int32_t msgL pHandle->execHandle.task = qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, &pHandle->execHandle.numOfCols, NULL); - /*A(pHandle->execHandle.task);*/ void* scanner = NULL; qExtractStreamScanner(pHandle->execHandle.task, &scanner); - /*A(scanner);*/ pHandle->execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner); - /*A(pHandle->execHandle.pExecReader);*/ } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) { pHandle->pWalReader = walOpenReader(pTq->pVnode->pWal, NULL); pHandle->execHandle.pExecReader = tqOpenReader(pTq->pVnode); @@ -887,6 +884,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle)); tqDebug("try to persist handle %s consumer %" PRId64, req.subKey, pHandle->consumerId); if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) { + return -1; } } else { // TODO handle qmsg and exec modification @@ -898,6 +896,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL qStreamCloseTsdbReader(pHandle->execHandle.task); } if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) { + return -1; } // close handle } @@ -1216,6 +1215,9 @@ int32_t tqProcessTaskRecover2Req(STQ* pTq, int64_t version, char* msg, int32_t m return -1; } + atomic_store_8(&pTask->fillHistory, 0); + streamMetaSaveTask(pTq->pStreamMeta, pTask); + streamMetaReleaseTask(pTq->pStreamMeta, pTask); return 0; diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c index 05ed8d7348..c505a7d0ae 100644 --- a/source/dnode/vnode/src/tq/tqMeta.c +++ b/source/dnode/vnode/src/tq/tqMeta.c @@ -209,6 +209,8 @@ int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) { tEncoderInit(&encoder, buf, vlen); if (tEncodeSTqHandle(&encoder, pHandle) < 0) { + tEncoderClear(&encoder); + taosMemoryFree(buf); return -1; } @@ -216,18 +218,26 @@ int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) { if (tdbBegin(pTq->pMetaDB, &txn, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) { + tEncoderClear(&encoder); + taosMemoryFree(buf); return -1; } if (tdbTbUpsert(pTq->pExecStore, key, (int)strlen(key), buf, vlen, txn) < 0) { + tEncoderClear(&encoder); + taosMemoryFree(buf); return -1; } if (tdbCommit(pTq->pMetaDB, txn) < 0) { + tEncoderClear(&encoder); + taosMemoryFree(buf); return -1; } if (tdbPostCommit(pTq->pMetaDB, txn) < 0) { + tEncoderClear(&encoder); + taosMemoryFree(buf); return -1; } diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c index 2e3dc86ce9..cc1f147ac2 100644 --- a/source/dnode/vnode/src/tq/tqPush.c +++ b/source/dnode/vnode/src/tq/tqPush.c @@ -308,7 +308,7 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) taosWUnLockLatch(&pTq->pushLock); } - if (vnodeIsRoleLeader(pTq->pVnode)) { + if (!tsDisableStream && vnodeIsRoleLeader(pTq->pVnode)) { if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) return 0; if (msgType == TDMT_VND_SUBMIT) { void* data = taosMemoryMalloc(len); diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index c67fa4bdf1..c05206785b 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -17,7 +17,7 @@ static int32_t tsdbOpenBICache(STsdb *pTsdb) { int32_t code = 0; - SLRUCache *pCache = taosLRUCacheInit(5 * 1024 * 1024, -1, .5); + 
SLRUCache *pCache = taosLRUCacheInit(10 * 1024 * 1024, 0, .5); if (pCache == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _err; @@ -48,7 +48,7 @@ int32_t tsdbOpenCache(STsdb *pTsdb) { SLRUCache *pCache = NULL; size_t cfgCapacity = pTsdb->pVnode->config.cacheLastSize * 1024 * 1024; - pCache = taosLRUCacheInit(cfgCapacity, -1, .5); + pCache = taosLRUCacheInit(cfgCapacity, 1, .5); if (pCache == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _err; @@ -278,6 +278,11 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, TS goto _invalidate; } + if (nCol != pTSchema->numOfCols) { + invalidate = true; + goto _invalidate; + } + SLastCol *tTsVal = (SLastCol *)taosArrayGet(pLast, iCol); if (keyTs > tTsVal->ts) { STColumn *pTColumn = &pTSchema->columns[0]; @@ -293,6 +298,12 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, TS SColVal colVal = {0}; tsdbRowGetColVal(row, pTSchema, iCol, &colVal); + + if (colVal.cid != tColVal->cid) { + invalidate = true; + goto _invalidate; + } + if (!COL_VAL_IS_NONE(&colVal)) { if (keyTs == tTsVal1->ts && !COL_VAL_IS_NONE(tColVal)) { invalidate = true; @@ -302,7 +313,8 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, TS SLastCol lastCol = {.ts = keyTs, .colVal = colVal}; if (IS_VAR_DATA_TYPE(colVal.type) && colVal.value.nData > 0) { SLastCol *pLastCol = (SLastCol *)taosArrayGet(pLast, iCol); - taosMemoryFree(pLastCol->colVal.value.pData); + if (pLastCol->colVal.value.nData > 0 && NULL != pLastCol->colVal.value.pData) + taosMemoryFree(pLastCol->colVal.value.pData); lastCol.colVal.value.pData = taosMemoryMalloc(colVal.value.nData); if (lastCol.colVal.value.pData == NULL) { @@ -387,6 +399,11 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, TSDBROW *row, STsdb goto _invalidate; } + if (nCol != pTSchema->numOfCols) { + invalidate = true; + goto _invalidate; + } + SLastCol *tTsVal = (SLastCol *)taosArrayGet(pLast, iCol); if (keyTs > tTsVal->ts) { STColumn *pTColumn = &pTSchema->columns[0]; @@ -402,6 +419,12 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, TSDBROW *row, STsdb SColVal colVal = {0}; tsdbRowGetColVal(row, pTSchema, iCol, &colVal); + + if (colVal.cid != tColVal->cid) { + invalidate = true; + goto _invalidate; + } + if (COL_VAL_IS_VALUE(&colVal)) { if (keyTs == tTsVal1->ts && COL_VAL_IS_VALUE(tColVal)) { invalidate = true; @@ -411,7 +434,8 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, TSDBROW *row, STsdb SLastCol lastCol = {.ts = keyTs, .colVal = colVal}; if (IS_VAR_DATA_TYPE(colVal.type) && colVal.value.nData > 0) { SLastCol *pLastCol = (SLastCol *)taosArrayGet(pLast, iCol); - taosMemoryFree(pLastCol->colVal.value.pData); + if (pLastCol->colVal.value.nData > 0 && NULL != pLastCol->colVal.value.pData) + taosMemoryFree(pLastCol->colVal.value.pData); lastCol.colVal.value.pData = taosMemoryMalloc(colVal.value.nData); if (lastCol.colVal.value.pData == NULL) { @@ -692,6 +716,7 @@ typedef struct SFSNextRowIter { SArray *aDFileSet; SDataFReader **pDataFReader; SArray *aBlockIdx; + LRUHandle *aBlockIdxHandle; SBlockIdx *pBlockIdx; SMapData blockMap; int32_t nBlock; @@ -745,6 +770,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { } // tMapDataReset(&state->blockIdxMap); + /* if (!state->aBlockIdx) { state->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx)); } else { @@ -752,6 +778,12 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { } code = tsdbReadBlockIdx(*state->pDataFReader, state->aBlockIdx); if (code) 
goto _err; + */ + int32_t code = tsdbCacheGetBlockIdx(state->pTsdb->biCache, *state->pDataFReader, &state->aBlockIdxHandle); + if (code != TSDB_CODE_SUCCESS || state->aBlockIdxHandle == NULL) { + goto _err; + } + state->aBlockIdx = (SArray *)taosLRUCacheValue(state->pTsdb->biCache, state->aBlockIdxHandle); /* if (state->pBlockIdx) { */ /* } */ @@ -821,7 +853,10 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) { // resetLastBlockLoadInfo(state->pLoadInfo); if (state->aBlockIdx) { - taosArrayDestroy(state->aBlockIdx); + // taosArrayDestroy(state->aBlockIdx); + tsdbBICacheRelease(state->pTsdb->biCache, state->aBlockIdxHandle); + + state->aBlockIdxHandle = NULL; state->aBlockIdx = NULL; } @@ -844,7 +879,10 @@ _err: resetLastBlockLoadInfo(state->pLoadInfo); }*/ if (state->aBlockIdx) { - taosArrayDestroy(state->aBlockIdx); + // taosArrayDestroy(state->aBlockIdx); + tsdbBICacheRelease(state->pTsdb->biCache, state->aBlockIdxHandle); + + state->aBlockIdxHandle = NULL; state->aBlockIdx = NULL; } if (state->pBlockData) { @@ -870,7 +908,10 @@ int32_t clearNextRowFromFS(void *iter) { state->pDataFReader = NULL; }*/ if (state->aBlockIdx) { - taosArrayDestroy(state->aBlockIdx); + // taosArrayDestroy(state->aBlockIdx); + tsdbBICacheRelease(state->pTsdb->biCache, state->aBlockIdxHandle); + + state->aBlockIdxHandle = NULL; state->aBlockIdx = NULL; } if (state->pBlockData) { diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index a2de1bdf4e..6ea169a5e3 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -41,6 +41,13 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p int32_t slotId = slotIds[i]; SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, slotId); + // add check for null value, caused by the modification of table schema (new column added). + if (pColVal == NULL) { + p->ts = 0; + p->isNull = true; + continue; + } + p->ts = pColVal->ts; p->isNull = !COL_VAL_IS_VALUE(&pColVal->colVal); allNullRow = p->isNull & allNullRow; @@ -99,6 +106,38 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p return TSDB_CODE_SUCCESS; } +static int32_t setTableSchema(SCacheRowsReader* p, uint64_t suid, const char* idstr) { + int32_t numOfTables = p->numOfTables; + + if (suid != 0) { + p->pSchema = metaGetTbTSchema(p->pVnode->pMeta, suid, -1, 1); + if (p->pSchema == NULL) { + taosMemoryFree(p); + tsdbWarn("stable:%" PRIu64 " has been dropped, failed to retrieve cached rows, %s", suid, idstr); + return TSDB_CODE_PAR_TABLE_NOT_EXIST; + } + } else { + for (int32_t i = 0; i < numOfTables; ++i) { + uint64_t uid = p->pTableList[i].uid; + p->pSchema = metaGetTbTSchema(p->pVnode->pMeta, uid, -1, 1); + if (p->pSchema != NULL) { + break; + } + + tsdbWarn("table:%" PRIu64 " has been dropped, failed to retrieve cached rows, %s", uid, idstr); + } + + // all queried tables have been dropped already, return immediately. 
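  // p->pSchema can still be NULL here only if every uid in pTableList failed to
  // resolve to a schema, i.e. the whole group has been dropped, so release the
  // half-built reader and report TSDB_CODE_PAR_TABLE_NOT_EXIST.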
+ if (p->pSchema == NULL) { + taosMemoryFree(p); + tsdbWarn("all queried tables has been dropped, try next group, %s", idstr); + return TSDB_CODE_PAR_TABLE_NOT_EXIST; + } + } + + return TSDB_CODE_SUCCESS; +} + int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, int32_t numOfTables, int32_t numOfCols, uint64_t suid, void** pReader, const char* idstr) { *pReader = NULL; @@ -119,11 +158,15 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, return TSDB_CODE_SUCCESS; } - STableKeyInfo* pKeyInfo = &((STableKeyInfo*)pTableIdList)[0]; - p->pSchema = metaGetTbTSchema(p->pVnode->pMeta, pKeyInfo->uid, -1, 1); p->pTableList = pTableIdList; p->numOfTables = numOfTables; + int32_t code = setTableSchema(p, suid, idstr); + if (code != TSDB_CODE_SUCCESS) { + tsdbCacherowsReaderClose(p); + return code; + } + p->transferBuf = taosMemoryCalloc(p->pSchema->numOfCols, POINTER_BYTES); if (p->transferBuf == NULL) { tsdbCacherowsReaderClose(p); @@ -140,7 +183,8 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, } } - p->pLoadInfo = tCreateLastBlockLoadInfo(p->pSchema, NULL, 0); + int32_t numOfStt = ((SVnode*)pVnode)->config.sttTrigger; + p->pLoadInfo = tCreateLastBlockLoadInfo(p->pSchema, NULL, 0, numOfStt); if (p->pLoadInfo == NULL) { tsdbCacherowsReaderClose(p); return TSDB_CODE_OUT_OF_MEMORY; diff --git a/source/dnode/vnode/src/tsdb/tsdbFile.c b/source/dnode/vnode/src/tsdb/tsdbFile.c index 3c944584de..42728be657 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFile.c +++ b/source/dnode/vnode/src/tsdb/tsdbFile.c @@ -92,24 +92,56 @@ static int32_t tGetSmaFile(uint8_t *p, SSmaFile *pSmaFile) { } // EXPOSED APIS ================================================== +static char* getFileNamePrefix(STsdb *pTsdb, SDiskID did, int32_t fid, uint64_t commitId, char fname[]) { + const char* p1 = tfsGetDiskPath(pTsdb->pVnode->pTfs, did); + int32_t len = strlen(p1); + + char* p = memcpy(fname, p1, len); + p += len; + + *(p++) = TD_DIRSEP[0]; + len = strlen(pTsdb->path); + + memcpy(p, pTsdb->path, len); + p += len; + + *(p++) = TD_DIRSEP[0]; + *(p++) = 'v'; + + p += titoa(TD_VID(pTsdb->pVnode), 10, p); + *(p++) = 'f'; + + p += titoa(fid, 10, p); + + memcpy(p, "ver", 3); + p += 3; + + p += titoa(commitId, 10, p); + return p; +} + void tsdbHeadFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SHeadFile *pHeadF, char fname[]) { - snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did), - TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pHeadF->commitID, ".head"); + char* p = getFileNamePrefix(pTsdb, did, fid, pHeadF->commitID, fname); + memcpy(p, ".head", 5); + p[5] = 0; } void tsdbDataFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SDataFile *pDataF, char fname[]) { - snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did), - TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pDataF->commitID, ".data"); + char* p = getFileNamePrefix(pTsdb, did, fid, pDataF->commitID, fname); + memcpy(p, ".data", 5); + p[5] = 0; } void tsdbSttFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSttFile *pSttF, char fname[]) { - snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did), - TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pSttF->commitID, ".stt"); + char* p = getFileNamePrefix(pTsdb, did, fid, pSttF->commitID, fname); + memcpy(p, ".stt", 4); 
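  // terminate right after the 4-byte suffix; the resulting name has the shape
  // <diskPath>/<tsdbPath>/v<vgId>f<fid>ver<commitID>.stt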
+ p[4] = 0; } void tsdbSmaFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSmaFile *pSmaF, char fname[]) { - snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did), - TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pSmaF->commitID, ".sma"); + char* p = getFileNamePrefix(pTsdb, did, fid, pSmaF->commitID, fname); + memcpy(p, ".sma", 4); + p[4] = 0; } bool tsdbDelFileIsSame(SDelFile *pDelFile1, SDelFile *pDelFile2) { return pDelFile1->commitID == pDelFile2->commitID; } diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index a97cd3db27..d4af0422d7 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -31,14 +31,16 @@ struct SLDataIter { SSttBlockLoadInfo *pBlockLoadInfo; }; -SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols) { - SSttBlockLoadInfo *pLoadInfo = taosMemoryCalloc(TSDB_MAX_STT_TRIGGER, sizeof(SSttBlockLoadInfo)); +SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols, int32_t numOfSttTrigger) { + SSttBlockLoadInfo *pLoadInfo = taosMemoryCalloc(numOfSttTrigger, sizeof(SSttBlockLoadInfo)); if (pLoadInfo == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - for (int32_t i = 0; i < TSDB_MAX_STT_TRIGGER; ++i) { + pLoadInfo->numOfStt = numOfSttTrigger; + + for (int32_t i = 0; i < numOfSttTrigger; ++i) { pLoadInfo[i].blockIndex[0] = -1; pLoadInfo[i].blockIndex[1] = -1; pLoadInfo[i].currentLoadBlockIndex = 1; @@ -63,7 +65,7 @@ SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, } void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo) { - for (int32_t i = 0; i < TSDB_MAX_STT_TRIGGER; ++i) { + for (int32_t i = 0; i < pLoadInfo->numOfStt; ++i) { pLoadInfo[i].currentLoadBlockIndex = 1; pLoadInfo[i].blockIndex[0] = -1; pLoadInfo[i].blockIndex[1] = -1; @@ -77,14 +79,14 @@ void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo) { } void getLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo, int64_t *blocks, double *el) { - for (int32_t i = 0; i < TSDB_MAX_STT_TRIGGER; ++i) { + for (int32_t i = 0; i < pLoadInfo->numOfStt; ++i) { *el += pLoadInfo[i].elapsedTime; *blocks += pLoadInfo[i].loadBlocks; } } void *destroyLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo) { - for (int32_t i = 0; i < TSDB_MAX_STT_TRIGGER; ++i) { + for (int32_t i = 0; i < pLoadInfo->numOfStt; ++i) { pLoadInfo[i].currentLoadBlockIndex = 1; pLoadInfo[i].blockIndex[0] = -1; pLoadInfo[i].blockIndex[1] = -1; diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 4466721fb9..c83fdb2e4f 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -133,17 +133,17 @@ typedef struct SFileBlockDumpInfo { bool allDumped; } SFileBlockDumpInfo; -typedef struct SUidOrderCheckInfo { +typedef struct STableUidList { uint64_t* tableUidList; // access table uid list in uid ascending order list int32_t currentIndex; // index in table uid list -} SUidOrderCheckInfo; +} STableUidList; typedef struct SReaderStatus { bool loadFromFile; // check file stage bool composedDataBlock; // the returned data block is a composed block or not SHashObj* pTableMap; // SHash STableBlockScanInfo** pTableIter; // table iterator used in building in-memory buffer data blocks. 
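  // the uid list kept below is filled in createDataBlockScanInfo and sorted
  // ascending (taosSort with uidComparFunc); doLoadBlockIndex walks it in step
  // with aBlockIdx instead of probing pTableMap for every SBlockIdx entry.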
- SUidOrderCheckInfo uidCheckInfo; // check all table in uid order + STableUidList uidList; // check tables in uid order, to avoid the repeatly load of blocks in STT. SFileBlockDumpInfo fBlockDumpInfo; SDFileSet* pCurrentFileset; // current opened file set SBlockData fileBlockData; @@ -319,9 +319,19 @@ static void* getPosInBlockInfoBuf(SBlockInfoBuf* pBuf, int32_t index) { return (*pBucket) + (index % pBuf->numPerBucket) * sizeof(STableBlockScanInfo); } +static int32_t uidComparFunc(const void* p1, const void* p2) { + uint64_t pu1 = *(uint64_t*)p1; + uint64_t pu2 = *(uint64_t*)p2; + if (pu1 == pu2) { + return 0; + } else { + return (pu1 < pu2) ? -1 : 1; + } +} + // NOTE: speedup the whole processing by preparing the buffer for STableBlockScanInfo in batch model static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf, const STableKeyInfo* idList, - int32_t numOfTables) { + STableUidList* pUidList, int32_t numOfTables) { // allocate buffer in order to load data blocks from file // todo use simple hash instead, optimize the memory consumption SHashObj* pTableMap = @@ -333,9 +343,18 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf int64_t st = taosGetTimestampUs(); initBlockScanInfoBuf(pBuf, numOfTables); + pUidList->tableUidList = taosMemoryMalloc(numOfTables * sizeof(uint64_t)); + if (pUidList->tableUidList == NULL) { + return NULL; + } + pUidList->currentIndex = 0; + for (int32_t j = 0; j < numOfTables; ++j) { STableBlockScanInfo* pScanInfo = getPosInBlockInfoBuf(pBuf, j); + pScanInfo->uid = idList[j].uid; + pUidList->tableUidList[j] = idList[j].uid; + if (ASCENDING_TRAVERSE(pTsdbReader->order)) { int64_t skey = pTsdbReader->window.skey; pScanInfo->lastKey = (skey > INT64_MIN) ? (skey - 1) : skey; @@ -349,6 +368,8 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf pScanInfo->lastKey, pTsdbReader->idStr); } + taosSort(pUidList->tableUidList, numOfTables, sizeof(uint64_t), uidComparFunc); + pTsdbReader->cost.createScanInfoList = (taosGetTimestampUs() - st) / 1000.0; tsdbDebug("%p create %d tables scan-info, size:%.2f Kb, elapsed time:%.2f ms, %s", pTsdbReader, numOfTables, (sizeof(STableBlockScanInfo) * numOfTables) / 1024.0, pTsdbReader->cost.createScanInfoList, @@ -425,19 +446,6 @@ static STimeWindow updateQueryTimeWindow(STsdb* pTsdb, STimeWindow* pWindow) { return win; } -static void limitOutputBufferSize(const SQueryTableDataCond* pCond, int32_t* capacity) { - int32_t rowLen = 0; - for (int32_t i = 0; i < pCond->numOfCols; ++i) { - rowLen += pCond->colList[i].bytes; - } - - // make sure the output SSDataBlock size be less than 2MB. 
- const int32_t TWOMB = 2 * 1024 * 1024; - if ((*capacity) * rowLen > TWOMB) { - (*capacity) = TWOMB / rowLen; - } -} - // init file iterator static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdbReader* pReader) { size_t numOfFileset = taosArrayGetSize(aDFileSet); @@ -466,8 +474,10 @@ static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdb if (pLReader->pInfo == NULL) { // here we ignore the first column, which is always be the primary timestamp column - pLReader->pInfo = - tCreateLastBlockLoadInfo(pReader->pSchema, &pReader->suppInfo.colId[1], pReader->suppInfo.numOfCols - 1); + SBlockLoadSuppInfo* pInfo = &pReader->suppInfo; + + int32_t numOfStt = pReader->pTsdb->pVnode->config.sttTrigger; + pLReader->pInfo = tCreateLastBlockLoadInfo(pReader->pSchema, &pInfo->colId[1], pInfo->numOfCols - 1, numOfStt); if (pLReader->pInfo == NULL) { tsdbDebug("init fileset iterator failed, code:%s %s", tstrerror(terrno), pReader->idStr); return terrno; @@ -478,13 +488,15 @@ static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdb return TSDB_CODE_SUCCESS; } -static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) { +static int32_t filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader, bool *hasNext) { bool asc = ASCENDING_TRAVERSE(pIter->order); int32_t step = asc ? 1 : -1; pIter->index += step; + int32_t code = 0; if ((asc && pIter->index >= pIter->numOfFiles) || ((!asc) && pIter->index < 0)) { - return false; + *hasNext = false; + return TSDB_CODE_SUCCESS; } SIOCostSummary* pSum = &pReader->cost; @@ -504,7 +516,7 @@ static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) { pReader->status.pCurrentFileset = (SDFileSet*)taosArrayGet(pIter->pFileList, pIter->index); - int32_t code = tsdbDataFReaderOpen(&pReader->pFileReader, pReader->pTsdb, pReader->status.pCurrentFileset); + code = tsdbDataFReaderOpen(&pReader->pFileReader, pReader->pTsdb, pReader->status.pCurrentFileset); if (code != TSDB_CODE_SUCCESS) { goto _err; } @@ -518,24 +530,28 @@ static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) { if ((asc && win.skey > pReader->window.ekey) || (!asc && win.ekey < pReader->window.skey)) { tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %s", pReader, pReader->window.skey, pReader->window.ekey, pReader->idStr); - return false; + *hasNext = false; + return TSDB_CODE_SUCCESS; } if ((asc && (win.ekey < pReader->window.skey)) || ((!asc) && (win.skey > pReader->window.ekey))) { pIter->index += step; if ((asc && pIter->index >= pIter->numOfFiles) || ((!asc) && pIter->index < 0)) { - return false; + *hasNext = false; + return TSDB_CODE_SUCCESS; } continue; } tsdbDebug("%p file found fid:%d for qrange:%" PRId64 "-%" PRId64 ", %s", pReader, fid, pReader->window.skey, pReader->window.ekey, pReader->idStr); - return true; + *hasNext = true; + return TSDB_CODE_SUCCESS; } _err: - return false; + *hasNext = false; + return code; } static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order) { @@ -682,9 +698,6 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd goto _end; } - // todo refactor. 
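filesetIteratorNext is converted from returning bool to returning an error code with a separate hasNext out-parameter, so a failed tsdbDataFReaderOpen is no longer indistinguishable from "no more file sets". A reduced sketch of that calling convention; names and error codes are placeholders:

```c
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_OK     0
#define SKETCH_IO_ERR (-1)

typedef struct {
    int32_t index;
    int32_t numOfFiles;
} SIterSketch;

/* Hypothetical stand-in for opening the next file set; may fail. */
static int32_t openFileset(SIterSketch *it) { (void)it; return SKETCH_OK; }

/* Before: 'bool next(it)' -- a false return hid the difference between
 * "iterator exhausted" and "open failed". After: the return value carries the
 * error code and *hasNext carries the exhaustion flag separately. */
static int32_t filesetNext(SIterSketch *it, bool *hasNext) {
    it->index += 1;
    if (it->index >= it->numOfFiles) {
        *hasNext = false;
        return SKETCH_OK;          /* exhausted, but not an error */
    }

    int32_t code = openFileset(it);
    if (code != SKETCH_OK) {
        *hasNext = false;
        return code;               /* real failure propagated to the caller */
    }

    *hasNext = true;
    return SKETCH_OK;
}

int main(void) {
    SIterSketch it = {.index = -1, .numOfFiles = 2};
    bool hasNext = false;
    int32_t code;
    while ((code = filesetNext(&it, &hasNext)) == SKETCH_OK && hasNext) {
        /* process file set it.index */
    }
    return (code == SKETCH_OK) ? 0 : 1;
}
```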
- limitOutputBufferSize(pCond, &pReader->capacity); - // allocate buffer in order to load data blocks from file SBlockLoadSuppInfo* pSup = &pReader->suppInfo; pSup->pColAgg = taosArrayInit(pCond->numOfCols, sizeof(SColumnDataAgg)); @@ -701,7 +714,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd goto _end; } - setColumnIdSlotList(&pReader->suppInfo, pCond->colList, pCond->pSlotList, pCond->numOfCols); + setColumnIdSlotList(pSup, pCond->colList, pCond->pSlotList, pCond->numOfCols); tsdbInitReaderLock(pReader); @@ -715,57 +728,75 @@ _end: } static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, SArray* pIndexList) { - // SArray* aBlockIdx = taosArrayInit(8, sizeof(SBlockIdx)); - - int64_t st = taosGetTimestampUs(); - // int32_t code = tsdbReadBlockIdx(pFileReader, aBlockIdx); + int64_t st = taosGetTimestampUs(); LRUHandle* handle = NULL; int32_t code = tsdbCacheGetBlockIdx(pFileReader->pTsdb->biCache, pFileReader, &handle); if (code != TSDB_CODE_SUCCESS || handle == NULL) { goto _end; } + int32_t numOfTables = taosHashGetSize(pReader->status.pTableMap); + SArray* aBlockIdx = (SArray*)taosLRUCacheValue(pFileReader->pTsdb->biCache, handle); size_t num = taosArrayGetSize(aBlockIdx); if (num == 0) { tsdbBICacheRelease(pFileReader->pTsdb->biCache, handle); - // taosArrayDestroy(aBlockIdx); return TSDB_CODE_SUCCESS; } + // todo binary search to the start position int64_t et1 = taosGetTimestampUs(); - SBlockIdx* pBlockIdx = NULL; - for (int32_t i = 0; i < num; ++i) { + SBlockIdx* pBlockIdx = NULL; + STableUidList* pList = &pReader->status.uidList; + + int32_t i = 0, j = 0; + while (i < num && j < numOfTables) { pBlockIdx = (SBlockIdx*)taosArrayGet(aBlockIdx, i); - - // uid check if (pBlockIdx->suid != pReader->suid) { + i += 1; continue; } - // this block belongs to a table that is not queried. - void* p = taosHashGet(pReader->status.pTableMap, &pBlockIdx->uid, sizeof(uint64_t)); - if (p == NULL) { + if (pBlockIdx->uid < pList->tableUidList[j]) { + i += 1; continue; } - STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p; - if (pScanInfo->pBlockList == NULL) { - pScanInfo->pBlockList = taosArrayInit(4, sizeof(SBlockIndex)); + if (pBlockIdx->uid > pList->tableUidList[j]) { + j += 1; + continue; } - taosArrayPush(pIndexList, pBlockIdx); + if (pBlockIdx->uid == pList->tableUidList[j]) { + // this block belongs to a table that is not queried. 
+ void* p = taosHashGet(pReader->status.pTableMap, &pBlockIdx->uid, sizeof(uint64_t)); + if (p == NULL) { + tsdbError("failed to locate the tableBlockScan Info in hashmap, uid:%" PRIu64 ", %s", pBlockIdx->uid, + pReader->idStr); + return TSDB_CODE_APP_ERROR; + } + + STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p; + if (pScanInfo->pBlockList == NULL) { + pScanInfo->pBlockList = taosArrayInit(4, sizeof(SBlockIndex)); + } + + taosArrayPush(pIndexList, pBlockIdx); + + i += 1; + j += 1; + } } int64_t et2 = taosGetTimestampUs(); - tsdbDebug("load block index for %d tables completed, elapsed time:%.2f ms, set blockIdx:%.2f ms, size:%.2f Kb %s", - (int32_t)num, (et1 - st) / 1000.0, (et2 - et1) / 1000.0, num * sizeof(SBlockIdx) / 1024.0, pReader->idStr); + tsdbDebug("load block index for %d/%d tables completed, elapsed time:%.2f ms, set blockIdx:%.2f ms, size:%.2f Kb %s", + numOfTables, (int32_t)num, (et1 - st) / 1000.0, (et2 - et1) / 1000.0, num * sizeof(SBlockIdx) / 1024.0, + pReader->idStr); pReader->cost.headFileLoadTime += (et1 - st) / 1000.0; _end: - // taosArrayDestroy(aBlockIdx); tsdbBICacheRelease(pFileReader->pTsdb->biCache, handle); return code; } @@ -1691,7 +1722,7 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo* double elapsedTime = (taosGetTimestampUs() - st) / 1000.0; tsdbDebug("%p build data block from cache completed, elapsed time:%.2f ms, numOfRows:%d, brange:%" PRId64 - " - %" PRId64 ", uid:%"PRIu64", %s", + " - %" PRId64 ", uid:%" PRIu64 ", %s", pReader, elapsedTime, pBlock->info.rows, pBlock->info.window.skey, pBlock->info.window.ekey, pBlockScanInfo->uid, pReader->idStr); @@ -1721,7 +1752,7 @@ static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pB static bool nextRowFromLastBlocks(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, SVersionRange* pVerRange) { - int32_t step = ASCENDING_TRAVERSE(pLastBlockReader->order)? 1:-1; + int32_t step = ASCENDING_TRAVERSE(pLastBlockReader->order) ? 
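The reworked doLoadBlockIndex walks the uid-sorted block-index array and the pre-sorted query uid list with two cursors, advancing whichever side is behind and keeping only matching entries, instead of probing the table hash once per block index. The core of that two-pointer intersection, reduced to plain arrays:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Intersect two ascending uid sequences with one linear pass, mirroring the
 * reworked loop: advance whichever side is smaller, emit on a match.
 * Runs in O(num + numOfTables) with no per-entry hash probe. */
static size_t intersectSorted(const uint64_t *blockUids, size_t num,
                              const uint64_t *queryUids, size_t numOfTables,
                              uint64_t *out) {
    size_t i = 0, j = 0, k = 0;
    while (i < num && j < numOfTables) {
        if (blockUids[i] < queryUids[j]) {
            i++;                       /* block belongs to a table not queried */
        } else if (blockUids[i] > queryUids[j]) {
            j++;                       /* queried table has no block index here */
        } else {
            out[k++] = blockUids[i];   /* match: keep this block index */
            i++;
            j++;
        }
    }
    return k;
}

int main(void) {
    uint64_t blocks[] = {3, 5, 9, 12, 30};
    uint64_t tables[] = {5, 9, 10, 30};
    uint64_t hit[4];
    size_t n = intersectSorted(blocks, 5, tables, 4, hit);
    for (size_t i = 0; i < n; ++i) {
        printf("%llu\n", (unsigned long long)hit[i]);  /* prints 5, 9, 30 */
    }
    return 0;
}
```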
1 : -1; while (1) { bool hasVal = tMergeTreeNext(&pLastBlockReader->mergeTree); @@ -2407,7 +2438,8 @@ static bool initLastBlockReader(SLastBlockReader* pLBlockReader, STableBlockScan w.ekey = pScanInfo->lastKey + step; } - tsdbDebug("init last block reader, window:%"PRId64"-%"PRId64", uid:%"PRIu64", %s", w.skey, w.ekey, pScanInfo->uid, pReader->idStr); + tsdbDebug("init last block reader, window:%" PRId64 "-%" PRId64 ", uid:%" PRIu64 ", %s", w.skey, w.ekey, + pScanInfo->uid, pReader->idStr); int32_t code = tMergeTreeOpen(&pLBlockReader->mergeTree, (pLBlockReader->order == TSDB_ORDER_DESC), pReader->pFileReader, pReader->suid, pScanInfo->uid, &w, &pLBlockReader->verRange, pLBlockReader->pInfo, false, pReader->idStr); @@ -2770,13 +2802,19 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) { SArray* pIndexList = taosArrayInit(numOfTables, sizeof(SBlockIdx)); while (1) { - bool hasNext = filesetIteratorNext(&pStatus->fileIter, pReader); + bool hasNext = false; + int32_t code = filesetIteratorNext(&pStatus->fileIter, pReader, &hasNext); + if (code) { + taosArrayDestroy(pIndexList); + return code; + } + if (!hasNext) { // no data files on disk break; } taosArrayClear(pIndexList); - int32_t code = doLoadBlockIndex(pReader, pReader->pFileReader, pIndexList); + code = doLoadBlockIndex(pReader, pReader->pFileReader, pIndexList); if (code != TSDB_CODE_SUCCESS) { taosArrayDestroy(pIndexList); return code; @@ -2824,74 +2862,15 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) { return TSDB_CODE_SUCCESS; } -static int32_t uidComparFunc(const void* p1, const void* p2) { - uint64_t pu1 = *(uint64_t*)p1; - uint64_t pu2 = *(uint64_t*)p2; - if (pu1 == pu2) { - return 0; - } else { - return (pu1 < pu2) ? -1 : 1; - } +static void resetTableListIndex(SReaderStatus* pStatus) { + STableUidList* pList = &pStatus->uidList; + + pList->currentIndex = 0; + uint64_t uid = pList->tableUidList[0]; + pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); } -static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus, int32_t order) { - int32_t index = 0; - int32_t total = taosHashGetSize(pStatus->pTableMap); - - void* p = taosHashIterate(pStatus->pTableMap, NULL); - while (p != NULL) { - STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p; - pOrderCheckInfo->tableUidList[index++] = pScanInfo->uid; - p = taosHashIterate(pStatus->pTableMap, p); - } - - taosSort(pOrderCheckInfo->tableUidList, total, sizeof(uint64_t), uidComparFunc); -} - -static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, STsdbReader* pReader) { - SReaderStatus* pStatus = &pReader->status; - - int32_t total = taosHashGetSize(pStatus->pTableMap); - if (total == 0) { - return TSDB_CODE_SUCCESS; - } - - if (pOrderCheckInfo->tableUidList == NULL) { - pOrderCheckInfo->currentIndex = 0; - pOrderCheckInfo->tableUidList = taosMemoryMalloc(total * sizeof(uint64_t)); - if (pOrderCheckInfo->tableUidList == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; - } - - extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order); - uint64_t uid = pOrderCheckInfo->tableUidList[0]; - pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); - } else { - if (pStatus->pTableIter == NULL) { // it is the last block of a new file - pOrderCheckInfo->currentIndex = 0; - uint64_t uid = pOrderCheckInfo->tableUidList[pOrderCheckInfo->currentIndex]; - pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); - - // 
the tableMap has already updated - if (pStatus->pTableIter == NULL) { - void* p = taosMemoryRealloc(pOrderCheckInfo->tableUidList, total * sizeof(uint64_t)); - if (p == NULL) { - return TSDB_CODE_OUT_OF_MEMORY; - } - - pOrderCheckInfo->tableUidList = p; - extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order); - - uid = pOrderCheckInfo->tableUidList[0]; - pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid)); - } - } - } - - return TSDB_CODE_SUCCESS; -} - -static bool moveToNextTable(SUidOrderCheckInfo* pOrderedCheckInfo, SReaderStatus* pStatus) { +static bool moveToNextTable(STableUidList* pOrderedCheckInfo, SReaderStatus* pStatus) { pOrderedCheckInfo->currentIndex += 1; if (pOrderedCheckInfo->currentIndex >= taosHashGetSize(pStatus->pTableMap)) { pStatus->pTableIter = NULL; @@ -2906,11 +2885,10 @@ static bool moveToNextTable(SUidOrderCheckInfo* pOrderedCheckInfo, SReaderStatus static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { SReaderStatus* pStatus = &pReader->status; SLastBlockReader* pLastBlockReader = pStatus->fileIter.pLastBlockReader; + STableUidList* pUidList = &pStatus->uidList; - SUidOrderCheckInfo* pOrderedCheckInfo = &pStatus->uidCheckInfo; - int32_t code = initOrderCheckInfo(pOrderedCheckInfo, pReader); - if (code != TSDB_CODE_SUCCESS || (taosHashGetSize(pStatus->pTableMap) == 0)) { - return code; + if (taosHashGetSize(pStatus->pTableMap) == 0) { + return TSDB_CODE_SUCCESS; } SSDataBlock* pResBlock = pReader->pResBlock; @@ -2921,7 +2899,7 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { bool hasVal = initLastBlockReader(pLastBlockReader, pScanInfo, pReader); if (!hasVal) { - bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus); + bool hasNexTable = moveToNextTable(pUidList, pStatus); if (!hasNexTable) { return TSDB_CODE_SUCCESS; } @@ -2956,7 +2934,7 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { } // current table is exhausted, let's try next table - bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus); + bool hasNexTable = moveToNextTable(pUidList, pStatus); if (!hasNexTable) { return TSDB_CODE_SUCCESS; } @@ -3061,14 +3039,15 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { static int32_t buildBlockFromBufferSequentially(STsdbReader* pReader) { SReaderStatus* pStatus = &pReader->status; + STableUidList* pUidList = &pStatus->uidList; while (1) { - if (pStatus->pTableIter == NULL) { - pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, NULL); - if (pStatus->pTableIter == NULL) { - return TSDB_CODE_SUCCESS; - } - } + // if (pStatus->pTableIter == NULL) { + // pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, NULL); + // if (pStatus->pTableIter == NULL) { + // return TSDB_CODE_SUCCESS; + // } + // } STableBlockScanInfo** pBlockScanInfo = pStatus->pTableIter; initMemDataIterator(*pBlockScanInfo, pReader); @@ -3083,9 +3062,9 @@ static int32_t buildBlockFromBufferSequentially(STsdbReader* pReader) { return TSDB_CODE_SUCCESS; } - // current table is exhausted, let's try the next table - pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter); - if (pStatus->pTableIter == NULL) { + // current table is exhausted, let's try next table + bool hasNexTable = moveToNextTable(pUidList, pStatus); + if (!hasNexTable) { return TSDB_CODE_SUCCESS; } } @@ -3114,8 +3093,7 @@ static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter) static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBlockIter) 
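Table iteration is now driven by the sorted uid list plus a currentIndex cursor (resetTableListIndex and moveToNextTable) rather than taosHashIterate, which keeps the scan order stable across file sets and the in-memory stage. A cut-down sketch of that cursor; the real code additionally resolves each uid through pTableMap:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the uid-list-driven table cursor that replaces taosHashIterate:
 * tables are visited strictly in ascending-uid order, and the cursor is simply
 * reset to 0 whenever a new file set (or the buffer stage) begins. */
typedef struct {
    const uint64_t *tableUidList;  /* sorted ascending */
    int32_t         numOfTables;
    int32_t         currentIndex;
} STableCursorSketch;

static void resetTableListIndex(STableCursorSketch *c) { c->currentIndex = 0; }

static bool moveToNextTable(STableCursorSketch *c, uint64_t *uid) {
    c->currentIndex += 1;
    if (c->currentIndex >= c->numOfTables) {
        return false;                        /* all tables exhausted */
    }
    *uid = c->tableUidList[c->currentIndex]; /* real code then looks this uid
                                                up in pTableMap */
    return true;
}

int main(void) {
    uint64_t uids[] = {11, 42, 99};
    STableCursorSketch c = {uids, 3, 0};
    resetTableListIndex(&c);

    uint64_t uid = uids[0];
    do {
        printf("scan table uid %llu\n", (unsigned long long)uid);
    } while (moveToNextTable(&c, &uid));
    return 0;
}
```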
{ SBlockNumber num = {0}; - - int32_t code = moveToNextFile(pReader, &num); + int32_t code = moveToNextFile(pReader, &num); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -3132,6 +3110,7 @@ static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBl } else { // no block data, only last block exists tBlockDataReset(&pReader->status.fileBlockData); resetDataBlockIterator(pBlockIter, pReader->order); + resetTableListIndex(&pReader->status); } // set the correct start position according to the query time window @@ -3172,6 +3151,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { // this file does not have data files, let's start check the last block file if exists if (pBlockIter->numOfBlocks == 0) { + resetTableListIndex(&pReader->status); goto _begin; } } @@ -3208,6 +3188,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { tBlockDataReset(pBlockData); resetDataBlockIterator(pBlockIter, pReader->order); + resetTableListIndex(&pReader->status); goto _begin; } else { code = initForFirstBlockInFile(pReader, pBlockIter); @@ -3219,6 +3200,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) { // this file does not have blocks, let's start check the last block file if (pBlockIter->numOfBlocks == 0) { + resetTableListIndex(&pReader->status); goto _begin; } } @@ -3910,11 +3892,15 @@ int32_t tsdbSetTableList(STsdbReader* pReader, const void* pTableList, int32_t n ASSERT(size >= num); taosHashClear(pReader->status.pTableMap); + STableUidList* pUidList = &pReader->status.uidList; + pUidList->currentIndex = 0; STableKeyInfo* pList = (STableKeyInfo*)pTableList; for (int32_t i = 0; i < num; ++i) { STableBlockScanInfo* pInfo = getPosInBlockInfoBuf(&pReader->blockInfoBuf, i); pInfo->uid = pList[i].uid; + pUidList->tableUidList[i] = pList[i].uid; + taosHashPut(pReader->status.pTableMap, &pInfo->uid, sizeof(uint64_t), &pInfo, POINTER_BYTES); } @@ -3938,18 +3924,24 @@ void* tsdbGetIvtIdx(SMeta* pMeta) { uint64_t getReaderMaxVersion(STsdbReader* pReader) { return pReader->verRange.maxVer; } static int32_t doOpenReaderImpl(STsdbReader* pReader) { - SDataBlockIter* pBlockIter = &pReader->status.blockIter; + SReaderStatus* pStatus = &pReader->status; + SDataBlockIter* pBlockIter = &pStatus->blockIter; - initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader); - resetDataBlockIterator(&pReader->status.blockIter, pReader->order); + initFilesetIterator(&pStatus->fileIter, pReader->pReadSnap->fs.aDFileSet, pReader); + resetDataBlockIterator(&pStatus->blockIter, pReader->order); - // no data in files, let's try buffer in memory - if (pReader->status.fileIter.numOfFiles == 0) { - pReader->status.loadFromFile = false; - return TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; + if (pStatus->fileIter.numOfFiles == 0) { + pStatus->loadFromFile = false; } else { - return initForFirstBlockInFile(pReader, pBlockIter); + code = initForFirstBlockInFile(pReader, pBlockIter); } + + if (!pStatus->loadFromFile) { + resetTableListIndex(pStatus); + } + + return code; } // ====================================== EXPOSED APIs ====================================== @@ -3961,11 +3953,9 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL pCond->twindows.ekey -= 1; } - int32_t capacity = 0; - if (pResBlock == NULL) { - capacity = 4096; - } else { - capacity = pResBlock->info.capacity; + int32_t capacity = pVnode->config.tsdbCfg.maxRows; + if (pResBlock != NULL) { + blockDataEnsureCapacity(pResBlock, capacity); } 
int32_t code = tsdbReaderCreate(pVnode, pCond, ppReader, capacity, pResBlock, idstr); @@ -4038,7 +4028,8 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL } STsdbReader* p = (pReader->innerReader[0] != NULL) ? pReader->innerReader[0] : pReader; - pReader->status.pTableMap = createDataBlockScanInfo(p, &pReader->blockInfoBuf, pTableList, numOfTables); + pReader->status.pTableMap = + createDataBlockScanInfo(p, &pReader->blockInfoBuf, pTableList, &pReader->status.uidList, numOfTables); if (pReader->status.pTableMap == NULL) { *ppReader = NULL; code = TSDB_CODE_OUT_OF_MEMORY; @@ -4053,6 +4044,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL _err: tsdbError("failed to create data reader, code:%s %s", tstrerror(code), idstr); tsdbReaderClose(pReader); + *ppReader = NULL; // reset the pointer value. return code; } @@ -4067,6 +4059,7 @@ void tsdbReaderClose(STsdbReader* pReader) { STsdbReader* p = pReader->innerReader[0]; p->status.pTableMap = NULL; + p->status.uidList.tableUidList = NULL; p->pReadSnap = NULL; p->pSchema = NULL; p->pMemSchema = NULL; @@ -4074,6 +4067,7 @@ void tsdbReaderClose(STsdbReader* pReader) { p = pReader->innerReader[1]; p->status.pTableMap = NULL; + p->status.uidList.tableUidList = NULL; p->pReadSnap = NULL; p->pSchema = NULL; p->pMemSchema = NULL; @@ -4127,7 +4121,7 @@ void tsdbReaderClose(STsdbReader* pReader) { tsdbUninitReaderLock(pReader); - taosMemoryFree(pReader->status.uidCheckInfo.tableUidList); + taosMemoryFree(pReader->status.uidList.tableUidList); SIOCostSummary* pCost = &pReader->cost; SFilesetIter* pFilesetIter = &pReader->status.fileIter; @@ -4321,12 +4315,14 @@ int32_t tsdbReaderResume(STsdbReader* pReader) { // we need only one row pPrevReader->capacity = 1; pPrevReader->status.pTableMap = pReader->status.pTableMap; + pPrevReader->status.uidList = pReader->status.uidList; pPrevReader->pSchema = pReader->pSchema; pPrevReader->pMemSchema = pReader->pMemSchema; pPrevReader->pReadSnap = pReader->pReadSnap; pNextReader->capacity = 1; pNextReader->status.pTableMap = pReader->status.pTableMap; + pNextReader->status.uidList = pReader->status.uidList; pNextReader->pSchema = pReader->pSchema; pNextReader->pMemSchema = pReader->pMemSchema; pNextReader->pReadSnap = pReader->pReadSnap; @@ -4368,6 +4364,7 @@ static bool doTsdbNextDataBlock(STsdbReader* pReader) { if (pBlock->info.rows > 0) { return true; } else { + resetTableListIndex(&pReader->status); buildBlockFromBufferSequentially(pReader); return pBlock->info.rows > 0; } @@ -4378,7 +4375,7 @@ static bool doTsdbNextDataBlock(STsdbReader* pReader) { } bool tsdbNextDataBlock(STsdbReader* pReader) { - if (isEmptyQueryTimeWindow(&pReader->window)) { + if (isEmptyQueryTimeWindow(&pReader->window) || pReader->step == EXTERNAL_ROWS_NEXT) { return false; } @@ -4427,7 +4424,7 @@ bool tsdbNextDataBlock(STsdbReader* pReader) { return ret; } - if (pReader->innerReader[1] != NULL && pReader->step == EXTERNAL_ROWS_MAIN) { + if (pReader->step == EXTERNAL_ROWS_MAIN && pReader->innerReader[1] != NULL) { // prepare for the next row scan int32_t code = doOpenReaderImpl(pReader->innerReader[1]); resetAllDataBlockScanInfo(pReader->innerReader[1]->status.pTableMap, pReader->window.ekey); @@ -4435,16 +4432,16 @@ bool tsdbNextDataBlock(STsdbReader* pReader) { return code; } - bool ret1 = doTsdbNextDataBlock(pReader->innerReader[1]); + ret = doTsdbNextDataBlock(pReader->innerReader[1]); pReader->step = EXTERNAL_ROWS_NEXT; - if (ret1) { + if (ret) { pStatus = 
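One small but load-bearing addition above is the `*ppReader = NULL` on the error path of tsdbReaderOpen, so a caller can never hold a pointer to a reader that was already torn down. A minimal sketch of an open routine with that guarantee; the error code and names are made up:

```c
#include <stdio.h>
#include <stdlib.h>

typedef struct { int fd; } SReaderSketch;

#define ERR_FAILED (-1)

/* Mirrors the _err path added above: on any failure the half-built reader is
 * destroyed and the caller's handle is reset to NULL, so the caller can never
 * close or use a dangling pointer. */
static int readerOpen(SReaderSketch **ppReader, int shouldFail) {
    SReaderSketch *p = calloc(1, sizeof(*p));
    if (p == NULL) goto _err;
    *ppReader = p;

    if (shouldFail) goto _err;   /* stand-in for any later init failure */
    return 0;

_err:
    free(p);
    *ppReader = NULL;            /* reset the pointer value on error */
    return ERR_FAILED;
}

int main(void) {
    SReaderSketch *r = NULL;
    int code = readerOpen(&r, 1);
    printf("code=%d reader=%p\n", code, (void *)r);  /* reader stays NULL */
    return 0;
}
```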
&pReader->innerReader[1]->status; if (pStatus->composedDataBlock) { qTrace("tsdb/read: %p, unlock read mutex", pReader); tsdbReleaseReader(pReader); } - return ret1; + return ret; } } @@ -4612,8 +4609,6 @@ SSDataBlock* tsdbRetrieveDataBlock(STsdbReader* pReader, SArray* pIdList) { } int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { - SReaderStatus* pStatus = &pReader->status; - qTrace("tsdb/reader-reset: %p, take read mutex", pReader); tsdbAcquireReader(pReader); @@ -4629,12 +4624,14 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { return TSDB_CODE_SUCCESS; } - SDataBlockIter* pBlockIter = &pReader->status.blockIter; + SReaderStatus* pStatus = &pReader->status; + + SDataBlockIter* pBlockIter = &pStatus->blockIter; pReader->order = pCond->order; pReader->type = TIMEWINDOW_RANGE_CONTAINED; - pReader->status.loadFromFile = true; - pReader->status.pTableIter = NULL; + pStatus->loadFromFile = true; + pStatus->pTableIter = NULL; pReader->window = updateQueryTimeWindow(pReader->pTsdb, &pCond->twindows); // allocate buffer in order to load data blocks from file @@ -4643,19 +4640,21 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { pReader->suppInfo.tsColAgg.colId = PRIMARYKEY_TIMESTAMP_COL_ID; tsdbDataFReaderClose(&pReader->pFileReader); - int32_t numOfTables = taosHashGetSize(pReader->status.pTableMap); + int32_t numOfTables = taosHashGetSize(pStatus->pTableMap); - initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader); + initFilesetIterator(&pStatus->fileIter, pReader->pReadSnap->fs.aDFileSet, pReader); resetDataBlockIterator(pBlockIter, pReader->order); + resetTableListIndex(&pReader->status); int64_t ts = ASCENDING_TRAVERSE(pReader->order) ? pReader->window.skey - 1 : pReader->window.ekey + 1; - resetAllDataBlockScanInfo(pReader->status.pTableMap, ts); + resetAllDataBlockScanInfo(pStatus->pTableMap, ts); int32_t code = 0; // no data in files, let's try buffer in memory - if (pReader->status.fileIter.numOfFiles == 0) { - pReader->status.loadFromFile = false; + if (pStatus->fileIter.numOfFiles == 0) { + pStatus->loadFromFile = false; + resetTableListIndex(pStatus); } else { code = initForFirstBlockInFile(pReader, pBlockIter); if (code != TSDB_CODE_SUCCESS) { @@ -4739,7 +4738,7 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa hasNext = blockIteratorNext(&pStatus->blockIter, pReader->idStr); } else { code = initForFirstBlockInFile(pReader, pBlockIter); - if ((code != TSDB_CODE_SUCCESS) || (pReader->status.loadFromFile == false)) { + if ((code != TSDB_CODE_SUCCESS) || (pStatus->loadFromFile == false)) { break; } diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index c7bce6182a..50fd9d7aa7 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -47,15 +47,21 @@ static int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsd taosMemoryFree(pFD); goto _exit; } - if (taosStatFile(path, &pFD->szFile, NULL) < 0) { - code = TAOS_SYSTEM_ERROR(errno); - taosMemoryFree(pFD->pBuf); - taosCloseFile(&pFD->pFD); - taosMemoryFree(pFD); - goto _exit; + + // not check file size when reading data files. 
+ if (flag != TD_FILE_READ) { + if (taosStatFile(path, &pFD->szFile, NULL) < 0) { + code = TAOS_SYSTEM_ERROR(errno); + taosMemoryFree(pFD->pBuf); + taosCloseFile(&pFD->pFD); + taosMemoryFree(pFD); + goto _exit; + } + + ASSERT(pFD->szFile % szPage == 0); + pFD->szFile = pFD->szFile / szPage; } - ASSERT(pFD->szFile % szPage == 0); - pFD->szFile = pFD->szFile / szPage; + *ppFD = pFD; _exit: @@ -103,7 +109,7 @@ _exit: static int32_t tsdbReadFilePage(STsdbFD *pFD, int64_t pgno) { int32_t code = 0; - ASSERT(pgno <= pFD->szFile); + // ASSERT(pgno <= pFD->szFile); // seek int64_t offset = PAGE_OFFSET(pgno, pFD->szPage); @@ -175,7 +181,7 @@ static int32_t tsdbReadFile(STsdbFD *pFD, int64_t offset, uint8_t *pBuf, int64_t int32_t szPgCont = PAGE_CONTENT_SIZE(pFD->szPage); int64_t bOffset = fOffset % pFD->szPage; - ASSERT(pgno && pgno <= pFD->szFile); + // ASSERT(pgno && pgno <= pFD->szFile); ASSERT(bOffset < szPgCont); while (n < size) { diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c index 9f57887d48..1a98134d70 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c +++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c @@ -328,10 +328,6 @@ static int32_t tsdbSnapCmprTombData(STsdbSnapReader* pReader, uint8_t** ppData) _exit: if (code) { tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pReader->pTsdb->pVnode), __func__, lino, tstrerror(code)); - if (pData) { - taosMemoryFree(pData); - pData = NULL; - } } *ppData = pData; return code; @@ -404,7 +400,7 @@ static int32_t tsdbSnapReadTombData(STsdbSnapReader* pReader, uint8_t** ppData) } while (pDelInfo && pDelInfo->suid == pReader->tbid.suid && pDelInfo->uid == pReader->tbid.uid) { - if (taosArrayPush(pReader->aDelData, &pDelInfo->delData) < 0) { + if (taosArrayPush(pReader->aDelData, &pDelInfo->delData) == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; TSDB_CHECK_CODE(code, lino, _exit); } @@ -1252,7 +1248,7 @@ static int32_t tsdbSnapWriteDelTableData(STsdbSnapWriter* pWriter, TABLEID* pId, SDelData delData; n += tGetDelData(pData + n, &delData); - if (taosArrayPush(pWriter->aDelData, &delData) < 0) { + if (taosArrayPush(pWriter->aDelData, &delData) == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; TSDB_CHECK_CODE(code, lino, _exit); } @@ -1420,6 +1416,7 @@ _exit: tBlockDataDestroy(&pWriter->bData); tBlockDataDestroy(&pWriter->inData); tsdbFSDestroy(&pWriter->fs); + taosMemoryFree(pWriter); pWriter = NULL; } } else { diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c index 24dcae91d9..36834ce921 100644 --- a/source/dnode/vnode/src/tsdb/tsdbUtil.c +++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c @@ -116,12 +116,7 @@ int32_t tMapDataToArray(SMapData *pMapData, int32_t itemSize, int32_t (*tGetItem } _exit: - if (code) { - *ppArray = NULL; - if (pArray) taosArrayDestroy(pArray); - } else { - *ppArray = pArray; - } + *ppArray = pArray; return code; } @@ -1051,9 +1046,7 @@ static int32_t tsdbMergeSkyline(SArray *pSkyline1, SArray *pSkyline2, SArray *pS i2++; } - taosArraySetSize(pSkyline, TARRAY_ELEM_IDX(pSkyline, pItem)); - -_exit: + pSkyline->size = TARRAY_ELEM_IDX(pSkyline, pItem); return code; } @@ -1235,14 +1228,22 @@ int32_t tBlockDataInit(SBlockData *pBlockData, TABLEID *pId, STSchema *pTSchema, int32_t iColumn = 1; STColumn *pTColumn = &pTSchema->columns[iColumn]; for (int32_t iCid = 0; iCid < nCid; iCid++) { - ASSERT(pTColumn); + if (ASSERTS(pTColumn != NULL, "invalid input param")) { + code = TSDB_CODE_INVALID_PARA; + goto _exit; + } + while (pTColumn->colId < 
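Several hard ASSERT calls are replaced with an ASSERTS(...) check whose failure path sets an error code and returns instead of aborting. The macro's definition is not part of this excerpt; judging from its use here it appears to evaluate to non-zero when the condition fails. A hypothetical stand-in that reproduces only the calling pattern:

```c
#include <stdio.h>

/* Hypothetical soft-assert in the spirit of the ASSERTS(...) usage above:
 * evaluates to non-zero when the condition FAILS, after logging, so the
 * caller can branch into an error path instead of crashing the process.
 * This is a sketch of the calling pattern, not the real TDengine macro. */
#define SOFT_ASSERT(cond, msg) \
    ((cond) ? 0 : (fprintf(stderr, "assert failed: %s\n", (msg)), 1))

#define ERR_INVALID_PARA (-2)

static int initBlockData(const void *pTColumn) {
    int code = 0;
    if (SOFT_ASSERT(pTColumn != NULL, "invalid input param")) {
        code = ERR_INVALID_PARA;   /* report the error instead of aborting */
        goto _exit;
    }
    /* ... normal initialization would go here ... */
_exit:
    return code;
}

int main(void) {
    printf("code = %d\n", initBlockData(NULL));  /* -> code = -2 */
    return 0;
}
```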
aCid[iCid]) { iColumn++; ASSERT(iColumn < pTSchema->numOfCols); pTColumn = &pTSchema->columns[iColumn]; } - ASSERT(pTColumn->colId == aCid[iCid]); + if (ASSERTS(pTColumn->colId == aCid[iCid], "invalid input param")) { + code = TSDB_CODE_INVALID_PARA; + goto _exit; + } + tColDataInit(&pBlockData->aColData[iCid], pTColumn->colId, pTColumn->type, (pTColumn->flags & COL_SMA_ON) ? 1 : 0); diff --git a/source/dnode/vnode/src/tsdb/tsdbWrite.c b/source/dnode/vnode/src/tsdb/tsdbWrite.c index 301b504346..bd2d263804 100644 --- a/source/dnode/vnode/src/tsdb/tsdbWrite.c +++ b/source/dnode/vnode/src/tsdb/tsdbWrite.c @@ -31,7 +31,9 @@ int tsdbInsertData(STsdb *pTsdb, int64_t version, SSubmitReq2 *pMsg, SSubmitRsp2 int32_t affectedrows = 0; int32_t numOfRows = 0; - ASSERT(pTsdb->mem != NULL); + if (ASSERTS(pTsdb->mem != NULL, "vgId:%d, mem is NULL", TD_VID(pTsdb->pVnode))) { + return -1; + } if (pMsg) { arrSize = taosArrayGetSize(pMsg->aSubmitTbData); diff --git a/source/dnode/vnode/src/vnd/vnodeCompact.c b/source/dnode/vnode/src/vnd/vnodeCompact.c index 14d893042d..16e39d75dc 100644 --- a/source/dnode/vnode/src/vnd/vnodeCompact.c +++ b/source/dnode/vnode/src/vnd/vnodeCompact.c @@ -36,8 +36,8 @@ static int32_t vnodeCompactTask(void *param) { vnodeCommitInfo(dir); _exit: - taosMemoryFree(pInfo); tsem_post(&pInfo->pVnode->canCommit); + taosMemoryFree(pInfo); return code; } static int32_t vnodePrepareCompact(SVnode *pVnode, SCompactInfo *pInfo) { @@ -59,9 +59,17 @@ static int32_t vnodePrepareCompact(SVnode *pVnode, SCompactInfo *pInfo) { snprintf(dir, TSDB_FILENAME_LEN, "%s", pVnode->path); } - vnodeLoadInfo(dir, &info); + if (vnodeLoadInfo(dir, &info) < 0) { + code = terrno; + goto _exit; + } + info.state.commitID = pInfo->commitID; - vnodeSaveInfo(dir, &info); + + if (vnodeSaveInfo(dir, &info) < 0) { + code = terrno; + goto _exit; + } _exit: if (code) { diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index 11d8583ff8..c7d155be0d 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -48,7 +48,7 @@ int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs) { info.state.applied = -1; info.state.commitID = 0; - vInfo("vgId:%d, save config while create", pCfg->vgId); + vInfo("vgId:%d, save config while create", info.config.vgId); if (vnodeSaveInfo(dir, &info) < 0 || vnodeCommitInfo(dir) < 0) { vError("vgId:%d, failed to save vnode config since %s", pCfg ? 
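The vnodeCompactTask hunk reorders taosMemoryFree(pInfo) to come after tsem_post(&pInfo->pVnode->canCommit); the old order freed the struct and then dereferenced it. A tiny sketch of the same use-after-free fix with placeholder types:

```c
#include <stdio.h>
#include <stdlib.h>

typedef struct { int *doneFlag; } STaskInfoSketch;

/* Before the fix the task freed its info struct and then read through it
 * (pInfo->pVnode->canCommit) -- a use-after-free. The fix is simply to do the
 * last access through the pointer before releasing it. */
static int compactTask(STaskInfoSketch *pInfo) {
    /* ... compaction work ... */
    *pInfo->doneFlag = 1;   /* last access through pInfo (stands in for tsem_post) */
    free(pInfo);            /* only now is it safe to release the struct */
    return 0;
}

int main(void) {
    int done = 0;
    STaskInfoSketch *p = malloc(sizeof(*p));
    if (p == NULL) return 1;
    p->doneFlag = &done;
    compactTask(p);
    printf("done=%d\n", done);
    return 0;
}
```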
pCfg->vgId : 0, tstrerror(terrno)); return -1; @@ -124,7 +124,7 @@ int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t sr while (1) { const STfsFile *tsdbFile = tfsReaddir(tsdbDir); if (tsdbFile == NULL) break; - if (tsdbFile->rname == NULL) continue; + if (tsdbFile->rname[0] == '\0') continue; tstrncpy(oldRname, tsdbFile->rname, TSDB_FILENAME_LEN); char *tsdbFilePrefixPos = strstr(oldRname, tsdbFilePrefix); @@ -365,7 +365,7 @@ _err: if (pVnode->pWal) walClose(pVnode->pWal); if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb); if (pVnode->pSma) smaClose(pVnode->pSma); - if (pVnode->pMeta) metaClose(pVnode->pMeta); + if (pVnode->pMeta) metaClose(&pVnode->pMeta); if (pVnode->freeList) vnodeCloseBufPool(pVnode); tsem_destroy(&(pVnode->canCommit)); @@ -389,7 +389,7 @@ void vnodeClose(SVnode *pVnode) { tqClose(pVnode->pTq); if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb); smaClose(pVnode->pSma); - metaClose(pVnode->pMeta); + if (pVnode->pMeta) metaClose(&pVnode->pMeta); vnodeCloseBufPool(pVnode); tsem_post(&pVnode->canCommit); diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 2437afbacc..6f9f701ac2 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -141,7 +141,10 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int *(int64_t *)(pCoder->data + pCoder->pos) = uid; pCoder->pos += sizeof(int64_t); } else { - tDecodeI64(pCoder, &submitTbData.uid); + if (tDecodeI64(pCoder, &submitTbData.uid) < 0) { + code = TSDB_CODE_INVALID_MSG; + TSDB_CHECK_CODE(code, lino, _exit); + } } if (tDecodeI32v(pCoder, &submitTbData.sver) < 0) { @@ -168,6 +171,11 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int SColData colData = {0}; pCoder->pos += tGetColData(pCoder->data + pCoder->pos, &colData); + if (colData.flag != HAS_VALUE) { + code = TSDB_CODE_INVALID_MSG; + goto _exit; + } + for (int32_t iRow = 0; iRow < colData.nVal; iRow++) { if (((TSKEY *)colData.pData)[iRow] < minKey || ((TSKEY *)colData.pData)[iRow] > maxKey) { code = TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE; @@ -440,10 +448,13 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp walApplyVer(pVnode->pWal, version); + /*vInfo("vgId:%d, push msg begin", pVnode->config.vgId);*/ if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, version) < 0) { + /*vInfo("vgId:%d, push msg end", pVnode->config.vgId);*/ vError("vgId:%d, failed to push msg to TQ since %s", TD_VID(pVnode), tstrerror(terrno)); return -1; } + /*vInfo("vgId:%d, push msg end", pVnode->config.vgId);*/ // commit if need if (needCommit) { @@ -479,7 +490,6 @@ int32_t vnodePreprocessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { vTrace("message in vnode query queue is processing"); - // if ((pMsg->msgType == TDMT_SCH_QUERY) && !vnodeIsLeader(pVnode)) { if ((pMsg->msgType == TDMT_SCH_QUERY) && !syncIsReadyForRead(pVnode->sync)) { vnodeRedirectRpcMsg(pVnode, pMsg, terrno); return 0; @@ -503,7 +513,6 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META || pMsg->msgType == TDMT_VND_TABLE_CFG || pMsg->msgType == TDMT_VND_BATCH_META) && !syncIsReadyForRead(pVnode->sync)) { - // !vnodeIsLeader(pVnode)) { vnodeRedirectRpcMsg(pVnode, pMsg, terrno); return 0; } diff --git a/source/dnode/vnode/test/tsdbSmaTest.cpp 
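metaClose is changed to take the address of the handle (metaClose(&pVnode->pMeta)), which lets the close routine clear the caller's pointer and makes a second close, or a close after a failed open, harmless. A generic sketch of that double-pointer close idiom:

```c
#include <stdio.h>
#include <stdlib.h>

typedef struct { int dummy; } SMetaSketch;

/* Taking the address of the handle lets the close routine reset it to NULL,
 * so repeated closes become no-ops and no dangling handle survives. */
static void metaCloseSketch(SMetaSketch **ppMeta) {
    if (ppMeta == NULL || *ppMeta == NULL) {
        return;              /* already closed */
    }
    free(*ppMeta);
    *ppMeta = NULL;          /* caller's handle is cleared too */
}

int main(void) {
    SMetaSketch *pMeta = calloc(1, sizeof(*pMeta));
    metaCloseSketch(&pMeta);
    metaCloseSketch(&pMeta);  /* safe: pointer was NULLed by the first call */
    printf("pMeta=%p\n", (void *)pMeta);
    return 0;
}
```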
b/source/dnode/vnode/test/tsdbSmaTest.cpp index be101059f2..43eaacfff9 100644 --- a/source/dnode/vnode/test/tsdbSmaTest.cpp +++ b/source/dnode/vnode/test/tsdbSmaTest.cpp @@ -283,7 +283,7 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) { metaRemoveSmaFromDb(pMeta, indexUid2); tDestroyTSma(&tSma); - metaClose(pMeta); + metaClose(&pMeta); } #endif @@ -577,9 +577,9 @@ TEST(testCase, tSma_Data_Insert_Query_Test) { tDestroyTSma(&tSma); tfsClose(pTsdb->pTfs); tsdbClose(pTsdb); - metaClose(pMeta); + metaClose(&pMeta); } #endif -#pragma GCC diagnostic pop \ No newline at end of file +#pragma GCC diagnostic pop diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index 7ee7a24f97..8fc7df63be 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -300,7 +300,7 @@ typedef struct SCtgSubRes { ctgSubTaskCbFp fp; } SCtgSubRes; -typedef struct SCtgTask { +struct SCtgTask { CTG_TASK_TYPE type; int32_t taskId; SCtgJob* pJob; @@ -313,7 +313,7 @@ typedef struct SCtgTask { SRWLatch lock; SArray* pParents; SCtgSubRes subRes; -} SCtgTask; +}; typedef struct SCtgTaskReq { SCtgTask* pTask; diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index 325d6e0e46..89e92b0cc8 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -1707,9 +1707,7 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask* pTask) { return TSDB_CODE_SUCCESS; } - pTask->msgCtxs = taosArrayInit(pCtx->fetchNum, sizeof(SCtgMsgCtx)); - taosArraySetSize(pTask->msgCtxs, pCtx->fetchNum); - + pTask->msgCtxs = taosArrayInit_s(pCtx->fetchNum, sizeof(SCtgMsgCtx), pCtx->fetchNum); for (int32_t i = 0; i < pCtx->fetchNum; ++i) { SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i); SName* pName = ctgGetFetchName(pCtx->pNames, pFetch); @@ -1844,7 +1842,10 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask* pTask) { ctgAddFetch(&pCtx->pFetchs, i, -1, &fetchIdx, baseResIdx, 0); baseResIdx += taosArrayGetSize(pReq->pTables); - taosArraySetSize(pCtx->pResList, baseResIdx); + int32_t inc = baseResIdx - taosArrayGetSize(pCtx->pResList); + for(int32_t j = 0; j < inc; ++j) { + taosArrayPush(pCtx->pResList, &(SMetaRes){0}); + } } } @@ -1856,8 +1857,7 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask* pTask) { return TSDB_CODE_SUCCESS; } - pTask->msgCtxs = taosArrayInit(pCtx->fetchNum, sizeof(SCtgMsgCtx)); - taosArraySetSize(pTask->msgCtxs, pCtx->fetchNum); + pTask->msgCtxs = taosArrayInit_s(pCtx->fetchNum, sizeof(SCtgMsgCtx), pCtx->fetchNum); for (int32_t i = 0; i < pCtx->fetchNum; ++i) { SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i); diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c index 6e4077eae0..7ff8afd6a5 100644 --- a/source/libs/catalog/src/ctgCache.c +++ b/source/libs/catalog/src/ctgCache.c @@ -493,11 +493,9 @@ int32_t ctgCopyTbMeta(SCatalog *pCtg, SCtgTbMetaCtx *ctx, SCtgDBCache **pDb, SCt //ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache); - if (tbCache) { - CTG_UNLOCK(CTG_READ, &tbCache->metaLock); - taosHashRelease(dbCache->tbCache, tbCache); - *pTb = NULL; - } + CTG_UNLOCK(CTG_READ, &tbCache->metaLock); + taosHashRelease(dbCache->tbCache, tbCache); + *pTb = NULL; ctgDebug("Got ctb %s meta from cache, will continue to get its stb meta, type:%d, dbFName:%s", ctx->pName->tname, ctx->tbInfo.tbType, dbFName); @@ -1554,8 +1552,8 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam SCtgTbCache cache = {0}; cache.pMeta = meta; if (taosHashPut(dbCache->tbCache, tbName, 
strlen(tbName), &cache, sizeof(SCtgTbCache)) != 0) { - taosMemoryFree(meta); ctgError("taosHashPut new tbCache failed, dbFName:%s, tbName:%s, tbType:%d", dbFName, tbName, meta->tableType); + taosMemoryFree(meta); CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } @@ -2480,20 +2478,20 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe ctgDebug("db %s not in cache", dbFName); for (int32_t i = 0; i < tbNum; ++i) { ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag); - taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1); + taosArrayPush(ctx->pResList, &(SMetaData){0}); } return TSDB_CODE_SUCCESS; } for (int32_t i = 0; i < tbNum; ++i) { - SName *pName = taosArrayGet(pList, i); + pName = taosArrayGet(pList, i); pCache = taosHashAcquire(dbCache->tbCache, pName->tname, strlen(pName->tname)); if (NULL == pCache) { ctgDebug("tb %s not in cache, dbFName:%s", pName->tname, dbFName); ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag); - taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1); + taosArrayPush(ctx->pResList, &(SMetaRes){0}); continue; } @@ -2503,7 +2501,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe CTG_UNLOCK(CTG_READ, &pCache->metaLock); ctgDebug("tb %s meta not in cache, dbFName:%s", pName->tname, dbFName); ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag); - taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1); + taosArrayPush(ctx->pResList, &(SMetaRes){0}); continue; } @@ -2576,7 +2574,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe if (NULL == stName) { ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", pTableMeta->suid, dbFName); ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag); - taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1); + taosArrayPush(ctx->pResList, &(SMetaRes){0}); taosMemoryFreeClear(pTableMeta); continue; @@ -2588,7 +2586,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe taosHashRelease(dbCache->stbCache, stName); ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag); - taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1); + taosArrayPush(ctx->pResList, &(SMetaRes){0}); taosMemoryFreeClear(pTableMeta); continue; @@ -2603,7 +2601,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe taosHashRelease(dbCache->tbCache, pCache); ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag); - taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1); + taosArrayPush(ctx->pResList, &(SMetaRes){0}); taosMemoryFreeClear(pTableMeta); @@ -2619,7 +2617,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe nctx.tbInfo.suid); ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag); - taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1); + taosArrayPush(ctx->pResList, &(SMetaRes){0}); taosMemoryFreeClear(pTableMeta); diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index e0d2276e6f..f99c7de93d 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -44,6 +44,8 @@ typedef struct SGroupResInfo { int32_t index; SArray* pRows; // SArray + char* pBuf; + bool freeItem; } SGroupResInfo; typedef struct SResultRow { @@ -115,10 +117,6 @@ struct SResultRowEntryInfo* getResultEntryInfo(const SResultRow* 
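In the catalog code, taosArraySetSize calls are replaced by taosArrayInit_s or by pushing a zero-initialized element (&(SMetaRes){0}) for each cache miss, so every result slot is a real, zeroed entry rather than just a bumped size counter, and the result list stays index-aligned with the request list. A standalone sketch of that placeholder-slot idea, with a toy append-only array standing in for SArray:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Placeholder result slot; the real SMetaRes layout is not shown here. */
typedef struct {
    int32_t code;
    void   *pRes;
} SMetaResSketch;

/* Tiny append-only array, standing in for SArray. */
typedef struct {
    SMetaResSketch *data;
    size_t          size, cap;
} SResList;

static void push(SResList *l, const SMetaResSketch *v) {
    if (l->size == l->cap) {
        size_t newCap = l->cap ? l->cap * 2 : 8;
        SMetaResSketch *nd = realloc(l->data, newCap * sizeof(*v));
        if (nd == NULL) exit(1);
        l->data = nd;
        l->cap  = newCap;
    }
    l->data[l->size++] = *v;
}

int main(void) {
    SResList list = {0};

    /* For every cache miss, append one zero-initialized slot -- the same
     * effect the patch gets from taosArrayPush(..., &(SMetaRes){0}) instead
     * of taosArraySetSize(). */
    for (int i = 0; i < 3; ++i) {
        push(&list, &(SMetaResSketch){0});
    }

    printf("placeholders: %zu\n", list.size);
    free(list.data);
    return 0;
}
```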
pRow, int32_t i static FORCE_INLINE SResultRow* getResultRowByPos(SDiskbasedBuf* pBuf, SResultRowPosition* pos, bool forUpdate) { SFilePage* bufPage = (SFilePage*)getBufPage(pBuf, pos->pageId); - if (NULL == bufPage) { - return NULL; - } - if (forUpdate) { setBufPageDirty(bufPage, true); } diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index 294424746a..60e4e85fb8 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -149,6 +149,10 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { // check if it is a group by tbname if ((pInfo->retrieveType & CACHESCAN_RETRIEVE_TYPE_ALL) == CACHESCAN_RETRIEVE_TYPE_ALL) { + if (isTaskKilled(pTaskInfo)) { + T_LONG_JMP(pTaskInfo->env, pTaskInfo->code); + } + if (pInfo->indexOfBufferedRes >= pInfo->pBufferredRes->info.rows) { blockDataCleanup(pInfo->pBufferredRes); taosArrayClear(pInfo->pUidList); @@ -207,6 +211,10 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { size_t totalGroups = tableListGetOutputGroups(pTableList); while (pInfo->currentGroupIndex < totalGroups) { + if (isTaskKilled(pTaskInfo)) { + T_LONG_JMP(pTaskInfo->env, pTaskInfo->code); + } + STableKeyInfo* pList = NULL; int32_t num = 0; @@ -215,8 +223,15 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { T_LONG_JMP(pTaskInfo->env, code); } - tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pList, num, - taosArrayGetSize(pInfo->matchInfo.pList), suid, &pInfo->pLastrowReader, pTaskInfo->id.str); + code = tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pList, num, + taosArrayGetSize(pInfo->matchInfo.pList), suid, &pInfo->pLastrowReader, + pTaskInfo->id.str); + if (code != TSDB_CODE_SUCCESS) { + pInfo->currentGroupIndex += 1; + taosArrayClear(pInfo->pUidList); + continue; + } + taosArrayClear(pInfo->pUidList); code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pUidList); diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 7522771e7b..639adb7ec6 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -50,11 +50,13 @@ typedef enum { } FilterCondType; static FilterCondType checkTagCond(SNode* cond); -static int32_t removeInvalidTable(SArray* uids, SHashObj* tags); -static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond, SHashObj* tags); -static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond); -static int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, - SNode* pTagIndexCond, STableListInfo* pListInfo); +static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond); +static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* list, SNode* pTagCond); + +static int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, + SNode* pTagIndexCond, STableListInfo* pListInfo, const char* idstr); +static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, + void* metaHandle); static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; } static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? 
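The cache-scan operator gains isTaskKilled checks inside its long loops so a killed query bails out promptly via T_LONG_JMP instead of finishing the whole group scan. A self-contained sketch of that cooperative-cancellation pattern using setjmp/longjmp; the types and codes are illustrative, not the real executor structures:

```c
#include <setjmp.h>
#include <stdbool.h>
#include <stdio.h>

/* Each iteration of a potentially long loop re-checks a kill flag and exits
 * through the task's jump buffer, mirroring the isTaskKilled()/T_LONG_JMP
 * additions above. */
typedef struct {
    jmp_buf       env;
    volatile bool killed;
    int           code;
} STaskSketch;

static void scanGroups(STaskSketch *task, int numOfGroups) {
    for (int i = 0; i < numOfGroups; ++i) {
        if (task->killed) {
            task->code = -1;
            longjmp(task->env, 1);        /* early exit, like T_LONG_JMP */
        }
        /* ... scan one group ... */
        if (i == 2) task->killed = true;  /* simulate an external kill */
    }
}

int main(void) {
    static STaskSketch task = {.killed = false, .code = 0};
    if (setjmp(task.env) == 0) {
        scanGroups(&task, 100);
        printf("finished\n");
    } else {
        printf("cancelled, code=%d\n", task.code);
    }
    return 0;
}
```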
-1 : ((SLimitNode*)pLimit)->offset; } @@ -95,15 +97,18 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) { return rowSize; } +static void freeEx(void* p) { taosMemoryFree(*(void**)p); } + void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo) { - assert(pGroupResInfo != NULL); - - for (int32_t i = 0; i < taosArrayGetSize(pGroupResInfo->pRows); ++i) { - SResKeyPos* pRes = taosArrayGetP(pGroupResInfo->pRows, i); - taosMemoryFree(pRes); + taosMemoryFreeClear(pGroupResInfo->pBuf); + if (pGroupResInfo->freeItem) { + // taosArrayDestroy(pGroupResInfo->pRows); + taosArrayDestroyEx(pGroupResInfo->pRows, freeEx); + pGroupResInfo->freeItem = false; + pGroupResInfo->pRows = NULL; + } else { + pGroupResInfo->pRows = taosArrayDestroy(pGroupResInfo->pRows); } - - pGroupResInfo->pRows = taosArrayDestroy(pGroupResInfo->pRows); pGroupResInfo->index = 0; } @@ -133,26 +138,40 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, in } // extract the result rows information from the hash map - void* pData = NULL; - pGroupResInfo->pRows = taosArrayInit(10, POINTER_BYTES); + int32_t size = tSimpleHashGetSize(pHashmap); + + void* pData = NULL; + pGroupResInfo->pRows = taosArrayInit(size, POINTER_BYTES); - // todo avoid repeated malloc memory size_t keyLen = 0; int32_t iter = 0; + int32_t bufLen = 0, offset = 0; + + // todo move away and record this during create window + while ((pData = tSimpleHashIterate(pHashmap, pData, &iter)) != NULL) { + /*void* key = */ tSimpleHashGetKey(pData, &keyLen); + bufLen += keyLen + sizeof(SResultRowPosition); + } + + pGroupResInfo->pBuf = taosMemoryMalloc(bufLen); + + iter = 0; while ((pData = tSimpleHashIterate(pHashmap, pData, &iter)) != NULL) { void* key = tSimpleHashGetKey(pData, &keyLen); - SResKeyPos* p = taosMemoryMalloc(keyLen + sizeof(SResultRowPosition)); + SResKeyPos* p = (SResKeyPos*)(pGroupResInfo->pBuf + offset); p->groupId = *(uint64_t*)key; p->pos = *(SResultRowPosition*)pData; memcpy(p->key, (char*)key + sizeof(uint64_t), keyLen - sizeof(uint64_t)); taosArrayPush(pGroupResInfo->pRows, &p); + + offset += keyLen + sizeof(struct SResultRowPosition); } if (order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC) { __compar_fn_t fn = (order == TSDB_ORDER_ASC) ? 
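initGroupedResultInfo now makes two passes over the result hash: the first sums the key lengths into bufLen, one pGroupResInfo->pBuf is allocated, and the second pass lays the SResKeyPos entries out back-to-back inside it, replacing one taosMemoryMalloc per result row; cleanupGroupResInfo then releases just that single buffer. A standalone sketch of the same single-buffer packing; the record layout is illustrative, and the sketch pads records to 8 bytes purely to keep the example strictly portable:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* One variable-length record per group, carved out of a shared buffer, in the
 * spirit of SResKeyPos: a small header followed by the group key bytes. */
typedef struct {
    uint64_t groupId;
    int32_t  keyLen;
    char     key[];               /* flexible array member */
} SPackedKey;

/* pad every record so each header stays naturally aligned */
#define ALIGN8(x) (((x) + 7u) & ~(size_t)7u)

int main(void) {
    const char *keys[] = {"sensor-a", "room_12", "x"};
    enum { N = 3 };

    /* pass 1: add up the exact space needed -- one allocation instead of N */
    size_t bufLen = 0;
    for (int i = 0; i < N; ++i) {
        bufLen += ALIGN8(sizeof(SPackedKey) + strlen(keys[i]));
    }
    char *pBuf = malloc(bufLen);
    if (pBuf == NULL) return 1;

    /* pass 2: carve the records out of the buffer, keep only pointers */
    SPackedKey *rows[N];
    size_t offset = 0;
    for (int i = 0; i < N; ++i) {
        SPackedKey *p = (SPackedKey *)(pBuf + offset);
        p->groupId = 1000u + (uint64_t)i;
        p->keyLen  = (int32_t)strlen(keys[i]);
        memcpy(p->key, keys[i], (size_t)p->keyLen);
        rows[i]    = p;
        offset    += ALIGN8(sizeof(SPackedKey) + (size_t)p->keyLen);
    }

    for (int i = 0; i < N; ++i) {
        printf("group %llu key %.*s\n", (unsigned long long)rows[i]->groupId,
               rows[i]->keyLen, rows[i]->key);
    }

    free(pBuf);   /* cleanupGroupResInfo-style: one free releases every row */
    return 0;
}
```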
resultrowComparAsc : resultrowComparDesc; - int32_t size = POINTER_BYTES; + size = POINTER_BYTES; taosSort(pGroupResInfo->pRows->pData, taosArrayGetSize(pGroupResInfo->pRows), size, fn); } @@ -165,6 +184,7 @@ void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayL taosArrayDestroy(pGroupResInfo->pRows); } + pGroupResInfo->freeItem = true; pGroupResInfo->pRows = pArrayList; pGroupResInfo->index = 0; ASSERT(pGroupResInfo->index <= getNumOfTotalRes(pGroupResInfo)); @@ -179,7 +199,6 @@ bool hasRemainResults(SGroupResInfo* pGroupResInfo) { } int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo) { - assert(pGroupResInfo != NULL); if (pGroupResInfo->pRows == 0) { return 0; } @@ -399,159 +418,6 @@ static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarPara return TSDB_CODE_SUCCESS; } -static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* uidList, SNode* pTagCond, - SIdxFltStatus status) { - int32_t code = TSDB_CODE_SUCCESS; - SArray* pBlockList = NULL; - SSDataBlock* pResBlock = NULL; - SHashObj* tags = NULL; - SScalarParam output = {0}; - - tagFilterAssist ctx = {0}; - - ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); - if (ctx.colHash == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - - ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); - if (ctx.cInfoList == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - - nodesRewriteExprPostOrder(&pTagCond, getColumn, (void*)&ctx); - - pResBlock = createDataBlock(); - if (pResBlock == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - - for (int32_t i = 0; i < taosArrayGetSize(ctx.cInfoList); ++i) { - SColumnInfoData colInfo = {0}; - colInfo.info = *(SColumnInfo*)taosArrayGet(ctx.cInfoList, i); - blockDataAppendColInfo(pResBlock, &colInfo); - } - - // int64_t stt = taosGetTimestampUs(); - tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); - - FilterCondType condType = checkTagCond(pTagCond); - - int32_t filter = optimizeTbnameInCond(metaHandle, suid, uidList, pTagCond, tags); - if (filter == -1) { - if ((condType == FILTER_NO_LOGIC || condType == FILTER_AND) && status != SFLT_NOT_INDEX) { - code = metaGetTableTagsByUids(metaHandle, suid, uidList, tags); - } else { - code = metaGetTableTags(metaHandle, suid, uidList, tags); - } - if (code != TSDB_CODE_SUCCESS) { - qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), suid); - terrno = code; - goto end; - } - } else { - qDebug("succ to get table tags from meta by tbname in cond, suid:%" PRIu64, suid); - } - if (suid != 0) { - removeInvalidTable(uidList, tags); - } - - int32_t rows = taosArrayGetSize(uidList); - if (rows == 0) { - goto end; - } - - code = blockDataEnsureCapacity(pResBlock, rows); - if (code != TSDB_CODE_SUCCESS) { - terrno = code; - goto end; - } - - for (int32_t i = 0; i < rows; i++) { - int64_t* uid = taosArrayGet(uidList, i); - for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) { - SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j); - - if (pColInfo->info.colId == -1) { // tbname - char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; - metaGetTableNameByUid(metaHandle, *uid, str); - colDataAppend(pColInfo, i, str, false); -#if TAG_FILTER_DEBUG - qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2); -#endif - } else { - void* tag = taosHashGet(tags, uid, sizeof(int64_t)); - if (tag == 
NULL) { - continue; - } - STagVal tagVal = {0}; - tagVal.cid = pColInfo->info.colId; - const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal); - - if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { - colDataAppend(pColInfo, i, p, true); - } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { - colDataAppend(pColInfo, i, p, false); - } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { - char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1); - varDataSetLen(tmp, tagVal.nData); - memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); - colDataAppend(pColInfo, i, tmp, false); -#if TAG_FILTER_DEBUG - qDebug("tagfilter varch:%s", tmp + 2); -#endif - taosMemoryFree(tmp); - } else { - colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false); -#if TAG_FILTER_DEBUG - if (pColInfo->info.type == TSDB_DATA_TYPE_INT) { - qDebug("tagfilter int:%d", *(int*)(&tagVal.i64)); - } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) { - qDebug("tagfilter double:%f", *(double*)(&tagVal.i64)); - } -#endif - } - } - } - } - - pResBlock->info.rows = rows; - - // int64_t st1 = taosGetTimestampUs(); - // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); - - pBlockList = taosArrayInit(2, POINTER_BYTES); - taosArrayPush(pBlockList, &pResBlock); - - SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; - code = createResultData(&type, rows, &output); - if (code != TSDB_CODE_SUCCESS) { - terrno = code; - qError("failed to create result, reason:%s", tstrerror(code)); - goto end; - } - - code = scalarCalculate(pTagCond, pBlockList, &output); - if (code != TSDB_CODE_SUCCESS) { - qError("failed to calculate scalar, reason:%s", tstrerror(code)); - terrno = code; - goto end; - } - // int64_t st2 = taosGetTimestampUs(); - // qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1); - -end: - taosHashCleanup(tags); - taosHashCleanup(ctx.colHash); - taosArrayDestroy(ctx.cInfoList); - blockDataDestroy(pResBlock); - taosArrayDestroy(pBlockList); - return output.columnData; -} - static void releaseColInfoData(void* pCol) { if (pCol) { SColumnInfoData* col = (SColumnInfoData*)pCol; @@ -560,12 +426,17 @@ static void releaseColInfoData(void* pCol) { } } +void freeItem(void* p) { + STUidTagInfo* pInfo = p; + if (pInfo->pTagVal != NULL) { + taosMemoryFree(pInfo->pTagVal); + } +} + int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableListInfo* pTableListInfo) { int32_t code = TSDB_CODE_SUCCESS; SArray* pBlockList = NULL; SSDataBlock* pResBlock = NULL; - SHashObj* tags = NULL; - SArray* uidList = NULL; void* keyBuf = NULL; SArray* groupData = NULL; @@ -594,89 +465,26 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis REPLACE_NODE(pNode); } - pResBlock = createDataBlock(); - if (pResBlock == NULL) { - code = TSDB_CODE_OUT_OF_MEMORY; - goto end; - } - - for (int32_t i = 0; i < taosArrayGetSize(ctx.cInfoList); ++i) { - SColumnInfoData colInfo = {0}; - colInfo.info = *(SColumnInfo*)taosArrayGet(ctx.cInfoList, i); - blockDataAppendColInfo(pResBlock, &colInfo); - } - - uidList = taosArrayInit(rows, sizeof(uint64_t)); + SArray* pUidTagList = taosArrayInit(8, sizeof(STUidTagInfo)); for (int32_t i = 0; i < rows; ++i) { STableKeyInfo* pkeyInfo = taosArrayGet(pTableListInfo->pTableList, i); - taosArrayPush(uidList, &pkeyInfo->uid); + STUidTagInfo info = {.uid = pkeyInfo->uid}; + taosArrayPush(pUidTagList, &info); } // int64_t stt = taosGetTimestampUs(); - tags = 
taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); - code = metaGetTableTags(metaHandle, pTableListInfo->suid, uidList, tags); + code = metaGetTableTags(metaHandle, pTableListInfo->suid, pUidTagList); if (code != TSDB_CODE_SUCCESS) { goto end; } - // int64_t stt1 = taosGetTimestampUs(); - // qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt); - - code = blockDataEnsureCapacity(pResBlock, rows); - if (code != TSDB_CODE_SUCCESS) { + int32_t numOfTables = taosArrayGetSize(pUidTagList); + pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, pUidTagList, metaHandle); + if (pResBlock == NULL) { + code = terrno; goto end; } - // int64_t st = taosGetTimestampUs(); - for (int32_t i = 0; i < rows; i++) { - int64_t* uid = taosArrayGet(uidList, i); - for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) { - SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j); - - if (pColInfo->info.colId == -1) { // tbname - char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; - metaGetTableNameByUid(metaHandle, *uid, str); - colDataAppend(pColInfo, i, str, false); -#if TAG_FILTER_DEBUG - qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2); -#endif - } else { - void* tag = taosHashGet(tags, uid, sizeof(int64_t)); - ASSERT(tag); - - STagVal tagVal = {0}; - tagVal.cid = pColInfo->info.colId; - const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal); - - if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { - colDataAppend(pColInfo, i, p, true); - } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { - colDataAppend(pColInfo, i, p, false); - } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { - char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1); - varDataSetLen(tmp, tagVal.nData); - memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); - colDataAppend(pColInfo, i, tmp, false); -#if TAG_FILTER_DEBUG - qDebug("tagfilter varch:%s", tmp + 2); -#endif - taosMemoryFree(tmp); - } else { - colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false); -#if TAG_FILTER_DEBUG - if (pColInfo->info.type == TSDB_DATA_TYPE_INT) { - qDebug("tagfilter int:%d", *(int*)(&tagVal.i64)); - } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) { - qDebug("tagfilter double:%f", *(double*)(&tagVal.i64)); - } -#endif - } - } - } - } - - pResBlock->info.rows = rows; - // int64_t st1 = taosGetTimestampUs(); // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); @@ -784,12 +592,11 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis end: taosMemoryFreeClear(keyBuf); - taosHashCleanup(tags); taosHashCleanup(ctx.colHash); taosArrayDestroy(ctx.cInfoList); blockDataDestroy(pResBlock); taosArrayDestroy(pBlockList); - taosArrayDestroy(uidList); + taosArrayDestroyEx(pUidTagList, freeItem); taosArrayDestroyP(groupData, releaseColInfoData); return code; } @@ -858,6 +665,17 @@ static int tableUidCompare(const void* a, const void* b) { return u1 < u2 ? -1 : 1; } +static int32_t filterTableInfoCompare(const void* a, const void* b) { + STUidTagInfo* p1 = (STUidTagInfo*)a; + STUidTagInfo* p2 = (STUidTagInfo*)b; + + if (p1->uid == p2->uid) { + return 0; + } + + return p1->uid < p2->uid ? 
-1 : 1; +} + static FilterCondType checkTagCond(SNode* cond) { if (nodeType(cond) == QUERY_NODE_OPERATOR) { return FILTER_NO_LOGIC; @@ -867,17 +685,16 @@ static FilterCondType checkTagCond(SNode* cond) { } return FILTER_OTHER; } -static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* cond, SHashObj* tags) { + +static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* cond) { int32_t ret = -1; - if (nodeType(cond) == QUERY_NODE_OPERATOR) { - ret = optimizeTbnameInCondImpl(metaHandle, suid, list, cond); - if (ret != -1) { - metaGetTableTagsByUids(metaHandle, suid, list, tags); - removeInvalidTable(list, tags); - } + int32_t ntype = nodeType(cond); + + if (ntype == QUERY_NODE_OPERATOR) { + ret = optimizeTbnameInCondImpl(metaHandle, list, cond); } - if (nodeType(cond) != QUERY_NODE_LOGIC_CONDITION || ((SLogicConditionNode*)cond)->condType != LOGIC_COND_TYPE_AND) { + if (ntype != QUERY_NODE_LOGIC_CONDITION || ((SLogicConditionNode*)cond)->condType != LOGIC_COND_TYPE_AND) { return ret; } @@ -893,45 +710,25 @@ static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list SListCell* cell = pList->pHead; for (int i = 0; i < len; i++) { if (cell == NULL) break; - if (optimizeTbnameInCondImpl(metaHandle, suid, list, cell->pNode) == 0) { + if (optimizeTbnameInCondImpl(metaHandle, list, cell->pNode) == 0) { hasTbnameCond = true; break; } cell = cell->pNext; } - taosArraySort(list, tableUidCompare); - taosArrayRemoveDuplicate(list, tableUidCompare, NULL); + taosArraySort(list, filterTableInfoCompare); + taosArrayRemoveDuplicate(list, filterTableInfoCompare, NULL); if (hasTbnameCond) { - ret = metaGetTableTagsByUids(metaHandle, suid, list, tags); - removeInvalidTable(list, tags); + ret = metaGetTableTagsByUids(metaHandle, suid, list); } return ret; } -/* - * handle invalid uid - */ -static int32_t removeInvalidTable(SArray* uids, SHashObj* tags) { - if (taosArrayGetSize(uids) <= 0) return 0; - - SArray* validUid = taosArrayInit(taosArrayGetSize(uids), sizeof(int64_t)); - - for (int32_t i = 0; i < taosArrayGetSize(uids); i++) { - int64_t* uid = taosArrayGet(uids, i); - if (taosHashGet(tags, uid, sizeof(int64_t)) != NULL) { - taosArrayPush(validUid, uid); - } - } - - taosArraySwap(uids, validUid); - taosArrayDestroy(validUid); - return 0; -} - -static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond) { +// only return uid that does not contained in pExistedUidList +static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* pExistedUidList, SNode* pTagCond) { if (nodeType(pTagCond) != QUERY_NODE_OPERATOR) { return -1; } @@ -954,12 +751,13 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* SArray* pTbList = getTableNameList(pList); int32_t numOfTables = taosArrayGetSize(pTbList); SHashObj* uHash = NULL; - size_t listlen = taosArrayGetSize(list); // len > 0 means there already have uids - if (listlen > 0) { - uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); - for (int i = 0; i < listlen; i++) { - int64_t* uid = taosArrayGet(list, i); - taosHashPut(uHash, uid, sizeof(int64_t), &i, sizeof(i)); + + size_t numOfExisted = taosArrayGetSize(pExistedUidList); // len > 0 means there already have uids + if (numOfExisted > 0) { + uHash = taosHashInit(numOfExisted / 0.7, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); + for (int i = 0; i < numOfExisted; i++) { + STUidTagInfo* pTInfo 
= taosArrayGet(pExistedUidList, i); + taosHashPut(uHash, &pTInfo->uid, sizeof(uint64_t), &i, sizeof(i)); } } @@ -971,7 +769,8 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* ETableType tbType = TSDB_TABLE_MAX; if (metaGetTableTypeByName(metaHandle, name, &tbType) == 0 && tbType == TSDB_CHILD_TABLE) { if (NULL == uHash || taosHashGet(uHash, &uid, sizeof(uid)) == NULL) { - taosArrayPush(list, &uid); + STUidTagInfo s = {.uid = uid, .name = name, .pTagVal = NULL}; + taosArrayPush(pExistedUidList, &s); } } else { taosArrayDestroy(pTbList); @@ -1008,132 +807,309 @@ static void genTagFilterDigest(const SNode* pTagCond, T_MD5_CTX* pContext) { taosMemoryFree(payload); } -static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* res, SNode* pTagCond, void* metaHandle, +static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, + void* metaHandle) { + SSDataBlock* pResBlock = createDataBlock(); + if (pResBlock == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + + for (int32_t i = 0; i < taosArrayGetSize(pColList); ++i) { + SColumnInfoData colInfo = {0}; + colInfo.info = *(SColumnInfo*)taosArrayGet(pColList, i); + blockDataAppendColInfo(pResBlock, &colInfo); + } + + int32_t code = blockDataEnsureCapacity(pResBlock, numOfTables); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + return NULL; + } + + pResBlock->info.rows = numOfTables; + + int32_t numOfCols = taosArrayGetSize(pResBlock->pDataBlock); + + for (int32_t i = 0; i < numOfTables; i++) { + STUidTagInfo* p1 = taosArrayGet(pUidTagList, i); + + for (int32_t j = 0; j < numOfCols; j++) { + SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j); + + if (pColInfo->info.colId == -1) { // tbname + char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + if (p1->name != NULL) { + STR_TO_VARSTR(str, p1->name); + } else { // name is not retrieved during filter + metaGetTableNameByUid(metaHandle, p1->uid, str); + } + + colDataAppend(pColInfo, i, str, false); +#if TAG_FILTER_DEBUG + qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2); +#endif + } else { + STagVal tagVal = {0}; + tagVal.cid = pColInfo->info.colId; + if (p1->pTagVal == NULL) { + colDataAppendNULL(pColInfo, i); + } + + const char* p = metaGetTableTagVal(p1->pTagVal, pColInfo->info.type, &tagVal); + + if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { + colDataAppendNULL(pColInfo, i); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { + colDataAppend(pColInfo, i, p, false); + } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { + char* tmp = alloca(tagVal.nData + VARSTR_HEADER_SIZE + 1); + varDataSetLen(tmp, tagVal.nData); + memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); + colDataAppend(pColInfo, i, tmp, false); +#if TAG_FILTER_DEBUG + qDebug("tagfilter varch:%s", tmp + 2); +#endif + } else { + colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false); +#if TAG_FILTER_DEBUG + if (pColInfo->info.type == TSDB_DATA_TYPE_INT) { + qDebug("tagfilter int:%d", *(int*)(&tagVal.i64)); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) { + qDebug("tagfilter double:%f", *(double*)(&tagVal.i64)); + } +#endif + } + } + } + } + + return pResBlock; +} + +static void doSetQualifiedUid(SArray* pUidList, const SArray* pUidTagList, bool* pResultList) { + taosArrayClear(pUidList); + + int32_t numOfTables = taosArrayGetSize(pUidTagList); + for (int32_t i = 0; i < numOfTables; ++i) { + uint64_t uid = 
((STUidTagInfo*)taosArrayGet(pUidTagList, i))->uid; + qDebug("tagfilter get uid:%" PRId64 ", res:%d", uid, pResultList[i]); + + if (pResultList[i]) { + taosArrayPush(pUidList, &uid); + } + } +} + +static void copyExistedUids(SArray* pUidTagList, const SArray* pUidList) { + int32_t numOfExisted = taosArrayGetSize(pUidList); + if (numOfExisted == 0) { + return; + } + + for (int32_t i = 0; i < numOfExisted; ++i) { + uint64_t* uid = taosArrayGet(pUidList, i); + STUidTagInfo info = {.uid = *uid}; + taosArrayPush(pUidTagList, &info); + } +} + +static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SNode* pTagCond, void* metaHandle, SIdxFltStatus status) { if (pTagCond == NULL) { return TSDB_CODE_SUCCESS; } terrno = TDB_CODE_SUCCESS; - SColumnInfoData* pColInfoData = getColInfoResult(metaHandle, pListInfo->suid, res, pTagCond, status); - if (terrno != TDB_CODE_SUCCESS) { - colDataDestroy(pColInfoData); - taosMemoryFreeClear(pColInfoData); - taosArrayDestroy(res); - qError("failed to getColInfoResult, code: %s", tstrerror(terrno)); - return terrno; + + int32_t code = TSDB_CODE_SUCCESS; + SArray* pBlockList = NULL; + SSDataBlock* pResBlock = NULL; + SScalarParam output = {0}; + + tagFilterAssist ctx = {0}; + ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); + if (ctx.colHash == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto end; } - int32_t i = 0; - int32_t len = taosArrayGetSize(res); + ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); + if (ctx.cInfoList == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + goto end; + } - if (pColInfoData != NULL) { - bool* pResult = (bool*)pColInfoData->pData; - SArray* p = taosArrayInit(taosArrayGetSize(res), sizeof(uint64_t)); + nodesRewriteExprPostOrder(&pTagCond, getColumn, (void*)&ctx); - while (i < len && pColInfoData) { - int64_t* uid = taosArrayGet(res, i); - qDebug("tagfilter get uid:%" PRId64 ", res:%d", *uid, pResult[i]); + SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; - if (pResult[i]) { - taosArrayPush(p, uid); - } - i += 1; + // int64_t stt = taosGetTimestampUs(); + SArray* pUidTagList = taosArrayInit(10, sizeof(STUidTagInfo)); + copyExistedUids(pUidTagList, pUidList); + + FilterCondType condType = checkTagCond(pTagCond); + + int32_t filter = optimizeTbnameInCond(metaHandle, pListInfo->suid, pUidTagList, pTagCond); + if (filter == 0) { // tbname in filter is activated, do nothing and return + taosArrayClear(pUidList); + + int32_t numOfRows = taosArrayGetSize(pUidTagList); + taosArrayEnsureCap(pUidList, numOfRows); + for (int32_t i = 0; i < numOfRows; ++i) { + STUidTagInfo* pInfo = taosArrayGet(pUidTagList, i); + taosArrayPush(pUidList, &pInfo->uid); } - taosArraySwap(res, p); - taosArrayDestroy(p); + terrno = 0; + goto end; + } else { + if ((condType == FILTER_NO_LOGIC || condType == FILTER_AND) && status != SFLT_NOT_INDEX) { + code = metaGetTableTagsByUids(metaHandle, pListInfo->suid, pUidTagList); + } else { + code = metaGetTableTags(metaHandle, pListInfo->suid, pUidTagList); + } + if (code != TSDB_CODE_SUCCESS) { + qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), pListInfo->suid); + terrno = code; + goto end; + } } - colDataDestroy(pColInfoData); - taosMemoryFreeClear(pColInfoData); + int32_t numOfTables = taosArrayGetSize(pUidTagList); + if (numOfTables == 0) { + goto end; + } - return TSDB_CODE_SUCCESS; + pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, pUidTagList, metaHandle); + 
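// Note: the block built by createTagValBlockForFilter() carries one row per candidate
// table and one column per referenced tag (tbname included). scalarCalculate() below
// evaluates pTagCond over it into a boolean result column, and doSetQualifiedUid()
// then keeps only the uids whose row evaluated to true.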
if (pResBlock == NULL) { + code = terrno; + goto end; + } + + // int64_t st1 = taosGetTimestampUs(); + // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st); + pBlockList = taosArrayInit(2, POINTER_BYTES); + taosArrayPush(pBlockList, &pResBlock); + + code = createResultData(&type, numOfTables, &output); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + goto end; + } + + code = scalarCalculate(pTagCond, pBlockList, &output); + if (code != TSDB_CODE_SUCCESS) { + qError("failed to calculate scalar, reason:%s", tstrerror(code)); + terrno = code; + goto end; + } + + doSetQualifiedUid(pUidList, pUidTagList, (bool*)output.columnData->pData); + +end: + taosHashCleanup(ctx.colHash); + taosArrayDestroy(ctx.cInfoList); + blockDataDestroy(pResBlock); + taosArrayDestroy(pBlockList); + taosArrayDestroyEx(pUidTagList, freeItem); + + colDataDestroy(output.columnData); + taosMemoryFreeClear(output.columnData); + return code; } int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, - STableListInfo* pListInfo) { + STableListInfo* pListInfo, const char* idstr) { int32_t code = TSDB_CODE_SUCCESS; size_t numOfTables = 0; - uint64_t tableUid = pScanNode->uid; pListInfo->suid = pScanNode->suid; - SArray* res = taosArrayInit(8, sizeof(uint64_t)); + SArray* pUidList = taosArrayInit(8, sizeof(uint64_t)); SIdxFltStatus status = SFLT_NOT_INDEX; if (pScanNode->tableType != TSDB_SUPER_TABLE) { - if (metaIsTableExist(metaHandle, tableUid)) { - taosArrayPush(res, &tableUid); + if (metaIsTableExist(metaHandle, pScanNode->uid)) { + taosArrayPush(pUidList, &pScanNode->uid); } - code = doFilterByTagCond(pListInfo, res, pTagCond, metaHandle, status); + code = doFilterByTagCond(pListInfo, pUidList, pTagCond, metaHandle, status); if (code != TSDB_CODE_SUCCESS) { - return code; - } - } else { - // try to retrieve the result from meta cache - T_MD5_CTX context = {0}; - genTagFilterDigest(pTagCond, &context); - - bool acquired = false; - metaGetCachedTableUidList(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), res, &acquired); - if (acquired) { - qDebug("retrieve table uid list from cache, numOfTables:%d", (int32_t)taosArrayGetSize(res)); goto _end; } + } else { + T_MD5_CTX context = {0}; - if (!pTagCond) { // no tag condition exists, let's fetch all tables of this super table + if (tsTagFilterCache) { + // try to retrieve the result from meta cache + genTagFilterDigest(pTagCond, &context); + + bool acquired = false; + metaGetCachedTableUidList(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pUidList, + &acquired); + if (acquired) { + qDebug("retrieve table uid list from cache, numOfTables:%d", (int32_t)taosArrayGetSize(pUidList)); + goto _end; + } + } + + if (!pTagCond) { // no tag filter condition exists, let's fetch all tables of this super table ASSERT(pTagIndexCond == NULL); - vnodeGetCtbIdList(pVnode, pScanNode->suid, res); + vnodeGetCtbIdList(pVnode, pScanNode->suid, pUidList); } else { // failed to find the result in the cache, let try to calculate the results if (pTagIndexCond) { + void* pIndex = tsdbGetIvtIdx(metaHandle); SIndexMetaArg metaArg = { - .metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid}; + .metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = pIndex, .suid = pScanNode->uid}; - code = doFilterTag(pTagIndexCond, &metaArg, res, &status); - if (code != 0 || status == SFLT_NOT_INDEX) { - qError("failed to get tableIds from 
index, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid); + SIdxFltStatus status = SFLT_NOT_INDEX; + code = doFilterTag(pTagIndexCond, &metaArg, pUidList, &status); + if (code != 0 || status == SFLT_NOT_INDEX) { // temporarily disable it for performance sake + // qError("failed to get tableIds from index, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid); code = TDB_CODE_SUCCESS; } else { - qInfo("succ to get filter result, table num: %d", (int)taosArrayGetSize(res)); + qInfo("succ to get filter result, table num: %d", (int)taosArrayGetSize(pUidList)); } } } - code = doFilterByTagCond(pListInfo, res, pTagCond, metaHandle, status); + code = doFilterByTagCond(pListInfo, pUidList, pTagCond, metaHandle, status); if (code != TSDB_CODE_SUCCESS) { - return code; + goto _end; } // let's add the filter results into meta-cache - numOfTables = taosArrayGetSize(res); - size_t size = numOfTables * sizeof(uint64_t) + sizeof(int32_t); - char* pPayload = taosMemoryMalloc(size); - *(int32_t*)pPayload = numOfTables; + numOfTables = taosArrayGetSize(pUidList); - if (numOfTables > 0) { - memcpy(pPayload + sizeof(int32_t), taosArrayGet(res, 0), numOfTables * sizeof(uint64_t)); + if (tsTagFilterCache) { + size_t size = numOfTables * sizeof(uint64_t) + sizeof(int32_t); + char* pPayload = taosMemoryMalloc(size); + + *(int32_t*)pPayload = numOfTables; + if (numOfTables > 0) { + memcpy(pPayload + sizeof(int32_t), taosArrayGet(pUidList, 0), numOfTables * sizeof(uint64_t)); + } + + metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1); } - - metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1); } _end: - numOfTables = taosArrayGetSize(res); + numOfTables = taosArrayGetSize(pUidList); for (int i = 0; i < numOfTables; i++) { - STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0}; + STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(pUidList, i), .groupId = 0}; void* p = taosArrayPush(pListInfo->pTableList, &info); if (p == NULL) { - taosArrayDestroy(res); + taosArrayDestroy(pUidList); return TSDB_CODE_OUT_OF_MEMORY; } - qTrace("tagfilter get uid:%" PRIu64 "", info.uid); + qTrace("tagfilter get uid:%" PRIu64 ", %s", info.uid, idstr); } - taosArrayDestroy(res); + taosArrayDestroy(pUidList); return code; } @@ -1546,6 +1522,8 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) { SFuncExecEnv env = {0}; pCtx->functionId = pExpr->pExpr->_function.pFunctNode->funcId; + pCtx->isPseudoFunc = fmIsWindowPseudoColumnFunc(pCtx->functionId); + pCtx->isNotNullFunc = fmIsNotNullOutputFunc(pCtx->functionId); if (fmIsAggFunc(pCtx->functionId) || fmIsIndefiniteRowsFunc(pCtx->functionId)) { bool isUdaf = fmIsUserDefinedFunc(pCtx->functionId); @@ -1553,7 +1531,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, fmGetFuncExecFuncs(pCtx->functionId, &pCtx->fpSet); } else { char* udfName = pExpr->pExpr->_function.pFunctNode->functionName; - tstrncpy(pCtx->udfName, udfName, TSDB_FUNC_NAME_LEN); + pCtx->udfName = strdup(udfName); fmGetUdafExecFuncs(pCtx->functionId, &pCtx->fpSet); } pCtx->fpSet.getEnv(pExpr->pExpr->_function.pFunctNode, &env); @@ -1859,7 +1837,7 @@ int32_t tableListAddTableInfo(STableListInfo* pTableList, uint64_t uid, uint64_t int32_t tableListGetGroupList(const STableListInfo* pTableList, int32_t ordinalGroupIndex, STableKeyInfo** pKeyInfo, int32_t* size) { 
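// tableListGetGroupList: hands back, via pKeyInfo/size, the tables of the requested
// ordinal group; an out-of-range ordinalGroupIndex is rejected with
// TSDB_CODE_INVALID_PARA before any lookup is done.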
int32_t totalGroups = tableListGetOutputGroups(pTableList); - int32_t numOfTables = tableListGetSize(pTableList); + int32_t numOfTables = tableListGetSize(pTableList); if (ordinalGroupIndex < 0 || ordinalGroupIndex >= totalGroups) { return TSDB_CODE_INVALID_PARA; @@ -2042,7 +2020,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags return TSDB_CODE_INVALID_PARA; } - int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo); + int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo, idStr); if (code != TSDB_CODE_SUCCESS) { qError("failed to getTableList, code: %s", tstrerror(code)); return code; diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 08d7f02f8c..e6ccb99b10 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -176,10 +176,12 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, i // set the number of rows in current disk page SResultRow* pResultRow = (SResultRow*)((char*)pData + pData->num); + + memset((char*) pResultRow, 0, interBufSize); pResultRow->pageId = pageId; pResultRow->offset = (int32_t)pData->num; - *currentPageId = pageId; + *currentPageId = pageId; pData->num += interBufSize; return pResultRow; } @@ -363,7 +365,7 @@ void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pC pCtx[k].input.colDataSMAIsSet = false; } - if (fmIsWindowPseudoColumnFunc(pCtx[k].functionId)) { + if (pCtx[k].isPseudoFunc) { SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(&pCtx[k]); char* p = GET_ROWCELL_INTERBUF(pEntryInfo); @@ -395,19 +397,20 @@ void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pC } } -static void doSetInputDataBlockInfo(SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t order) { +static void doSetInputDataBlockInfo(SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t order, int32_t scanFlag) { SqlFunctionCtx* pCtx = pExprSup->pCtx; for (int32_t i = 0; i < pExprSup->numOfExprs; ++i) { pCtx[i].order = order; pCtx[i].input.numOfRows = pBlock->info.rows; setBlockSMAInfo(&pCtx[i], &pExprSup->pExprInfo[i], pBlock); pCtx[i].pSrcBlock = pBlock; + pCtx[i].scanFlag = scanFlag; } } void setInputDataBlock(SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t order, int32_t scanFlag, bool createDummyCol) { if (pBlock->pBlockAgg != NULL) { - doSetInputDataBlockInfo(pExprSup, pBlock, order); + doSetInputDataBlockInfo(pExprSup, pBlock, order, scanFlag); } else { doSetInputDataBlock(pExprSup, pBlock, order, scanFlag, createDummyCol); } @@ -537,7 +540,7 @@ bool functionNeedToExecute(SqlFunctionCtx* pCtx) { return false; } - if (pCtx->scanFlag == REPEAT_SCAN) { + if (pCtx->scanFlag == PRE_SCAN) { return fmIsRepeatScanFunc(pCtx->functionId); } @@ -817,7 +820,7 @@ void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numO continue; } - if (fmIsWindowPseudoColumnFunc(pCtx[i].functionId)) { + if (pCtx[i].isPseudoFunc) { continue; } @@ -1075,7 +1078,7 @@ void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t numOfExpr pRow->numOfRows = pResInfo->numOfRes; } - if (fmIsNotNullOutputFunc(pCtx[j].functionId)) { + if (pCtx[j].isNotNullFunc) { returnNotNull = true; } } @@ -1199,9 +1202,15 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS } if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) { - 
ASSERT(pBlock->info.rows > 0); releaseBufPage(pBuf, page); - break; + + if (pBlock->info.rows <= 0 || pRow->numOfRows > pBlock->info.capacity) { + qError("error in copy data to ssdatablock, existed rows in block:%d, rows in pRow:%d, capacity:%d, %s", + pBlock->info.rows, pRow->numOfRows, pBlock->info.capacity, GET_TASKID(pTaskInfo)); + T_LONG_JMP(pTaskInfo->env, TSDB_CODE_APP_ERROR); + } else { + break; + } } pGroupResInfo->index += 1; @@ -1742,12 +1751,12 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize, const char* pKey) { int32_t code = 0; - _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); +// _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pAggSup->currentPageId = -1; pAggSup->resultRowSize = getResultRowSize(pCtx, numOfOutput); pAggSup->keyBuf = taosMemoryCalloc(1, keyBufSize + POINTER_BYTES + sizeof(int64_t)); - pAggSup->pResultRowHashTable = tSimpleHashInit(100, hashFn); + pAggSup->pResultRowHashTable = tSimpleHashInit(100, taosFastHash); if (pAggSup->keyBuf == NULL || pAggSup->pResultRowHashTable == NULL) { return TSDB_CODE_OUT_OF_MEMORY; @@ -1832,6 +1841,10 @@ void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) { taosMemoryFreeClear(pCtx[i].subsidiaries.buf); taosMemoryFree(pCtx[i].input.pData); taosMemoryFree(pCtx[i].input.pColumnDataAgg); + + if (pCtx[i].udfName != NULL) { + taosMemoryFree(pCtx[i].udfName); + } } taosMemoryFreeClear(pCtx); @@ -1962,6 +1975,22 @@ void destroyAggOperatorInfo(void* param) { taosMemoryFreeClear(param); } +static char* buildTaskId(uint64_t taskId, uint64_t queryId) { + char* p = taosMemoryMalloc(64); + + int32_t offset = 6; + memcpy(p, "TID:0x", offset); + offset += tintToHex(taskId, &p[offset]); + + memcpy(&p[offset], " QID:0x", 7); + offset += 7; + offset += tintToHex(queryId, &p[offset]); + + p[offset] = 0; + + return p; +} + static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPTR_EXEC_MODEL model, char* dbFName) { SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo)); if (pTaskInfo == NULL) { @@ -1972,16 +2001,13 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED); pTaskInfo->schemaInfo.dbname = strdup(dbFName); - pTaskInfo->id.queryId = queryId; pTaskInfo->execModel = model; pTaskInfo->pTableInfoList = tableListCreate(); pTaskInfo->stopInfo.pStopInfo = taosArrayInit(4, sizeof(SExchangeOpStopInfo)); pTaskInfo->pResultBlockList = taosArrayInit(128, POINTER_BYTES); - char* p = taosMemoryCalloc(1, 128); - snprintf(p, 128, "TID:0x%" PRIx64 " QID:0x%" PRIx64, taskId, queryId); - pTaskInfo->id.str = p; - + pTaskInfo->id.queryId = queryId; + pTaskInfo->id.str = buildTaskId(taskId, queryId); return pTaskInfo; } diff --git a/source/libs/executor/src/filloperator.c b/source/libs/executor/src/filloperator.c index f30fe30e35..483d94e8b1 100644 --- a/source/libs/executor/src/filloperator.c +++ b/source/libs/executor/src/filloperator.c @@ -832,10 +832,13 @@ static bool checkResult(SStreamFillSupporter* pFillSup, TSKEY ts, uint64_t group return true; } -static void buildFillResult(SResultRowData* pResRow, SStreamFillSupporter* pFillSup, TSKEY ts, SSDataBlock* pBlock) { +static bool buildFillResult(SResultRowData* pResRow, SStreamFillSupporter* pFillSup, TSKEY ts, SSDataBlock* pBlock) { + if (pBlock->info.rows >= pBlock->info.capacity) { + 
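// The result block is already full: report failure so the caller can flush it first;
// the pending fill position is left untouched and is re-applied on the next pass
// (see the FILL_POS_* handling in doStreamFillRange/doStreamFill below).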
return false; + } uint64_t groupId = pBlock->info.id.groupId; if (pFillSup->hasDelete && !checkResult(pFillSup, ts, groupId)) { - return; + return true; } for (int32_t i = 0; i < pFillSup->numOfAllCols; ++i) { SFillColInfo* pFillCol = pFillSup->pAllColInfo + i; @@ -853,6 +856,7 @@ static void buildFillResult(SResultRowData* pResRow, SStreamFillSupporter* pFill } } pBlock->info.rows++; + return true; } static bool hasRemainCalc(SStreamFillInfo* pFillInfo) { @@ -932,7 +936,9 @@ static void doStreamFillRange(SStreamFillInfo* pFillInfo, SStreamFillSupporter* } if (pFillInfo->pos == FILL_POS_START) { - buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes); + if (buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes)) { + pFillInfo->pos = FILL_POS_INVALID; + } } if (pFillInfo->type != TSDB_FILL_LINEAR) { doStreamFillNormal(pFillSup, pFillInfo, pRes); @@ -940,7 +946,9 @@ static void doStreamFillRange(SStreamFillInfo* pFillInfo, SStreamFillSupporter* doStreamFillLinear(pFillSup, pFillInfo, pRes); if (pFillInfo->pos == FILL_POS_MID) { - buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes); + if (buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes)) { + pFillInfo->pos = FILL_POS_INVALID; + } } if (pFillInfo->current > pFillInfo->end && pFillInfo->pLinearInfo->hasNext) { @@ -954,7 +962,9 @@ static void doStreamFillRange(SStreamFillInfo* pFillInfo, SStreamFillSupporter* } } if (pFillInfo->pos == FILL_POS_END) { - buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes); + if (buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes)) { + pFillInfo->pos = FILL_POS_INVALID; + } } } @@ -989,10 +999,6 @@ static void doStreamFillImpl(SOperatorInfo* pOperator) { uint64_t groupId = pBlock->info.id.groupId; SSDataBlock* pRes = pInfo->pRes; pRes->info.id.groupId = groupId; - if (hasRemainCalc(pFillInfo)) { - doStreamFillRange(pFillInfo, pFillSup, pRes); - } - SColumnInfoData* pTsCol = taosArrayGet(pInfo->pSrcBlock->pDataBlock, pInfo->primaryTsCol); TSKEY* tsCol = (TSKEY*)pTsCol->pData; @@ -1204,13 +1210,14 @@ static SSDataBlock* doStreamFill(SOperatorInfo* pOperator) { return NULL; } blockDataCleanup(pInfo->pRes); - if (pOperator->status == OP_RES_TO_RETURN) { - if (hasRemainCalc(pInfo->pFillInfo)) { - doStreamFillRange(pInfo->pFillInfo, pInfo->pFillSup, pInfo->pRes); - if (pInfo->pRes->info.rows > 0) { - return pInfo->pRes; - } + if (hasRemainCalc(pInfo->pFillInfo) || (pInfo->pFillInfo->pos != FILL_POS_INVALID && pInfo->pFillInfo->needFill == true )) { + doStreamFillRange(pInfo->pFillInfo, pInfo->pFillSup, pInfo->pRes); + if (pInfo->pRes->info.rows > 0) { + printDataBlock(pInfo->pRes, "stream fill"); + return pInfo->pRes; } + } + if (pOperator->status == OP_RES_TO_RETURN) { doDeleteFillFinalize(pOperator); if (pInfo->pRes->info.rows > 0) { printDataBlock(pInfo->pRes, "stream fill"); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index cd5c23f95c..9a56512683 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -703,7 +703,8 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) { if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) { setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED); - pTableScanInfo->base.scanFlag = REPEAT_SCAN; + pTableScanInfo->base.scanFlag = MAIN_SCAN; + pTableScanInfo->base.dataBlockLoadFlag = FUNC_DATA_REQUIRED_DATA_LOAD; qDebug("start to repeat ascending order scan data blocks due to query func 
required, %s", GET_TASKID(pTaskInfo)); // do prepare for the next round table scan operation @@ -729,7 +730,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) { if (pTableScanInfo->scanTimes < total) { setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED); - pTableScanInfo->base.scanFlag = REPEAT_SCAN; + pTableScanInfo->base.scanFlag = MAIN_SCAN; qDebug("%s start to repeat descending order scan data blocks", GET_TASKID(pTaskInfo)); tsdbReaderReset(pTableScanInfo->base.dataReader, &pTableScanInfo->base.cond); @@ -785,6 +786,10 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { if (code != TSDB_CODE_SUCCESS) { T_LONG_JMP(pTaskInfo->env, code); } + + if (pInfo->pResBlock->info.capacity > pOperator->resultInfo.capacity) { + pOperator->resultInfo.capacity = pInfo->pResBlock->info.capacity; + } } SSDataBlock* result = doGroupedTableScan(pOperator); @@ -877,8 +882,8 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, } pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]}; + pInfo->base.scanFlag = (pInfo->scanInfo.numOfAsc > 1) ? PRE_SCAN : MAIN_SCAN; - pInfo->base.scanFlag = MAIN_SCAN; pInfo->base.pdInfo.interval = extractIntervalInfo(pTableScanNode); pInfo->base.readHandle = *readHandle; pInfo->base.dataBlockLoadFlag = pTableScanNode->dataRequired; @@ -888,7 +893,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, initResultSizeInfo(&pOperator->resultInfo, 4096); pInfo->pResBlock = createDataBlockFromDescNode(pDescNode); - blockDataEnsureCapacity(pInfo->pResBlock, pOperator->resultInfo.capacity); + // blockDataEnsureCapacity(pInfo->pResBlock, pOperator->resultInfo.capacity); code = filterInitFromNode((SNode*)pTableScanNode->scan.node.pConditions, &pOperator->exprSupp.pFilterInfo, 0); if (code != TSDB_CODE_SUCCESS) { @@ -1175,6 +1180,20 @@ static SSDataBlock* doRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32 } } +static int32_t getPreSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endTs, uint64_t groupId, + SSessionKey* pKey) { + pKey->win.skey = startTs; + pKey->win.ekey = endTs; + pKey->groupId = groupId; + + SStreamStateCur* pCur = streamStateSessionSeekKeyCurrentPrev(pAggSup->pState, pKey); + int32_t code = streamStateSessionGetKVByCur(pCur, pKey, NULL, 0); + if (code != TSDB_CODE_SUCCESS) { + SET_SESSION_WIN_KEY_INVALID(pKey); + } + return code; +} + static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, SSDataBlock* pDestBlock) { blockDataCleanup(pDestBlock); if (pSrcBlock->info.rows == 0) { @@ -1210,7 +1229,14 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr } SSessionKey endWin = {0}; getCurSessionWindow(pInfo->windowSup.pStreamAggSup, endData[i], endData[i], groupId, &endWin); - ASSERT(!IS_INVALID_SESSION_WIN_KEY(endWin)); + if (IS_INVALID_SESSION_WIN_KEY(endWin)) { + getPreSessionWindow(pInfo->windowSup.pStreamAggSup, endData[i], endData[i], groupId, &endWin); + } + if (IS_INVALID_SESSION_WIN_KEY(startWin)) { + // window has been closed. + qError("generate session scan range failed. 
rang start:%" PRIx64 ", end:%" PRIx64, startData[i], endData[i]); + continue; + } colDataAppend(pDestStartCol, i, (const char*)&startWin.win.skey, false); colDataAppend(pDestEndCol, i, (const char*)&endWin.win.ekey, false); @@ -1433,7 +1459,7 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock dumyInfo.cur.pageId = -1; bool isClosed = false; STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX}; - bool overDue = isOverdue(tsCol[rowId], &pInfo->twAggSup); + bool overDue = isOverdue(tsCol[rowId], &pInfo->twAggSup); if (pInfo->igExpired && overDue) { continue; } @@ -1607,19 +1633,20 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) { while (1) { SFetchRet ret = {0}; - tqNextBlock(pInfo->tqReader, &ret); + if (tqNextBlock(pInfo->tqReader, &ret) < 0) { + qError("failed to get next log block since %s", terrstr()); + } if (ret.fetchType == FETCH_TYPE__DATA) { blockDataCleanup(pInfo->pRes); - if (setBlockIntoRes(pInfo, &ret.data, true) < 0) { - ASSERT(0); - } + setBlockIntoRes(pInfo, &ret.data, true); if (pInfo->pRes->info.rows > 0) { pOperator->status = OP_EXEC_RECV; qDebug("queue scan log return %d rows", pInfo->pRes->info.rows); return pInfo->pRes; } } else if (ret.fetchType == FETCH_TYPE__META) { - ASSERT(0); + qError("unexpected ret.fetchType:%d", ret.fetchType); + continue; // pTaskInfo->streamInfo.lastStatus = ret.offset; // pTaskInfo->streamInfo.metaBlk = ret.meta; // return NULL; @@ -1646,7 +1673,7 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { return NULL; #endif } else { - ASSERT(0); + qError("unexpected streamInfo prepare type: %d", pTaskInfo->streamInfo.prepareStatus.type); return NULL; } } @@ -2305,13 +2332,6 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys pTSInfo->scanMode = TABLE_SCAN__TABLE_ORDER; pTSInfo->base.dataReader = NULL; pTaskInfo->streamInfo.lastStatus.uid = -1; -// code = tsdbReaderOpen(pHandle->vnode, &pTSInfo->base.cond, pList, num, pTSInfo->pResBlock, -// &pTSInfo->base.dataReader, NULL); -// if (code != 0) { -// terrno = code; -// destroyTableScanOperatorInfo(pTableScanOp); -// goto _error; -// } } if (pHandle->initTqReader) { @@ -2786,6 +2806,10 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) { SSDataBlock* pBlock = NULL; while (pInfo->tableStartIndex < tableListSize) { + if (isTaskKilled(pTaskInfo)) { + T_LONG_JMP(pTaskInfo->env, pTaskInfo->code); + } + pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pInfo->pResBlock, pOperator->resultInfo.capacity, pOperator); if (pBlock != NULL) { diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index 73abbe2f89..1b2f135064 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -2254,7 +2254,7 @@ SOperatorInfo* createDataBlockInfoScanOperator(SReadHandle* readHandle, SBlockDi } pInfo->readHandle = *readHandle; - pInfo->uid = pBlockScanNode->suid; + pInfo->uid = (pBlockScanNode->suid != 0)? 
pBlockScanNode->suid:pBlockScanNode->uid; int32_t numOfCols = 0; SExprInfo* pExprInfo = createExprInfo(pBlockScanNode->pScanPseudoCols, NULL, &numOfCols); diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 792225e16c..d379b9dfeb 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -119,8 +119,8 @@ static void doKeepNewWindowStartInfo(SWindowRowsSup* pRowSup, const int64_t* tsL pRowSup->groupId = groupId; } -FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey, - int32_t pos, int32_t order, int64_t* pData) { +FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey, int32_t pos, + int32_t order, int64_t* pData) { int32_t forwardRows = 0; if (order == TSDB_ORDER_ASC) { @@ -2853,6 +2853,8 @@ int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo* void initDummyFunction(SqlFunctionCtx* pDummy, SqlFunctionCtx* pCtx, int32_t nums) { for (int i = 0; i < nums; i++) { pDummy[i].functionId = pCtx[i].functionId; + pDummy[i].isNotNullFunc = pCtx[i].isNotNullFunc; + pDummy[i].isPseudoFunc = pCtx[i].isPseudoFunc; } } @@ -3377,9 +3379,11 @@ static void copyDeleteWindowInfo(SArray* pResWins, SSHashObj* pStDeleted) { } } +// the allocated memory comes from outer function. void initGroupResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList) { pGroupResInfo->pRows = pArrayList; pGroupResInfo->index = 0; + pGroupResInfo->pBuf = NULL; } void doBuildSessionResult(SOperatorInfo* pOperator, SStreamState* pState, SGroupResInfo* pGroupResInfo, @@ -3390,8 +3394,7 @@ void doBuildSessionResult(SOperatorInfo* pOperator, SStreamState* pState, SGroup blockDataCleanup(pBlock); if (!hasRemainResults(pGroupResInfo)) { - taosArrayDestroy(pGroupResInfo->pRows); - pGroupResInfo->pRows = NULL; + cleanupGroupResInfo(pGroupResInfo); return; } @@ -4826,6 +4829,12 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { tSimpleHashCleanup(pInfo->pUpdatedMap); pInfo->pUpdatedMap = NULL; +#if 0 + char* pBuf = streamStateIntervalDump(pInfo->pState); + qDebug("===stream===interval state%s", pBuf); + taosMemoryFree(pBuf); +#endif + doBuildDeleteResult(pInfo, pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes); if (pInfo->pDelRes->info.rows > 0) { printDataBlock(pInfo->pDelRes, "single interval delete"); diff --git a/source/libs/executor/src/tlinearhash.c b/source/libs/executor/src/tlinearhash.c index d97f81c994..2cba3855c7 100644 --- a/source/libs/executor/src/tlinearhash.c +++ b/source/libs/executor/src/tlinearhash.c @@ -123,8 +123,6 @@ static int32_t doAddToBucket(SLHashObj* pHashObj, SLHashBucket* pBucket, int32_t } static void doRemoveFromBucket(SFilePage* pPage, SLHashNode* pNode, SLHashBucket* pBucket) { - ASSERT(pPage != NULL && pNode != NULL && pBucket->size >= 1); - int32_t len = GET_LHASH_NODE_LEN(pNode); char* p = (char*)pNode + len; @@ -301,8 +299,6 @@ void* tHashCleanup(SLHashObj* pHashObj) { } int32_t tHashPut(SLHashObj* pHashObj, const void* key, size_t keyLen, void* data, size_t size) { - ASSERT(pHashObj != NULL && key != NULL); - if (pHashObj->bits == 0) { SLHashBucket* pBucket = pHashObj->pBucket[0]; doAddToBucket(pHashObj, pBucket, 0, key, keyLen, data, size); @@ -363,14 +359,12 @@ int32_t tHashPut(SLHashObj* pHashObj, const void* key, size_t keyLen, void* data if (v1 != splitBucketId) { // place it into the new bucket ASSERT(v1 == newBucketId); // 
printf("move key:%d to 0x%x bucket, remain items:%d\n", *(int32_t*)k, v1, pBucket->size - 1); - SLHashBucket* pNewBucket = pHashObj->pBucket[newBucketId]; doAddToBucket(pHashObj, pNewBucket, newBucketId, (void*)GET_LHASH_NODE_KEY(pNode), pNode->keyLen, GET_LHASH_NODE_KEY(pNode), pNode->dataLen); doRemoveFromBucket(p, pNode, pBucket); } else { // printf("check key:%d, located into: %d, skip it\n", *(int*) k, v1); - int32_t nodeSize = GET_LHASH_NODE_LEN(pStart); pStart += nodeSize; } @@ -385,7 +379,6 @@ int32_t tHashPut(SLHashObj* pHashObj, const void* key, size_t keyLen, void* data } char* tHashGet(SLHashObj* pHashObj, const void* key, size_t keyLen) { - ASSERT(pHashObj != NULL && key != NULL && keyLen > 0); int32_t hashv = pHashObj->hashFn(key, keyLen); int32_t bucketId = doGetBucketIdFromHashVal(hashv, pHashObj->bits); diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 1a0437c26a..0257b3d5e6 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -504,27 +504,45 @@ static int32_t translateTimezone(SFunctionNode* pFunc, char* pErrBuf, int32_t le } static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - if (2 != LIST_LENGTH(pFunc->pParameterList)) { + int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); + if (numOfParams < 2 || numOfParams > 11) { return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); } - // param1 - SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1); - - if (pValue->datum.i < 0 || pValue->datum.i > 100) { - return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); - } - - pValue->notReserved = true; uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; - uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type; - if (!IS_NUMERIC_TYPE(para1Type) || (!IS_SIGNED_NUMERIC_TYPE(para2Type) && !IS_UNSIGNED_NUMERIC_TYPE(para2Type))) { + if (!IS_NUMERIC_TYPE(para1Type)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } + + for (int32_t i = 1; i < numOfParams; ++i) { + SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, i); + pValue->notReserved = true; + + uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type; + if (!IS_NUMERIC_TYPE(paraType)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + + double v = 0; + if (IS_INTEGER_TYPE(paraType)) { + v = (double)pValue->datum.i; + } else { + v = pValue->datum.d; + } + + if (v < 0 || v > 100) { + return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName); + } + } + // set result type - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; + if (numOfParams > 2) { + pFunc->node.resType = (SDataType){.bytes = 512, .type = TSDB_DATA_TYPE_VARCHAR}; + } else { + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; + } return TSDB_CODE_SUCCESS; } @@ -2273,8 +2291,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "percentile", .type = FUNCTION_TYPE_PERCENTILE, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_REPEAT_SCAN_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_REPEAT_SCAN_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_FORBID_STREAM_FUNC, .translateFunc = translatePercentile, + .dataRequiredFunc = 
statisDataRequired, .getEnvFunc = getPercentileFuncEnv, .initFunc = percentileFunctionSetup, .processFunc = percentileFunction, diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 0c491addd5..8e52ae5f30 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -789,17 +789,46 @@ int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); pEntryInfo->isNullRes = (pEntryInfo->numOfRes == 0) ? 1 : 0; - if (pCol->info.type == TSDB_DATA_TYPE_FLOAT) { - float v = GET_FLOAT_VAL(&pRes->v); - colDataAppend(pCol, currentRow, (const char*)&v, pEntryInfo->isNullRes); + // NOTE: do nothing change it, for performance issue + if (!pEntryInfo->isNullRes) { + switch (pCol->info.type) { + case TSDB_DATA_TYPE_UBIGINT: + case TSDB_DATA_TYPE_BIGINT: + ((int64_t*)pCol->pData)[currentRow] = pRes->v; +// colDataAppendInt64(pCol, currentRow, &pRes->v); + break; + case TSDB_DATA_TYPE_UINT: + case TSDB_DATA_TYPE_INT: + colDataAppendInt32(pCol, currentRow, (int32_t*)&pRes->v); + break; + case TSDB_DATA_TYPE_USMALLINT: + case TSDB_DATA_TYPE_SMALLINT: + colDataAppendInt16(pCol, currentRow, (int16_t*)&pRes->v); + break; + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_UTINYINT: + case TSDB_DATA_TYPE_TINYINT: + colDataAppendInt8(pCol, currentRow, (int8_t*)&pRes->v); + break; + case TSDB_DATA_TYPE_DOUBLE: + colDataAppendDouble(pCol, currentRow, (double*)&pRes->v); + break; + case TSDB_DATA_TYPE_FLOAT: { + float v = GET_FLOAT_VAL(&pRes->v); + colDataAppendFloat(pCol, currentRow, &v); + break; + } + } } else { - colDataAppend(pCol, currentRow, (const char*)&pRes->v, pEntryInfo->isNullRes); + colDataAppendNULL(pCol, currentRow); } - if (pEntryInfo->numOfRes > 0) { - code = setSelectivityValue(pCtx, pBlock, &pRes->tuplePos, currentRow); - } else { - code = setSelectivityValue(pCtx, pBlock, &pRes->nullTuplePos, currentRow); + if (pCtx->subsidiaries.num > 0) { + if (pEntryInfo->numOfRes > 0) { + code = setSelectivityValue(pCtx, pBlock, &pRes->tuplePos, currentRow); + } else { + code = setSelectivityValue(pCtx, pBlock, &pRes->nullTuplePos, currentRow); + } } return code; @@ -1570,7 +1599,7 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) { int32_t type = pCol->info.type; SPercentileInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo); - if (pCtx->scanFlag == REPEAT_SCAN && pInfo->stage == 0) { + if (pCtx->scanFlag == MAIN_SCAN && pInfo->stage == 0) { pInfo->stage += 1; // all data are null, set it completed @@ -1653,26 +1682,67 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) { } int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { - SVariant* pVal = &pCtx->param[1].param; - int32_t code = 0; - double v = 0; - - GET_TYPED_DATA(v, double, pVal->nType, &pVal->i); - SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); SPercentileInfo* ppInfo = (SPercentileInfo*)GET_ROWCELL_INTERBUF(pResInfo); + int32_t code = 0; + double v = 0; + tMemBucket* pMemBucket = ppInfo->pMemBucket; - if (pMemBucket != NULL && pMemBucket->total > 0) { // check for null - code = getPercentile(pMemBucket, v, &ppInfo->result); + if (pMemBucket == NULL || pMemBucket->total == 0) { // check for null + code = TSDB_CODE_FAILED; + goto _fin_error; } + if (pCtx->numOfParams > 2) { + char buf[512] = {0}; + size_t len = 1; + + varDataVal(buf)[0] = '['; + for (int32_t i = 1; i < pCtx->numOfParams; ++i) { + SVariant* pVal = &pCtx->param[i].param; + + GET_TYPED_DATA(v, double, 
pVal->nType, &pVal->i); + + int32_t code = getPercentile(pMemBucket, v, &ppInfo->result); + if (code != TSDB_CODE_SUCCESS) { + goto _fin_error; + } + + if (i == pCtx->numOfParams - 1) { + len += snprintf(varDataVal(buf) + len, sizeof(buf) - VARSTR_HEADER_SIZE - len, "%.6lf]", ppInfo->result); + } else { + len += snprintf(varDataVal(buf) + len, sizeof(buf) - VARSTR_HEADER_SIZE - len, "%.6lf, ", ppInfo->result); + } + } + + int32_t slotId = pCtx->pExpr->base.resSchema.slotId; + SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); + + varDataSetLen(buf, len); + colDataAppend(pCol, pBlock->info.rows, buf, false); + + tMemBucketDestroy(pMemBucket); + return pResInfo->numOfRes; + } else { + SVariant* pVal = &pCtx->param[1].param; + + GET_TYPED_DATA(v, double, pVal->nType, &pVal->i); + + code = getPercentile(pMemBucket, v, &ppInfo->result); + if (code != TSDB_CODE_SUCCESS) { + goto _fin_error; + } + + tMemBucketDestroy(pMemBucket); + return functionFinalize(pCtx, pBlock); + } + +_fin_error: + tMemBucketDestroy(pMemBucket); - if (code != TSDB_CODE_SUCCESS) { - return code; - } + return code; - return functionFinalize(pCtx, pBlock); } bool getApercentileFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) { diff --git a/source/libs/function/src/detail/tminmax.c b/source/libs/function/src/detail/tminmax.c index a511ca97f1..3ca1c06303 100644 --- a/source/libs/function/src/detail/tminmax.c +++ b/source/libs/function/src/detail/tminmax.c @@ -61,6 +61,8 @@ } \ } +#define GET_INVOKE_INTRINSIC_THRESHOLD(_bits, _bytes) ((_bits) / ((_bytes) << 3u)) + static void calculateRounds(int32_t numOfRows, int32_t bytes, int32_t* remainder, int32_t* rounds, int32_t* width) { const int32_t bitWidth = 256; @@ -372,7 +374,7 @@ static void handleInt8Col(const void* data, int32_t start, int32_t numOfRows, SM pBuf->v = i8VectorCmpAVX2(data, numOfRows, isMinFunc, signVal); } else { if (!pBuf->assign) { - pBuf->v = ((int8_t*)data)[0]; + pBuf->v = ((int8_t*)data)[start]; } if (signVal) { @@ -406,7 +408,7 @@ static void handleInt16Col(const void* data, int32_t start, int32_t numOfRows, S pBuf->v = i16VectorCmpAVX2(data, numOfRows, isMinFunc, signVal); } else { if (!pBuf->assign) { - pBuf->v = ((int16_t*)data)[0]; + pBuf->v = ((int16_t*)data)[start]; } if (signVal) { @@ -440,7 +442,7 @@ static void handleInt32Col(const void* data, int32_t start, int32_t numOfRows, S pBuf->v = i32VectorCmpAVX2(data, numOfRows, isMinFunc, signVal); } else { if (!pBuf->assign) { - pBuf->v = ((int32_t*)data)[0]; + pBuf->v = ((int32_t*)data)[start]; } if (signVal) { @@ -470,7 +472,7 @@ static void handleInt32Col(const void* data, int32_t start, int32_t numOfRows, S static void handleInt64Col(const void* data, int32_t start, int32_t numOfRows, SMinmaxResInfo* pBuf, bool isMinFunc, bool signVal) { if (!pBuf->assign) { - pBuf->v = ((int64_t*)data)[0]; + pBuf->v = ((int64_t*)data)[start]; } if (signVal) { @@ -504,7 +506,7 @@ static void handleFloatCol(SColumnInfoData* pCol, int32_t start, int32_t numOfRo *val = floatVectorCmpAVX(pData, numOfRows, isMinFunc); } else { if (!pBuf->assign) { - *val = pData[0]; + *val = pData[start]; } if (isMinFunc) { // min @@ -535,7 +537,7 @@ static void handleDoubleCol(SColumnInfoData* pCol, int32_t start, int32_t numOfR *val = (double)doubleVectorCmpAVX(pData, numOfRows, isMinFunc); } else { if (!pBuf->assign) { - *val = pData[0]; + *val = pData[start]; } if (isMinFunc) { // min @@ -700,8 +702,29 @@ static void doExtractVal(SColumnInfoData* pCol, int32_t i, int32_t end, SqlFunct } } +static int32_t 
saveRelatedTuple(SqlFunctionCtx* pCtx, SInputColumnInfoData* pInput, int32_t index, void* tval) { + SColumnInfoData* pCol = pInput->pData[0]; + + SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); + SMinmaxResInfo* pBuf = GET_ROWCELL_INTERBUF(pResInfo); + + int32_t code = 0; + if (pCtx->subsidiaries.num > 0) { + index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); + if (index >= 0) { + code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + } + } + + return code; +} + int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) { int32_t numOfElems = 0; + int32_t code = TSDB_CODE_SUCCESS; SInputColumnInfoData* pInput = &pCtx->input; SColumnDataAgg* pAgg = pInput->pColumnDataAgg[0]; @@ -719,6 +742,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) // data in current data block are qualified to the query if (pInput->colDataSMAIsSet) { + numOfElems = pInput->numOfRows - pAgg->numOfNull; if (numOfElems == 0) { goto _over; @@ -734,15 +758,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) pBuf->v = GET_INT64_VAL(tval); } - if (pCtx->subsidiaries.num > 0) { - index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - if (index >= 0) { - int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } - } + code = saveRelatedTuple(pCtx, pInput, index, tval); } else { if (IS_SIGNED_NUMERIC_TYPE(type)) { int64_t prev = 0; @@ -751,15 +767,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) int64_t val = GET_INT64_VAL(tval); if ((prev < val) ^ isMinFunc) { GET_INT64_VAL(&pBuf->v) = val; - if (pCtx->subsidiaries.num > 0) { - index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - if (index >= 0) { - int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } - } + code = saveRelatedTuple(pCtx, pInput, index, tval); } } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { uint64_t prev = 0; @@ -768,15 +776,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) uint64_t val = GET_UINT64_VAL(tval); if ((prev < val) ^ isMinFunc) { GET_UINT64_VAL(&pBuf->v) = val; - if (pCtx->subsidiaries.num > 0) { - index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - if (index >= 0) { - int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } - } + code = saveRelatedTuple(pCtx, pInput, index, tval); } } else if (type == TSDB_DATA_TYPE_DOUBLE) { double prev = 0; @@ -785,15 +785,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) double val = GET_DOUBLE_VAL(tval); if ((prev < val) ^ isMinFunc) { GET_DOUBLE_VAL(&pBuf->v) = val; - if (pCtx->subsidiaries.num > 0) { - index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - if (index >= 0) { - int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } - } + code = saveRelatedTuple(pCtx, pInput, index, tval); } } else if (type == TSDB_DATA_TYPE_FLOAT) { float prev = 0; @@ -802,16 +794,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) float val = GET_DOUBLE_VAL(tval); if ((prev < val) ^ isMinFunc) 
{ GET_FLOAT_VAL(&pBuf->v) = val; - } - - if (pCtx->subsidiaries.num > 0) { - index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval); - if (index >= 0) { - int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - } + code = saveRelatedTuple(pCtx, pInput, index, tval); } } } @@ -825,14 +808,51 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) int32_t numOfRows = pInput->numOfRows; int32_t end = start + numOfRows; - if (pCol->hasNull || numOfRows < 32 || pCtx->subsidiaries.num > 0) { + // clang-format off + int32_t threshold[] = { + //NULL, BOOL, TINYINT, SMALLINT, INT, BIGINT, FLOAT, DOUBLE, VARCHAR, TIMESTAMP, NCHAR, + INT32_MAX, INT32_MAX, 32, 16, 8, 4, 8, 4, INT32_MAX, INT32_MAX, INT32_MAX, + // UTINYINT,USMALLINT, UINT, UBIGINT, JSON, VARBINARY, DECIMAL, BLOB, MEDIUMBLOB, BINARY + 32, 16, 8, 4, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, + }; + // clang-format on + + if (pCol->hasNull || numOfRows < threshold[pCol->info.type] || pCtx->subsidiaries.num > 0) { int32_t i = findFirstValPosition(pCol, start, numOfRows); if ((i < end) && (!pBuf->assign)) { - memcpy(&pBuf->v, pCol->pData + (pCol->info.bytes * i), pCol->info.bytes); + char* p = pCol->pData + pCol->info.bytes * i; + + switch (type) { + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_UBIGINT: + case TSDB_DATA_TYPE_BIGINT: + pBuf->v = *(int64_t*)p; + break; + case TSDB_DATA_TYPE_UINT: + case TSDB_DATA_TYPE_INT: + pBuf->v = *(int32_t*)p; + break; + case TSDB_DATA_TYPE_USMALLINT: + case TSDB_DATA_TYPE_SMALLINT: + pBuf->v = *(int16_t*)p; + break; + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_UTINYINT: + case TSDB_DATA_TYPE_TINYINT: + pBuf->v = *(int8_t*)p; + break; + case TSDB_DATA_TYPE_FLOAT: { + *(float*)&pBuf->v = *(float*)p; + break; + } + default: + memcpy(&pBuf->v, p, pCol->info.bytes); + break; + } if (pCtx->subsidiaries.num > 0) { - int32_t code = saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); + code = saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -849,7 +869,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) } else { numOfElems = numOfRows; - switch (pCol->info.type) { + switch (type) { case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: { handleInt8Col(pCol->pData, start, numOfRows, pBuf, isMinFunc, true); @@ -898,13 +918,14 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) _over: if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved) { - int32_t code = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos); + code = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos); if (code != TSDB_CODE_SUCCESS) { return code; } + pBuf->nullTupleSaved = true; } *nElems = numOfElems; - return TSDB_CODE_SUCCESS; + return code; } diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index 97fe94b513..de381fadbd 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -354,8 +354,6 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, const char *data, int32_t dataT * in memory bucket, we only accept data array list */ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) { - ASSERT(pBucket != NULL && data != NULL && size > 0); - int32_t count = 0; int32_t bytes = 
pBucket->bytes; for (int32_t i = 0; i < size; ++i) { diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index c9fa70ff11..b9e72847a1 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -812,7 +812,7 @@ int32_t convertUdfColumnToDataBlock(SUdfColumn *udfCol, SSDataBlock *block) { block->info.hasVarCol = IS_VAR_DATA_TYPE(udfCol->colMeta.type); block->pDataBlock = taosArrayInit(1, sizeof(SColumnInfoData)); - taosArraySetSize(block->pDataBlock, 1); + taosArrayPush(block->pDataBlock, &(SColumnInfoData){0}); SColumnInfoData *col = taosArrayGet(block->pDataBlock, 0); SUdfColumnMeta *meta = &udfCol->colMeta; col->info.precision = meta->precision; diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index e94b2bba6c..ad80508c64 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -315,6 +315,11 @@ static int32_t tlvDecodeImpl(STlv* pTlv, void* pValue, int32_t len) { } static int32_t tlvDecodeValueImpl(STlvDecoder* pDecoder, void* pValue, int32_t len) { + // compatible with lower version messages + if (pDecoder->bufSize == pDecoder->offset) { + memset(pValue, 0, len); + return TSDB_CODE_SUCCESS; + } if (len > pDecoder->bufSize - pDecoder->offset) { return TSDB_CODE_FAILED; } diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index 19992498a0..b7f3465a4a 100644 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -568,6 +568,7 @@ stream_options(A) ::= stream_options(B) TRIGGER MAX_DELAY duration_literal(C). stream_options(A) ::= stream_options(B) WATERMARK duration_literal(C). { ((SStreamOptions*)B)->pWatermark = releaseRawExprNode(pCxt, C); A = B; } stream_options(A) ::= stream_options(B) IGNORE EXPIRED NK_INTEGER(C). { ((SStreamOptions*)B)->ignoreExpired = taosStr2Int8(C.z, NULL, 10); A = B; } stream_options(A) ::= stream_options(B) FILL_HISTORY NK_INTEGER(C). { ((SStreamOptions*)B)->fillHistory = taosStr2Int8(C.z, NULL, 10); A = B; } +stream_options(A) ::= stream_options(B) DELETE_MARK duration_literal(C). { ((SStreamOptions*)B)->pDeleteMark = releaseRawExprNode(pCxt, C); A = B; } stream_options(A) ::= stream_options(B) IGNORE UPDATE NK_INTEGER(C). { ((SStreamOptions*)B)->ignoreUpdate = taosStr2Int8(C.z, NULL, 10); A = B; } subtable_opt(A) ::= . 
{ A = NULL; } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 60c0217a97..7b6f795ecf 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -666,6 +666,9 @@ static uint8_t getPrecisionFromCurrStmt(SNode* pCurrStmt, uint8_t defaultVal) { if (isSetOperator(pCurrStmt)) { return ((SSetOperator*)pCurrStmt)->precision; } + if (NULL != pCurrStmt && QUERY_NODE_CREATE_STREAM_STMT == nodeType(pCurrStmt)) { + return getPrecisionFromCurrStmt(((SCreateStreamStmt*)pCurrStmt)->pQuery, defaultVal); + } return defaultVal; } @@ -1464,6 +1467,15 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; + SNode* pTable = pSelect->pFromTable; + + if ((NULL != pTable && (QUERY_NODE_REAL_TABLE != nodeType(pTable) || + (TSDB_CHILD_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType && + TSDB_NORMAL_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType)))) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE, + "%s is only supported in single table query", pFunc->functionName); + } + if (pSelect->hasAggFuncs || pSelect->hasMultiRowsFunc || pSelect->hasIndefiniteRowsFunc) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } @@ -5634,16 +5646,6 @@ static bool crossTableWithUdaf(SSelectStmt* pSelect) { } static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pStmt) { - if (NULL != pStmt->pOptions->pWatermark && - (DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pStmt->pOptions->pWatermark))) { - return pCxt->errCode; - } - - if (NULL != pStmt->pOptions->pDelay && - (DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pStmt->pOptions->pDelay))) { - return pCxt->errCode; - } - if (NULL == pStmt->pQuery) { return TSDB_CODE_SUCCESS; } @@ -5951,6 +5953,7 @@ static int32_t adjustOrderOfProjections(STranslateContext* pCxt, SNodeList* pCol } int32_t code = TSDB_CODE_SUCCESS; + bool hasPrimaryKey = false; SNode* pCol = NULL; SNode* pProj = NULL; FORBOTH(pCol, pCols, pProj, *pProjections) { @@ -5964,6 +5967,14 @@ static int32_t adjustOrderOfProjections(STranslateContext* pCxt, SNodeList* pCol if (TSDB_CODE_SUCCESS != code) { break; } + if (PRIMARYKEY_TIMESTAMP_COL_ID == pSchema->colId) { + hasPrimaryKey = true; + } + } + + if (TSDB_CODE_SUCCESS == code && !hasPrimaryKey) { + code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMNS_NUM, + "primary timestamp column can not be null"); } SNodeList* pNewProjections = NULL; @@ -6006,7 +6017,15 @@ static int32_t adjustProjectionsForExistTable(STranslateContext* pCxt, SCreateSt return adjustOrderOfProjections(pCxt, pStmt->pCols, pMeta, &pSelect->pProjectionList, pReq); } +static bool isGroupIdTagStream(const STableMeta* pMeta, SNodeList* pTags) { + return (NULL == pTags && 1 == pMeta->tableInfo.numOfTags && TSDB_DATA_TYPE_UBIGINT == getTableTagSchema(pMeta)->type); +} + static int32_t adjustDataTypeOfTags(STranslateContext* pCxt, const STableMeta* pMeta, SNodeList* pTags) { + if (isGroupIdTagStream(pMeta, pTags)) { + return TSDB_CODE_SUCCESS; + } + if (getNumOfTags(pMeta) != LIST_LENGTH(pTags)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMNS_NUM, "Illegal number of tags"); } @@ -6207,6 +6226,17 @@ static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt return code; } +static 
int32_t translateStreamOptions(STranslateContext* pCxt, SCreateStreamStmt* pStmt) { + pCxt->pCurrStmt = (SNode*)pStmt; + SStreamOptions* pOptions = pStmt->pOptions; + if ((NULL != pOptions->pWatermark && (DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pOptions->pWatermark))) || + (NULL != pOptions->pDeleteMark && (DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pOptions->pDeleteMark))) || + (NULL != pOptions->pDelay && (DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pOptions->pDelay)))) { + return pCxt->errCode; + } + return TSDB_CODE_SUCCESS; +} + static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq) { pReq->igExists = pStmt->ignoreExists; @@ -6228,10 +6258,16 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* } } + if (TSDB_CODE_SUCCESS == code) { + code = translateStreamOptions(pCxt, pStmt); + } + if (TSDB_CODE_SUCCESS == code) { pReq->triggerType = pStmt->pOptions->triggerType; pReq->maxDelay = (NULL != pStmt->pOptions->pDelay ? ((SValueNode*)pStmt->pOptions->pDelay)->datum.i : 0); pReq->watermark = (NULL != pStmt->pOptions->pWatermark ? ((SValueNode*)pStmt->pOptions->pWatermark)->datum.i : 0); + pReq->deleteMark = + (NULL != pStmt->pOptions->pDeleteMark ? ((SValueNode*)pStmt->pOptions->pDeleteMark)->datum.i : 0); pReq->fillHistory = pStmt->pOptions->fillHistory; pReq->igExpired = pStmt->pOptions->ignoreExpired; if (pReq->createStb) { diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c index 836d1c5de9..f38233aa76 100644 --- a/source/libs/parser/src/sql.c +++ b/source/libs/parser/src/sql.c @@ -139,17 +139,17 @@ typedef union { #define ParseCTX_FETCH #define ParseCTX_STORE #define YYFALLBACK 1 -#define YYNSTATE 741 -#define YYNRULE 562 +#define YYNSTATE 742 +#define YYNRULE 563 #define YYNTOKEN 328 -#define YY_MAX_SHIFT 740 -#define YY_MIN_SHIFTREDUCE 1099 -#define YY_MAX_SHIFTREDUCE 1660 -#define YY_ERROR_ACTION 1661 -#define YY_ACCEPT_ACTION 1662 -#define YY_NO_ACTION 1663 -#define YY_MIN_REDUCE 1664 -#define YY_MAX_REDUCE 2225 +#define YY_MAX_SHIFT 741 +#define YY_MIN_SHIFTREDUCE 1101 +#define YY_MAX_SHIFTREDUCE 1663 +#define YY_ERROR_ACTION 1664 +#define YY_ACCEPT_ACTION 1665 +#define YY_NO_ACTION 1666 +#define YY_MIN_REDUCE 1667 +#define YY_MAX_REDUCE 2229 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -218,281 +218,281 @@ typedef union { *********** Begin parsing tables **********************************************/ #define YY_ACTTAB_COUNT (2749) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 1937, 2201, 1796, 605, 480, 2196, 481, 1700, 489, 1807, - /* 10 */ 481, 1700, 45, 43, 1588, 1935, 617, 31, 176, 178, - /* 20 */ 378, 2200, 1437, 38, 37, 2197, 2199, 44, 42, 41, - /* 30 */ 40, 39, 1858, 1518, 140, 1435, 1462, 2037, 1871, 346, - /* 40 */ 1920, 2023, 38, 37, 611, 356, 44, 42, 41, 40, - /* 50 */ 39, 423, 2019, 2201, 1869, 38, 37, 2196, 1513, 44, - /* 60 */ 42, 41, 40, 39, 18, 498, 383, 1465, 2055, 1864, - /* 70 */ 1866, 1443, 1662, 2200, 167, 605, 644, 2197, 2198, 1772, - /* 80 */ 1151, 2005, 1150, 646, 45, 43, 2015, 2021, 359, 568, - /* 90 */ 1132, 329, 378, 2196, 1437, 220, 14, 640, 339, 181, - /* 100 */ 2133, 2134, 548, 138, 2138, 1518, 140, 1435, 2202, 182, - /* 110 */ 600, 1152, 2036, 2197, 594, 546, 2072, 544, 737, 323, - /* 120 */ 2038, 650, 2040, 2041, 645, 643, 640, 631, 2090, 1134, - /* 130 */ 1513, 1137, 1138, 1520, 1521, 629, 
18, 479, 391, 1547, - /* 140 */ 484, 1706, 390, 1443, 1687, 1259, 672, 671, 670, 1263, - /* 150 */ 669, 1265, 1266, 668, 1268, 665, 176, 1274, 662, 1276, - /* 160 */ 1277, 659, 656, 1493, 1503, 1937, 605, 615, 14, 1519, - /* 170 */ 1522, 266, 2133, 604, 381, 133, 603, 369, 1921, 2196, - /* 180 */ 1934, 617, 161, 568, 1438, 616, 1436, 2196, 2005, 357, - /* 190 */ 737, 1820, 629, 269, 592, 182, 1548, 140, 1869, 2197, - /* 200 */ 594, 498, 2202, 182, 629, 1520, 1521, 2197, 594, 588, - /* 210 */ 1441, 1442, 246, 1492, 1495, 1496, 1497, 1498, 1499, 1500, - /* 220 */ 1501, 1502, 642, 638, 1511, 1512, 1514, 1515, 1516, 1517, - /* 230 */ 2, 61, 496, 92, 1930, 1493, 1503, 583, 106, 684, - /* 240 */ 122, 1519, 1522, 121, 120, 119, 118, 117, 116, 115, - /* 250 */ 114, 113, 141, 1592, 350, 166, 1438, 1676, 1436, 1462, - /* 260 */ 1810, 607, 180, 2133, 2134, 1461, 138, 2138, 48, 34, - /* 270 */ 376, 1542, 1543, 1544, 1545, 1546, 1550, 1551, 1552, 1553, - /* 280 */ 48, 61, 1441, 1442, 1219, 1492, 1495, 1496, 1497, 1498, - /* 290 */ 1499, 1500, 1501, 1502, 642, 638, 1511, 1512, 1514, 1515, - /* 300 */ 1516, 1517, 2, 2023, 11, 45, 43, 44, 42, 41, - /* 310 */ 40, 39, 1462, 378, 2019, 1437, 351, 740, 349, 348, - /* 320 */ 1221, 521, 589, 584, 577, 523, 1518, 1462, 1435, 488, - /* 330 */ 2037, 294, 484, 1706, 605, 35, 287, 38, 37, 601, - /* 340 */ 410, 44, 42, 41, 40, 39, 175, 522, 2015, 2021, - /* 350 */ 360, 1513, 730, 726, 722, 718, 292, 18, 86, 640, - /* 360 */ 486, 2055, 412, 408, 1443, 140, 482, 557, 415, 647, - /* 370 */ 414, 2140, 136, 1151, 2005, 1150, 646, 45, 43, 1523, - /* 380 */ 1463, 1813, 2201, 185, 11, 378, 9, 1437, 61, 14, - /* 390 */ 278, 279, 65, 107, 413, 277, 285, 2137, 1518, 1464, - /* 400 */ 1435, 632, 1494, 2097, 1152, 2036, 1733, 630, 1686, 2072, - /* 410 */ 630, 737, 168, 2038, 650, 2040, 2041, 645, 1665, 640, - /* 420 */ 185, 132, 676, 1513, 187, 1862, 1520, 1521, 519, 626, - /* 430 */ 183, 2133, 2134, 185, 138, 2138, 1443, 630, 1818, 122, - /* 440 */ 11, 1818, 121, 120, 119, 118, 117, 116, 115, 114, - /* 450 */ 113, 132, 2005, 569, 2162, 194, 1493, 1503, 524, 1871, - /* 460 */ 100, 46, 1519, 1522, 272, 634, 366, 2097, 1818, 271, - /* 470 */ 1357, 1358, 534, 533, 532, 1869, 1650, 1438, 2026, 1436, - /* 480 */ 137, 528, 1811, 737, 61, 527, 1401, 461, 240, 1901, - /* 490 */ 526, 531, 83, 1302, 1303, 82, 525, 236, 1520, 1521, - /* 500 */ 1865, 1866, 1463, 1441, 1442, 1657, 1492, 1495, 1496, 1497, - /* 510 */ 1498, 1499, 1500, 1501, 1502, 642, 638, 1511, 1512, 1514, - /* 520 */ 1515, 1516, 1517, 2, 534, 533, 532, 2028, 1493, 1503, - /* 530 */ 1664, 1871, 137, 528, 1519, 1522, 630, 527, 344, 630, - /* 540 */ 185, 1464, 526, 531, 268, 198, 197, 1869, 525, 1438, - /* 550 */ 54, 1436, 616, 421, 131, 130, 129, 128, 127, 126, - /* 560 */ 125, 124, 123, 1412, 1413, 417, 1181, 1818, 460, 416, - /* 570 */ 1818, 41, 40, 39, 2037, 1441, 1442, 616, 1492, 1495, - /* 580 */ 1496, 1497, 1498, 1499, 1500, 1501, 1502, 642, 638, 1511, - /* 590 */ 1512, 1514, 1515, 1516, 1517, 2, 45, 43, 1446, 614, - /* 600 */ 1916, 1930, 1182, 1656, 378, 2055, 1437, 630, 593, 630, - /* 610 */ 568, 190, 2196, 647, 2196, 1374, 1375, 1518, 2005, 1435, - /* 620 */ 646, 422, 221, 431, 625, 61, 1930, 592, 182, 2202, - /* 630 */ 182, 49, 2197, 594, 2197, 594, 185, 171, 1818, 1685, - /* 640 */ 1818, 441, 1513, 515, 511, 507, 503, 218, 630, 2036, - /* 650 */ 440, 1373, 1376, 2072, 1616, 1443, 110, 2038, 650, 2040, - /* 660 */ 2041, 645, 446, 640, 424, 370, 1528, 1684, 45, 43, - /* 670 */ 2125, 1465, 1462, 164, 2124, 2121, 378, 
425, 1437, 1818, - /* 680 */ 46, 539, 1820, 2005, 87, 1683, 268, 216, 1443, 1518, - /* 690 */ 1682, 1435, 142, 38, 37, 2096, 549, 44, 42, 41, - /* 700 */ 40, 39, 737, 580, 579, 1614, 1615, 1617, 1618, 1619, - /* 710 */ 234, 2005, 1871, 1809, 1513, 38, 37, 1520, 1521, 44, - /* 720 */ 42, 41, 40, 39, 2019, 542, 33, 1443, 1870, 2005, - /* 730 */ 536, 1795, 38, 37, 2005, 233, 44, 42, 41, 40, - /* 740 */ 39, 1681, 593, 1680, 1679, 1549, 2196, 1493, 1503, 1449, - /* 750 */ 630, 27, 14, 1519, 1522, 215, 209, 675, 2015, 2021, - /* 760 */ 214, 592, 182, 494, 447, 1803, 2197, 594, 1438, 640, - /* 770 */ 1436, 164, 69, 1678, 737, 68, 2055, 185, 1675, 207, - /* 780 */ 1821, 1818, 1627, 185, 587, 2005, 1974, 2005, 2005, 1520, - /* 790 */ 1521, 1674, 13, 12, 1441, 1442, 696, 1492, 1495, 1496, - /* 800 */ 1497, 1498, 1499, 1500, 1501, 1502, 642, 638, 1511, 1512, - /* 810 */ 1514, 1515, 1516, 1517, 2, 1494, 191, 2005, 32, 1493, - /* 820 */ 1503, 332, 2005, 1460, 682, 1519, 1522, 235, 1554, 586, - /* 830 */ 454, 86, 237, 468, 1871, 2005, 467, 1137, 1138, 682, - /* 840 */ 1438, 371, 1436, 154, 153, 679, 678, 677, 151, 1673, - /* 850 */ 1869, 437, 1465, 469, 1814, 1794, 439, 1916, 154, 153, - /* 860 */ 679, 678, 677, 151, 530, 529, 1441, 1442, 192, 1492, - /* 870 */ 1495, 1496, 1497, 1498, 1499, 1500, 1501, 1502, 642, 638, - /* 880 */ 1511, 1512, 1514, 1515, 1516, 1517, 2, 381, 1561, 563, - /* 890 */ 89, 334, 165, 2005, 355, 164, 550, 307, 347, 1730, - /* 900 */ 630, 38, 37, 1793, 1820, 44, 42, 41, 40, 39, - /* 910 */ 427, 305, 72, 1805, 497, 71, 1672, 8, 684, 38, - /* 920 */ 37, 1871, 384, 44, 42, 41, 40, 39, 382, 301, - /* 930 */ 164, 1818, 1848, 203, 476, 474, 471, 1869, 2140, 1820, - /* 940 */ 465, 708, 706, 459, 458, 457, 456, 453, 452, 451, - /* 950 */ 450, 449, 445, 444, 443, 442, 331, 434, 433, 432, - /* 960 */ 2005, 429, 428, 345, 2136, 714, 713, 712, 711, 388, - /* 970 */ 61, 710, 709, 144, 704, 703, 702, 701, 700, 699, - /* 980 */ 698, 156, 694, 693, 692, 387, 386, 689, 688, 687, - /* 990 */ 686, 685, 630, 630, 1671, 38, 37, 245, 1998, 44, - /* 1000 */ 42, 41, 40, 39, 1437, 630, 1815, 238, 1604, 108, - /* 1010 */ 2037, 682, 2200, 630, 630, 630, 1585, 1435, 697, 627, - /* 1020 */ 1788, 1670, 1669, 1818, 1818, 1668, 1667, 628, 564, 609, - /* 1030 */ 154, 153, 679, 678, 677, 151, 1818, 1916, 2005, 523, - /* 1040 */ 51, 2055, 3, 568, 1818, 1818, 1818, 2196, 196, 608, - /* 1050 */ 80, 79, 420, 1443, 2005, 189, 646, 630, 163, 2037, - /* 1060 */ 2140, 522, 2202, 182, 152, 2005, 2005, 2197, 594, 2005, - /* 1070 */ 2005, 613, 680, 2023, 330, 1862, 1462, 406, 73, 404, - /* 1080 */ 400, 396, 393, 413, 2019, 2036, 2135, 244, 1818, 2072, - /* 1090 */ 2055, 2024, 109, 2038, 650, 2040, 2041, 645, 647, 640, - /* 1100 */ 737, 597, 2019, 2005, 179, 646, 2125, 2145, 1581, 630, - /* 1110 */ 372, 2121, 681, 1991, 146, 1862, 134, 53, 2015, 2021, - /* 1120 */ 373, 630, 185, 282, 184, 1581, 2037, 90, 81, 640, - /* 1130 */ 1999, 1801, 2151, 152, 2036, 288, 2015, 2021, 2072, 674, - /* 1140 */ 1818, 109, 2038, 650, 2040, 2041, 645, 640, 640, 630, - /* 1150 */ 152, 143, 1818, 149, 2096, 2125, 241, 2055, 2037, 372, - /* 1160 */ 2121, 398, 1720, 385, 226, 608, 1438, 224, 1436, 1713, - /* 1170 */ 2005, 228, 646, 230, 227, 568, 229, 63, 232, 2196, - /* 1180 */ 1818, 231, 63, 250, 535, 552, 1407, 551, 637, 2055, - /* 1190 */ 1677, 537, 1441, 1442, 2202, 182, 1711, 647, 641, 2197, - /* 1200 */ 594, 2036, 2005, 1410, 646, 2072, 1445, 2037, 109, 2038, - /* 1210 */ 650, 2040, 2041, 645, 52, 640, 1659, 1660, 540, 1494, - /* 1220 */ 
179, 567, 2125, 152, 47, 1707, 372, 2121, 275, 2165, - /* 1230 */ 1613, 70, 150, 2036, 152, 1612, 252, 2072, 2055, 1773, - /* 1240 */ 109, 2038, 650, 2040, 2041, 645, 647, 640, 2152, 13, - /* 1250 */ 12, 2005, 2216, 646, 2125, 105, 63, 555, 372, 2121, - /* 1260 */ 47, 1584, 47, 654, 150, 102, 2037, 263, 581, 2159, - /* 1270 */ 219, 152, 135, 690, 732, 257, 612, 1371, 596, 150, - /* 1280 */ 2056, 280, 2036, 389, 622, 284, 2072, 1252, 1701, 109, - /* 1290 */ 2038, 650, 2040, 2041, 645, 1200, 640, 2055, 691, 1925, - /* 1300 */ 2037, 2216, 568, 2125, 1859, 647, 2196, 372, 2121, 1555, - /* 1310 */ 2005, 2155, 646, 1504, 606, 300, 1280, 1284, 2172, 265, - /* 1320 */ 1198, 2202, 182, 262, 1291, 1289, 2197, 594, 598, 1, - /* 1330 */ 4, 2055, 155, 392, 1539, 397, 1394, 295, 195, 647, - /* 1340 */ 343, 2036, 426, 1465, 2005, 2072, 646, 1926, 109, 2038, - /* 1350 */ 650, 2040, 2041, 645, 430, 640, 463, 1448, 435, 1460, - /* 1360 */ 2216, 448, 2125, 1918, 462, 455, 372, 2121, 464, 470, - /* 1370 */ 472, 200, 473, 2037, 475, 2036, 477, 575, 1466, 2072, - /* 1380 */ 478, 487, 109, 2038, 650, 2040, 2041, 645, 1468, 640, - /* 1390 */ 206, 490, 375, 374, 2216, 1463, 2125, 208, 491, 1467, - /* 1400 */ 372, 2121, 1451, 492, 2055, 1469, 493, 211, 495, 499, - /* 1410 */ 213, 2190, 647, 1518, 84, 1444, 1154, 2005, 518, 646, - /* 1420 */ 85, 217, 2037, 516, 517, 112, 333, 88, 520, 1808, - /* 1430 */ 1980, 554, 556, 1979, 148, 296, 223, 239, 1513, 1804, - /* 1440 */ 558, 225, 242, 559, 565, 2156, 157, 582, 2036, 158, - /* 1450 */ 1806, 1443, 2072, 2055, 1802, 109, 2038, 650, 2040, 2041, - /* 1460 */ 645, 647, 640, 159, 160, 2171, 2005, 2216, 646, 2125, - /* 1470 */ 620, 572, 562, 372, 2121, 2166, 7, 2170, 578, 361, - /* 1480 */ 591, 571, 585, 573, 2144, 2147, 2037, 248, 251, 256, - /* 1490 */ 172, 570, 258, 259, 2219, 139, 260, 2036, 636, 362, - /* 1500 */ 264, 2072, 602, 599, 109, 2038, 650, 2040, 2041, 645, - /* 1510 */ 1581, 640, 1464, 2195, 610, 270, 2100, 2055, 2125, 261, - /* 1520 */ 2141, 365, 372, 2121, 1470, 647, 95, 297, 1931, 623, - /* 1530 */ 2005, 618, 646, 619, 298, 1945, 1944, 624, 1943, 368, - /* 1540 */ 97, 99, 60, 299, 1819, 2106, 101, 1789, 652, 291, - /* 1550 */ 302, 1863, 733, 326, 2037, 734, 311, 335, 336, 736, - /* 1560 */ 325, 2036, 50, 315, 1452, 2072, 1447, 306, 109, 2038, - /* 1570 */ 650, 2040, 2041, 645, 1997, 640, 1996, 1995, 304, 77, - /* 1580 */ 2098, 1992, 2125, 394, 2037, 2055, 372, 2121, 395, 1428, - /* 1590 */ 1455, 1457, 1429, 647, 188, 399, 1990, 401, 2005, 402, - /* 1600 */ 646, 403, 1989, 638, 1511, 1512, 1514, 1515, 1516, 1517, - /* 1610 */ 405, 1988, 1987, 407, 2037, 2055, 409, 1986, 78, 411, - /* 1620 */ 1397, 1396, 1957, 647, 1956, 1955, 1954, 418, 2005, 2036, - /* 1630 */ 646, 419, 1953, 2072, 1348, 1909, 109, 2038, 650, 2040, - /* 1640 */ 2041, 645, 1908, 640, 2037, 2055, 1906, 145, 633, 1905, - /* 1650 */ 2125, 1904, 1907, 647, 372, 2121, 1903, 1902, 2005, 2036, - /* 1660 */ 646, 1900, 1899, 2072, 193, 436, 110, 2038, 650, 2040, - /* 1670 */ 2041, 645, 1898, 640, 1897, 2055, 438, 1911, 2037, 1896, - /* 1680 */ 2125, 1895, 1894, 647, 635, 2121, 1893, 1892, 2005, 648, - /* 1690 */ 646, 1891, 1890, 2072, 1889, 1888, 110, 2038, 650, 2040, - /* 1700 */ 2041, 645, 1887, 640, 1886, 2037, 1885, 1884, 1883, 2055, - /* 1710 */ 2125, 1882, 147, 1881, 338, 2121, 1880, 647, 1879, 2036, - /* 1720 */ 1910, 1878, 2005, 2072, 646, 1877, 169, 2038, 650, 2040, - /* 1730 */ 2041, 645, 1350, 640, 1876, 1875, 2055, 1874, 1873, 466, - /* 1740 */ 1872, 1736, 199, 1735, 647, 1734, 201, 1227, 
202, 2005, - /* 1750 */ 1732, 646, 2025, 2036, 2037, 1696, 204, 2072, 75, 177, - /* 1760 */ 168, 2038, 650, 2040, 2041, 645, 1695, 640, 1140, 483, - /* 1770 */ 1139, 485, 205, 1970, 1964, 76, 1952, 1951, 595, 2217, - /* 1780 */ 2036, 210, 212, 1929, 2072, 2055, 1797, 110, 2038, 650, - /* 1790 */ 2040, 2041, 645, 647, 640, 1174, 1731, 1729, 2005, 500, - /* 1800 */ 646, 2125, 2163, 1727, 502, 501, 2122, 504, 505, 506, - /* 1810 */ 1725, 509, 1723, 508, 510, 512, 2037, 513, 1710, 1709, - /* 1820 */ 514, 1692, 1799, 1296, 1295, 1798, 1218, 1217, 62, 2036, - /* 1830 */ 222, 705, 1721, 2072, 1216, 1215, 317, 2038, 650, 2040, - /* 1840 */ 2041, 645, 707, 640, 1212, 1210, 1211, 2055, 2037, 1209, - /* 1850 */ 352, 1714, 353, 1712, 538, 647, 354, 1691, 1690, 541, - /* 1860 */ 2005, 543, 646, 1689, 545, 547, 111, 26, 1417, 1969, - /* 1870 */ 1418, 1416, 1403, 1963, 55, 1950, 560, 1948, 66, 2055, - /* 1880 */ 590, 1420, 561, 162, 367, 2201, 19, 647, 16, 28, - /* 1890 */ 247, 2036, 2005, 243, 646, 2072, 358, 2037, 169, 2038, - /* 1900 */ 650, 2040, 2041, 645, 1629, 640, 566, 574, 58, 576, - /* 1910 */ 249, 5, 59, 1611, 2037, 254, 6, 1603, 20, 255, - /* 1920 */ 30, 2026, 64, 2036, 1649, 17, 1644, 2072, 2055, 21, - /* 1930 */ 324, 2038, 650, 2040, 2041, 645, 644, 640, 1650, 1643, - /* 1940 */ 170, 2005, 363, 646, 253, 2055, 2037, 1648, 29, 1647, - /* 1950 */ 377, 2218, 91, 647, 57, 364, 1578, 1577, 2005, 1949, - /* 1960 */ 646, 1947, 1946, 267, 173, 1928, 93, 94, 273, 1927, - /* 1970 */ 96, 2037, 2036, 621, 56, 22, 2072, 2055, 23, 323, - /* 1980 */ 2038, 650, 2040, 2041, 645, 647, 640, 274, 2091, 2036, - /* 1990 */ 2005, 1609, 646, 2072, 102, 2037, 324, 2038, 650, 2040, - /* 2000 */ 2041, 645, 2055, 640, 276, 281, 283, 379, 12, 67, - /* 2010 */ 647, 286, 1453, 174, 98, 2005, 1530, 646, 1540, 2075, - /* 2020 */ 2037, 553, 10, 1508, 1506, 2072, 2055, 1505, 319, 2038, - /* 2030 */ 650, 2040, 2041, 645, 647, 640, 639, 1529, 36, 2005, - /* 2040 */ 15, 646, 24, 649, 186, 1477, 2036, 1485, 25, 653, - /* 2050 */ 2072, 2055, 651, 324, 2038, 650, 2040, 2041, 645, 647, - /* 2060 */ 640, 380, 657, 655, 2005, 1281, 646, 1278, 1275, 658, - /* 2070 */ 2036, 660, 661, 663, 2072, 1269, 664, 308, 2038, 650, - /* 2080 */ 2040, 2041, 645, 2037, 640, 1267, 666, 667, 1258, 103, - /* 2090 */ 673, 289, 1290, 1286, 1273, 2036, 104, 1272, 74, 2072, - /* 2100 */ 1172, 1271, 309, 2038, 650, 2040, 2041, 645, 683, 640, - /* 2110 */ 2037, 1206, 1270, 1205, 2055, 1204, 1203, 1202, 1201, 1225, - /* 2120 */ 1199, 1197, 647, 1196, 1195, 695, 1193, 2005, 1192, 646, - /* 2130 */ 1191, 290, 2037, 1189, 1190, 1188, 1187, 1220, 1222, 1184, - /* 2140 */ 1183, 2055, 1180, 1179, 1178, 1177, 1728, 716, 715, 647, - /* 2150 */ 1726, 719, 1724, 717, 2005, 1722, 646, 720, 2036, 721, - /* 2160 */ 723, 1708, 2072, 2055, 727, 310, 2038, 650, 2040, 2041, - /* 2170 */ 645, 647, 640, 725, 724, 728, 2005, 729, 646, 731, - /* 2180 */ 1129, 1688, 293, 735, 738, 2036, 1439, 303, 739, 2072, - /* 2190 */ 1663, 1663, 316, 2038, 650, 2040, 2041, 645, 1663, 640, - /* 2200 */ 1663, 1663, 1663, 1663, 1663, 1663, 1663, 2036, 1663, 1663, - /* 2210 */ 1663, 2072, 2037, 1663, 320, 2038, 650, 2040, 2041, 645, - /* 2220 */ 1663, 640, 1663, 1663, 1663, 1663, 1663, 1663, 1663, 2037, - /* 2230 */ 1663, 1663, 1663, 1663, 1663, 1663, 1663, 1663, 1663, 1663, - /* 2240 */ 1663, 1663, 1663, 2055, 1663, 1663, 2037, 1663, 1663, 1663, - /* 2250 */ 1663, 647, 1663, 1663, 1663, 1663, 2005, 1663, 646, 1663, - /* 2260 */ 2055, 1663, 1663, 1663, 1663, 1663, 1663, 1663, 647, 1663, - /* 2270 
*/ 1663, 1663, 1663, 2005, 1663, 646, 1663, 2055, 2037, 1663, - /* 2280 */ 1663, 1663, 1663, 1663, 1663, 647, 1663, 2036, 1663, 1663, - /* 2290 */ 2005, 2072, 646, 1663, 312, 2038, 650, 2040, 2041, 645, - /* 2300 */ 1663, 640, 1663, 2037, 2036, 1663, 1663, 1663, 2072, 2055, - /* 2310 */ 1663, 321, 2038, 650, 2040, 2041, 645, 647, 640, 1663, - /* 2320 */ 1663, 2036, 2005, 1663, 646, 2072, 1663, 2037, 313, 2038, - /* 2330 */ 650, 2040, 2041, 645, 2055, 640, 1663, 1663, 1663, 1663, - /* 2340 */ 1663, 1663, 647, 1663, 1663, 1663, 1663, 2005, 1663, 646, - /* 2350 */ 1663, 1663, 2037, 2036, 1663, 1663, 1663, 2072, 2055, 1663, - /* 2360 */ 322, 2038, 650, 2040, 2041, 645, 647, 640, 1663, 1663, - /* 2370 */ 1663, 2005, 1663, 646, 1663, 1663, 1663, 1663, 2036, 1663, - /* 2380 */ 1663, 1663, 2072, 2055, 1663, 314, 2038, 650, 2040, 2041, - /* 2390 */ 645, 647, 640, 1663, 1663, 1663, 2005, 1663, 646, 1663, - /* 2400 */ 1663, 1663, 2036, 1663, 1663, 1663, 2072, 1663, 1663, 327, - /* 2410 */ 2038, 650, 2040, 2041, 645, 2037, 640, 1663, 1663, 1663, - /* 2420 */ 1663, 1663, 1663, 1663, 1663, 1663, 1663, 2036, 1663, 1663, - /* 2430 */ 1663, 2072, 1663, 1663, 328, 2038, 650, 2040, 2041, 645, - /* 2440 */ 1663, 640, 2037, 1663, 1663, 1663, 2055, 1663, 1663, 1663, - /* 2450 */ 1663, 1663, 1663, 1663, 647, 1663, 1663, 1663, 1663, 2005, - /* 2460 */ 1663, 646, 1663, 1663, 2037, 1663, 1663, 1663, 1663, 1663, - /* 2470 */ 1663, 1663, 1663, 2055, 1663, 1663, 1663, 1663, 1663, 1663, - /* 2480 */ 1663, 647, 1663, 1663, 1663, 1663, 2005, 1663, 646, 1663, - /* 2490 */ 2036, 1663, 1663, 1663, 2072, 2055, 1663, 2049, 2038, 650, - /* 2500 */ 2040, 2041, 645, 647, 640, 1663, 1663, 1663, 2005, 1663, - /* 2510 */ 646, 1663, 1663, 1663, 1663, 1663, 1663, 2036, 1663, 1663, - /* 2520 */ 1663, 2072, 1663, 1663, 2048, 2038, 650, 2040, 2041, 645, - /* 2530 */ 1663, 640, 1663, 1663, 1663, 1663, 1663, 1663, 1663, 2036, - /* 2540 */ 1663, 1663, 1663, 2072, 2037, 1663, 2047, 2038, 650, 2040, - /* 2550 */ 2041, 645, 1663, 640, 1663, 1663, 1663, 1663, 1663, 1663, - /* 2560 */ 1663, 2037, 1663, 1663, 1663, 1663, 1663, 1663, 1663, 1663, - /* 2570 */ 1663, 1663, 1663, 1663, 1663, 2055, 1663, 1663, 2037, 1663, - /* 2580 */ 1663, 1663, 1663, 647, 1663, 1663, 1663, 1663, 2005, 1663, - /* 2590 */ 646, 1663, 2055, 1663, 1663, 1663, 1663, 1663, 1663, 1663, - /* 2600 */ 647, 1663, 1663, 1663, 1663, 2005, 1663, 646, 1663, 2055, - /* 2610 */ 2037, 1663, 1663, 1663, 1663, 1663, 1663, 647, 1663, 2036, - /* 2620 */ 1663, 1663, 2005, 2072, 646, 1663, 340, 2038, 650, 2040, - /* 2630 */ 2041, 645, 1663, 640, 1663, 2037, 2036, 1663, 1663, 1663, - /* 2640 */ 2072, 2055, 1663, 341, 2038, 650, 2040, 2041, 645, 647, - /* 2650 */ 640, 1663, 1663, 2036, 2005, 1663, 646, 2072, 1663, 2037, - /* 2660 */ 337, 2038, 650, 2040, 2041, 645, 2055, 640, 1663, 1663, - /* 2670 */ 1663, 1663, 1663, 1663, 647, 1663, 1663, 1663, 1663, 2005, - /* 2680 */ 1663, 646, 1663, 1663, 1663, 2036, 1663, 1663, 1663, 2072, - /* 2690 */ 2055, 1663, 342, 2038, 650, 2040, 2041, 645, 647, 640, - /* 2700 */ 1663, 1663, 1663, 2005, 1663, 646, 1663, 1663, 1663, 1663, - /* 2710 */ 648, 1663, 1663, 1663, 2072, 1663, 1663, 319, 2038, 650, - /* 2720 */ 2040, 2041, 645, 1663, 640, 1663, 1663, 1663, 1663, 1663, - /* 2730 */ 1663, 1663, 1663, 1663, 2036, 1663, 1663, 1663, 2072, 1663, - /* 2740 */ 1663, 318, 2038, 650, 2040, 2041, 645, 1663, 640, + /* 0 */ 1940, 2205, 1799, 606, 481, 2200, 482, 1703, 490, 1810, + /* 10 */ 482, 1703, 45, 43, 1591, 1938, 618, 31, 176, 178, + /* 20 */ 379, 2204, 1440, 
38, 37, 2201, 2203, 44, 42, 41, + /* 30 */ 40, 39, 1861, 1521, 140, 1438, 1465, 2041, 1874, 347, + /* 40 */ 1923, 2027, 38, 37, 612, 357, 44, 42, 41, 40, + /* 50 */ 39, 424, 2023, 2205, 1872, 38, 37, 2200, 1516, 44, + /* 60 */ 42, 41, 40, 39, 18, 499, 384, 1468, 2059, 1867, + /* 70 */ 1869, 1446, 1665, 2204, 167, 606, 645, 2201, 2202, 1775, + /* 80 */ 1153, 2009, 1152, 647, 45, 43, 2019, 2025, 360, 569, + /* 90 */ 1134, 330, 379, 2200, 1440, 220, 14, 641, 340, 181, + /* 100 */ 2137, 2138, 549, 138, 2142, 1521, 140, 1438, 2206, 182, + /* 110 */ 601, 1154, 2040, 2201, 595, 547, 2076, 545, 738, 324, + /* 120 */ 2042, 651, 2044, 2045, 646, 644, 641, 632, 2094, 1136, + /* 130 */ 1516, 1139, 1140, 1523, 1524, 630, 18, 480, 392, 1550, + /* 140 */ 485, 1709, 391, 1446, 1690, 1261, 673, 672, 671, 1265, + /* 150 */ 670, 1267, 1268, 669, 1270, 666, 176, 1276, 663, 1278, + /* 160 */ 1279, 660, 657, 1496, 1506, 1940, 606, 616, 14, 1522, + /* 170 */ 1525, 267, 2137, 605, 382, 133, 604, 370, 1924, 2200, + /* 180 */ 1937, 618, 161, 569, 1441, 617, 1439, 2200, 2009, 358, + /* 190 */ 738, 1823, 630, 270, 593, 182, 1551, 140, 1872, 2201, + /* 200 */ 595, 499, 2206, 182, 630, 1523, 1524, 2201, 595, 589, + /* 210 */ 1444, 1445, 247, 1495, 1498, 1499, 1500, 1501, 1502, 1503, + /* 220 */ 1504, 1505, 643, 639, 1514, 1515, 1517, 1518, 1519, 1520, + /* 230 */ 2, 61, 497, 92, 1933, 1496, 1506, 584, 106, 685, + /* 240 */ 122, 1522, 1525, 121, 120, 119, 118, 117, 116, 115, + /* 250 */ 114, 113, 141, 1595, 351, 166, 1441, 1679, 1439, 1465, + /* 260 */ 1813, 608, 180, 2137, 2138, 1464, 138, 2142, 48, 34, + /* 270 */ 377, 1545, 1546, 1547, 1548, 1549, 1553, 1554, 1555, 1556, + /* 280 */ 48, 61, 1444, 1445, 1221, 1495, 1498, 1499, 1500, 1501, + /* 290 */ 1502, 1503, 1504, 1505, 643, 639, 1514, 1515, 1517, 1518, + /* 300 */ 1519, 1520, 2, 2027, 11, 45, 43, 44, 42, 41, + /* 310 */ 40, 39, 1465, 379, 2023, 1440, 352, 741, 350, 349, + /* 320 */ 1223, 522, 590, 585, 578, 524, 1521, 1465, 1438, 489, + /* 330 */ 2041, 295, 485, 1709, 606, 35, 288, 38, 37, 602, + /* 340 */ 411, 44, 42, 41, 40, 39, 175, 523, 2019, 2025, + /* 350 */ 361, 1516, 731, 727, 723, 719, 293, 18, 86, 641, + /* 360 */ 487, 2059, 413, 409, 1446, 140, 483, 558, 416, 648, + /* 370 */ 415, 2144, 136, 1153, 2009, 1152, 647, 45, 43, 1526, + /* 380 */ 1466, 1816, 2205, 185, 11, 379, 9, 1440, 61, 14, + /* 390 */ 279, 280, 65, 107, 414, 278, 286, 2141, 1521, 1467, + /* 400 */ 1438, 633, 1497, 2101, 1154, 2040, 1736, 631, 1689, 2076, + /* 410 */ 631, 738, 168, 2042, 651, 2044, 2045, 646, 1668, 641, + /* 420 */ 185, 132, 677, 1516, 187, 1865, 1523, 1524, 520, 627, + /* 430 */ 183, 2137, 2138, 185, 138, 2142, 1446, 631, 1821, 122, + /* 440 */ 11, 1821, 121, 120, 119, 118, 117, 116, 115, 114, + /* 450 */ 113, 132, 2009, 570, 2166, 194, 1496, 1506, 525, 1874, + /* 460 */ 100, 46, 1522, 1525, 273, 635, 367, 2101, 1821, 272, + /* 470 */ 1359, 1360, 535, 534, 533, 1872, 1653, 1441, 1797, 1439, + /* 480 */ 137, 529, 1814, 738, 61, 528, 1403, 462, 241, 1904, + /* 490 */ 527, 532, 83, 1304, 1305, 82, 526, 237, 1523, 1524, + /* 500 */ 1868, 1869, 1466, 1444, 1445, 1660, 1495, 1498, 1499, 1500, + /* 510 */ 1501, 1502, 1503, 1504, 1505, 643, 639, 1514, 1515, 1517, + /* 520 */ 1518, 1519, 1520, 2, 535, 534, 533, 1446, 1496, 1506, + /* 530 */ 1667, 1874, 137, 529, 1522, 1525, 631, 528, 345, 631, + /* 540 */ 185, 685, 527, 532, 269, 198, 197, 1872, 526, 1441, + /* 550 */ 54, 1439, 617, 422, 131, 130, 129, 128, 127, 126, + /* 560 */ 125, 124, 123, 1414, 1415, 418, 676, 1821, 461, 
417, + /* 570 */ 1821, 41, 40, 39, 2041, 1444, 1445, 617, 1495, 1498, + /* 580 */ 1499, 1500, 1501, 1502, 1503, 1504, 1505, 643, 639, 1514, + /* 590 */ 1515, 1517, 1518, 1519, 1520, 2, 45, 43, 1467, 615, + /* 600 */ 1919, 1933, 86, 1659, 379, 2059, 1440, 631, 594, 631, + /* 610 */ 569, 190, 2200, 648, 2200, 1376, 1377, 1521, 2009, 1438, + /* 620 */ 647, 423, 221, 432, 626, 1817, 1933, 593, 182, 2206, + /* 630 */ 182, 61, 2201, 595, 2201, 595, 185, 171, 1821, 1688, + /* 640 */ 1821, 442, 1516, 516, 512, 508, 504, 218, 631, 2040, + /* 650 */ 441, 1375, 1378, 2076, 1619, 1446, 110, 2042, 651, 2044, + /* 660 */ 2045, 646, 447, 641, 49, 371, 1531, 1687, 45, 43, + /* 670 */ 2129, 2204, 1465, 164, 2128, 2125, 379, 697, 1440, 1821, + /* 680 */ 46, 540, 1823, 2009, 87, 1686, 2144, 216, 1806, 1521, + /* 690 */ 1685, 1438, 142, 38, 37, 2100, 550, 44, 42, 41, + /* 700 */ 40, 39, 738, 581, 580, 1617, 1618, 1620, 1621, 1622, + /* 710 */ 234, 2009, 2140, 1684, 1516, 38, 37, 1523, 1524, 44, + /* 720 */ 42, 41, 40, 39, 236, 543, 33, 1446, 235, 2009, + /* 730 */ 537, 1798, 38, 37, 2009, 233, 44, 42, 41, 40, + /* 740 */ 39, 1683, 594, 269, 1552, 2144, 2200, 1496, 1506, 1874, + /* 750 */ 631, 27, 14, 1522, 1525, 215, 209, 2009, 13, 12, + /* 760 */ 214, 593, 182, 495, 448, 1873, 2201, 595, 1441, 382, + /* 770 */ 1439, 2139, 69, 1588, 738, 68, 2059, 164, 1682, 207, + /* 780 */ 1808, 1821, 1630, 185, 588, 2009, 1823, 89, 335, 1523, + /* 790 */ 1524, 356, 1977, 551, 1444, 1445, 1468, 1495, 1498, 1499, + /* 800 */ 1500, 1501, 1502, 1503, 1504, 1505, 643, 639, 1514, 1515, + /* 810 */ 1517, 1518, 1519, 1520, 2, 1497, 185, 32, 1804, 1496, + /* 820 */ 1506, 333, 2009, 1463, 683, 1522, 1525, 1557, 164, 587, + /* 830 */ 455, 709, 707, 469, 1874, 152, 468, 1824, 238, 683, + /* 840 */ 1441, 372, 1439, 154, 153, 680, 679, 678, 151, 1681, + /* 850 */ 1872, 438, 1678, 470, 1564, 675, 440, 1919, 154, 153, + /* 860 */ 680, 679, 678, 151, 531, 530, 1444, 1445, 192, 1495, + /* 870 */ 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, 643, 639, + /* 880 */ 1514, 1515, 1517, 1518, 1519, 1520, 2, 698, 53, 1791, + /* 890 */ 631, 681, 165, 2009, 1865, 1677, 2009, 308, 348, 1733, + /* 900 */ 631, 38, 37, 1796, 498, 44, 42, 41, 40, 39, + /* 910 */ 428, 306, 72, 242, 1818, 71, 1676, 1919, 642, 38, + /* 920 */ 37, 1821, 1874, 44, 42, 41, 40, 39, 196, 383, + /* 930 */ 51, 1821, 3, 203, 477, 475, 472, 597, 1872, 2009, + /* 940 */ 466, 191, 524, 460, 459, 458, 457, 454, 453, 452, + /* 950 */ 451, 450, 446, 445, 444, 443, 332, 435, 434, 433, + /* 960 */ 2009, 430, 429, 346, 523, 715, 714, 713, 712, 389, + /* 970 */ 61, 711, 710, 144, 705, 704, 703, 702, 701, 700, + /* 980 */ 699, 156, 695, 694, 693, 388, 387, 690, 689, 688, + /* 990 */ 687, 686, 631, 631, 1675, 38, 37, 564, 2002, 44, + /* 1000 */ 42, 41, 40, 39, 1440, 631, 239, 565, 1607, 108, + /* 1010 */ 2041, 683, 1995, 631, 631, 631, 1468, 1438, 1587, 628, + /* 1020 */ 8, 1674, 1673, 1821, 1821, 1672, 1671, 629, 610, 614, + /* 1030 */ 154, 153, 680, 679, 678, 151, 1821, 682, 2009, 1465, + /* 1040 */ 1865, 2059, 1670, 569, 1821, 1821, 1821, 2200, 152, 609, + /* 1050 */ 80, 79, 421, 1446, 2009, 189, 647, 631, 163, 2041, + /* 1060 */ 399, 302, 2206, 182, 1851, 2009, 2009, 2201, 595, 2009, + /* 1070 */ 2009, 283, 598, 2027, 331, 1139, 1140, 407, 1449, 405, + /* 1080 */ 401, 397, 394, 414, 2023, 2040, 2009, 1680, 1821, 2076, + /* 1090 */ 2059, 1812, 109, 2042, 651, 2044, 2045, 646, 648, 641, + /* 1100 */ 738, 1409, 2023, 2009, 179, 647, 2129, 1776, 385, 631, + /* 1110 */ 373, 2125, 2149, 1584, 
425, 638, 164, 2028, 2019, 2025, + /* 1120 */ 374, 631, 185, 386, 184, 1823, 2041, 426, 2023, 641, + /* 1130 */ 2003, 146, 2155, 134, 2040, 289, 2019, 2025, 2076, 1710, + /* 1140 */ 1821, 109, 2042, 651, 2044, 2045, 646, 641, 641, 2169, + /* 1150 */ 152, 143, 1821, 149, 2100, 2129, 245, 2059, 2041, 373, + /* 1160 */ 2125, 246, 2019, 2025, 73, 609, 1441, 2030, 1439, 1723, + /* 1170 */ 2009, 226, 647, 641, 224, 569, 1716, 228, 230, 2200, + /* 1180 */ 227, 229, 1497, 232, 1714, 553, 231, 552, 733, 2059, + /* 1190 */ 264, 536, 1444, 1445, 2206, 182, 90, 648, 538, 2201, + /* 1200 */ 595, 2040, 2009, 1412, 647, 2076, 541, 2041, 109, 2042, + /* 1210 */ 651, 2044, 2045, 646, 81, 641, 2032, 63, 63, 251, + /* 1220 */ 179, 152, 2129, 1662, 1663, 52, 373, 2125, 1584, 1452, + /* 1230 */ 47, 276, 568, 2040, 70, 13, 12, 2076, 2059, 1448, + /* 1240 */ 109, 2042, 651, 2044, 2045, 646, 648, 641, 2156, 1183, + /* 1250 */ 582, 2009, 2220, 647, 2129, 105, 150, 556, 373, 2125, + /* 1260 */ 152, 1542, 63, 47, 47, 102, 2041, 219, 258, 2163, + /* 1270 */ 1616, 1615, 253, 691, 613, 655, 150, 152, 2060, 135, + /* 1280 */ 150, 390, 2040, 1373, 281, 1184, 2076, 623, 1704, 109, + /* 1290 */ 2042, 651, 2044, 2045, 646, 1202, 641, 2059, 692, 599, + /* 1300 */ 2041, 2220, 569, 2129, 1928, 648, 2200, 373, 2125, 285, + /* 1310 */ 2009, 1862, 647, 1254, 2159, 1558, 1507, 301, 2176, 607, + /* 1320 */ 1200, 2206, 182, 263, 266, 1, 2201, 595, 1282, 1286, + /* 1330 */ 1293, 2059, 1291, 155, 4, 393, 398, 344, 1396, 648, + /* 1340 */ 296, 2040, 195, 427, 2009, 2076, 647, 1468, 109, 2042, + /* 1350 */ 651, 2044, 2045, 646, 431, 641, 1929, 464, 436, 1463, + /* 1360 */ 2220, 449, 2129, 1921, 463, 456, 373, 2125, 465, 473, + /* 1370 */ 471, 200, 474, 2041, 476, 2040, 478, 576, 1469, 2076, + /* 1380 */ 479, 488, 109, 2042, 651, 2044, 2045, 646, 1471, 641, + /* 1390 */ 1451, 491, 376, 375, 2220, 206, 2129, 1466, 492, 208, + /* 1400 */ 373, 2125, 1454, 1470, 2059, 493, 1472, 211, 494, 496, + /* 1410 */ 517, 2194, 648, 1521, 1156, 1447, 518, 2009, 213, 647, + /* 1420 */ 84, 85, 2041, 521, 500, 217, 519, 1986, 1811, 223, + /* 1430 */ 334, 1983, 1807, 112, 225, 297, 555, 88, 1516, 1982, + /* 1440 */ 559, 157, 148, 158, 1809, 557, 240, 243, 2040, 1805, + /* 1450 */ 159, 1446, 2076, 2059, 566, 109, 2042, 651, 2044, 2045, + /* 1460 */ 646, 648, 641, 160, 583, 2175, 2009, 2220, 647, 2129, + /* 1470 */ 621, 2160, 2174, 373, 2125, 573, 563, 7, 2151, 592, + /* 1480 */ 579, 2170, 362, 586, 2148, 574, 2041, 257, 572, 571, + /* 1490 */ 172, 260, 249, 259, 603, 560, 600, 2040, 637, 252, + /* 1500 */ 363, 2076, 1584, 139, 109, 2042, 651, 2044, 2045, 646, + /* 1510 */ 265, 641, 1467, 271, 2223, 2199, 2104, 2059, 2129, 262, + /* 1520 */ 611, 95, 373, 2125, 2145, 648, 1473, 298, 366, 1934, + /* 1530 */ 2009, 624, 647, 619, 299, 620, 1948, 1947, 1946, 369, + /* 1540 */ 625, 97, 99, 300, 60, 101, 2110, 1866, 653, 1822, + /* 1550 */ 292, 734, 1792, 327, 2041, 2001, 261, 336, 337, 312, + /* 1560 */ 737, 2040, 735, 303, 1455, 2076, 1450, 2000, 109, 2042, + /* 1570 */ 651, 2044, 2045, 646, 305, 641, 307, 50, 1999, 77, + /* 1580 */ 2102, 1996, 2129, 395, 2041, 2059, 373, 2125, 396, 1431, + /* 1590 */ 1458, 1460, 1432, 648, 188, 326, 316, 400, 2009, 1994, + /* 1600 */ 647, 404, 1993, 639, 1514, 1515, 1517, 1518, 1519, 1520, + /* 1610 */ 402, 403, 406, 1992, 2041, 2059, 1991, 408, 410, 1990, + /* 1620 */ 412, 78, 1399, 648, 1398, 1960, 1959, 1958, 2009, 2040, + /* 1630 */ 647, 419, 420, 2076, 1957, 1956, 109, 2042, 651, 2044, + /* 1640 */ 2045, 646, 1350, 641, 
2041, 2059, 1912, 1911, 634, 1909, + /* 1650 */ 2129, 145, 1908, 648, 373, 2125, 1907, 1910, 2009, 2040, + /* 1660 */ 647, 1906, 1905, 2076, 193, 437, 110, 2042, 651, 2044, + /* 1670 */ 2045, 646, 1903, 641, 1902, 2059, 1901, 1900, 2041, 439, + /* 1680 */ 2129, 1914, 1899, 648, 636, 2125, 1898, 1897, 2009, 649, + /* 1690 */ 647, 1896, 1895, 2076, 1894, 1893, 110, 2042, 651, 2044, + /* 1700 */ 2045, 646, 1892, 641, 1891, 2041, 1890, 1889, 1888, 2059, + /* 1710 */ 2129, 1887, 1886, 1885, 339, 2125, 1884, 648, 1883, 2040, + /* 1720 */ 147, 1882, 2009, 2076, 647, 1913, 169, 2042, 651, 2044, + /* 1730 */ 2045, 646, 1881, 641, 1880, 1879, 2059, 1352, 1878, 1877, + /* 1740 */ 467, 1876, 1875, 1739, 648, 1229, 199, 1738, 201, 2009, + /* 1750 */ 1737, 647, 2029, 2040, 2041, 202, 1735, 2076, 1699, 204, + /* 1760 */ 168, 2042, 651, 2044, 2045, 646, 75, 641, 177, 1142, + /* 1770 */ 484, 1141, 1698, 486, 1973, 1967, 205, 1955, 596, 2221, + /* 1780 */ 2040, 212, 1954, 76, 2076, 2059, 1932, 110, 2042, 651, + /* 1790 */ 2044, 2045, 646, 648, 641, 1800, 1176, 1734, 2009, 210, + /* 1800 */ 647, 2129, 2167, 1732, 501, 503, 2126, 502, 1730, 506, + /* 1810 */ 507, 505, 1728, 509, 1726, 510, 2041, 1713, 513, 511, + /* 1820 */ 515, 514, 1712, 1695, 1802, 62, 1298, 1297, 1801, 2040, + /* 1830 */ 1724, 222, 706, 2076, 1220, 1219, 318, 2042, 651, 2044, + /* 1840 */ 2045, 646, 1218, 641, 1217, 1214, 708, 2059, 2041, 1212, + /* 1850 */ 1213, 1211, 1717, 353, 354, 648, 539, 1715, 355, 1694, + /* 1860 */ 2009, 542, 647, 1693, 544, 1692, 548, 111, 546, 1419, + /* 1870 */ 1421, 1972, 1418, 1405, 55, 1966, 561, 1953, 1951, 2059, + /* 1880 */ 591, 1423, 2205, 26, 368, 66, 162, 648, 16, 244, + /* 1890 */ 19, 2040, 2009, 1632, 647, 2076, 575, 2041, 169, 2042, + /* 1900 */ 651, 2044, 2045, 646, 577, 641, 567, 28, 58, 248, + /* 1910 */ 562, 359, 5, 59, 2041, 250, 1614, 170, 255, 256, + /* 1920 */ 6, 254, 20, 2040, 30, 64, 1647, 2076, 2059, 2030, + /* 1930 */ 325, 2042, 651, 2044, 2045, 646, 645, 641, 29, 21, + /* 1940 */ 1606, 2009, 1652, 647, 91, 2059, 2041, 1653, 17, 1646, + /* 1950 */ 378, 2222, 364, 648, 1651, 1650, 365, 1581, 2009, 1580, + /* 1960 */ 647, 1952, 57, 268, 1950, 56, 1949, 1931, 94, 93, + /* 1970 */ 173, 2041, 2040, 274, 1930, 96, 2076, 2059, 287, 324, + /* 1980 */ 2042, 651, 2044, 2045, 646, 648, 641, 275, 2095, 2040, + /* 1990 */ 2009, 1612, 647, 2076, 102, 2041, 325, 2042, 651, 2044, + /* 2000 */ 2045, 646, 2059, 641, 22, 277, 282, 380, 622, 67, + /* 2010 */ 648, 12, 23, 1456, 1543, 2009, 1533, 647, 174, 284, + /* 2020 */ 2041, 554, 1511, 98, 1532, 2076, 2059, 10, 320, 2042, + /* 2030 */ 651, 2044, 2045, 646, 648, 641, 2079, 640, 36, 2009, + /* 2040 */ 1509, 647, 1508, 1480, 15, 24, 2040, 186, 1488, 25, + /* 2050 */ 2076, 2059, 654, 325, 2042, 651, 2044, 2045, 646, 648, + /* 2060 */ 641, 650, 652, 381, 2009, 656, 647, 1283, 658, 659, + /* 2070 */ 2040, 661, 1280, 1277, 2076, 662, 664, 309, 2042, 651, + /* 2080 */ 2044, 2045, 646, 2041, 641, 1271, 665, 667, 1260, 1269, + /* 2090 */ 668, 674, 290, 103, 104, 2040, 1292, 1275, 1274, 2076, + /* 2100 */ 1273, 1272, 310, 2042, 651, 2044, 2045, 646, 74, 641, + /* 2110 */ 2041, 1288, 1174, 684, 2059, 1208, 1207, 1206, 1205, 291, + /* 2120 */ 1204, 1203, 648, 1201, 1227, 1199, 1198, 2009, 1197, 647, + /* 2130 */ 696, 1195, 2041, 1194, 1193, 1192, 1191, 1190, 1189, 1224, + /* 2140 */ 1222, 2059, 1186, 1185, 1182, 1181, 1180, 1179, 1731, 648, + /* 2150 */ 716, 1729, 717, 718, 2009, 720, 647, 722, 2040, 1727, + /* 2160 */ 724, 726, 2076, 2059, 721, 311, 2042, 651, 
2044, 2045, + /* 2170 */ 646, 648, 641, 1725, 725, 728, 2009, 729, 647, 1711, + /* 2180 */ 730, 732, 1131, 1691, 294, 2040, 736, 740, 1442, 2076, + /* 2190 */ 304, 739, 317, 2042, 651, 2044, 2045, 646, 1666, 641, + /* 2200 */ 1666, 1666, 1666, 1666, 1666, 1666, 1666, 2040, 1666, 1666, + /* 2210 */ 1666, 2076, 2041, 1666, 321, 2042, 651, 2044, 2045, 646, + /* 2220 */ 1666, 641, 1666, 1666, 1666, 1666, 1666, 1666, 1666, 2041, + /* 2230 */ 1666, 1666, 1666, 1666, 1666, 1666, 1666, 1666, 1666, 1666, + /* 2240 */ 1666, 1666, 1666, 2059, 1666, 1666, 2041, 1666, 1666, 1666, + /* 2250 */ 1666, 648, 1666, 1666, 1666, 1666, 2009, 1666, 647, 1666, + /* 2260 */ 2059, 1666, 1666, 1666, 1666, 1666, 1666, 1666, 648, 1666, + /* 2270 */ 1666, 1666, 1666, 2009, 1666, 647, 1666, 2059, 2041, 1666, + /* 2280 */ 1666, 1666, 1666, 1666, 1666, 648, 1666, 2040, 1666, 1666, + /* 2290 */ 2009, 2076, 647, 1666, 313, 2042, 651, 2044, 2045, 646, + /* 2300 */ 1666, 641, 1666, 2041, 2040, 1666, 1666, 1666, 2076, 2059, + /* 2310 */ 1666, 322, 2042, 651, 2044, 2045, 646, 648, 641, 1666, + /* 2320 */ 1666, 2040, 2009, 1666, 647, 2076, 1666, 2041, 314, 2042, + /* 2330 */ 651, 2044, 2045, 646, 2059, 641, 1666, 1666, 1666, 1666, + /* 2340 */ 1666, 1666, 648, 1666, 1666, 1666, 1666, 2009, 1666, 647, + /* 2350 */ 1666, 1666, 2041, 2040, 1666, 1666, 1666, 2076, 2059, 1666, + /* 2360 */ 323, 2042, 651, 2044, 2045, 646, 648, 641, 1666, 1666, + /* 2370 */ 1666, 2009, 1666, 647, 1666, 1666, 1666, 1666, 2040, 1666, + /* 2380 */ 1666, 1666, 2076, 2059, 1666, 315, 2042, 651, 2044, 2045, + /* 2390 */ 646, 648, 641, 1666, 1666, 1666, 2009, 1666, 647, 1666, + /* 2400 */ 1666, 1666, 2040, 1666, 1666, 1666, 2076, 1666, 1666, 328, + /* 2410 */ 2042, 651, 2044, 2045, 646, 2041, 641, 1666, 1666, 1666, + /* 2420 */ 1666, 1666, 1666, 1666, 1666, 1666, 1666, 2040, 1666, 1666, + /* 2430 */ 1666, 2076, 1666, 1666, 329, 2042, 651, 2044, 2045, 646, + /* 2440 */ 1666, 641, 2041, 1666, 1666, 1666, 2059, 1666, 1666, 1666, + /* 2450 */ 1666, 1666, 1666, 1666, 648, 1666, 1666, 1666, 1666, 2009, + /* 2460 */ 1666, 647, 1666, 1666, 2041, 1666, 1666, 1666, 1666, 1666, + /* 2470 */ 1666, 1666, 1666, 2059, 1666, 1666, 1666, 1666, 1666, 1666, + /* 2480 */ 1666, 648, 1666, 1666, 1666, 1666, 2009, 1666, 647, 1666, + /* 2490 */ 2040, 1666, 1666, 1666, 2076, 2059, 1666, 2053, 2042, 651, + /* 2500 */ 2044, 2045, 646, 648, 641, 1666, 1666, 1666, 2009, 1666, + /* 2510 */ 647, 1666, 1666, 1666, 1666, 1666, 1666, 2040, 1666, 1666, + /* 2520 */ 1666, 2076, 1666, 1666, 2052, 2042, 651, 2044, 2045, 646, + /* 2530 */ 1666, 641, 1666, 1666, 1666, 1666, 1666, 1666, 1666, 2040, + /* 2540 */ 1666, 1666, 1666, 2076, 2041, 1666, 2051, 2042, 651, 2044, + /* 2550 */ 2045, 646, 1666, 641, 1666, 1666, 1666, 1666, 1666, 1666, + /* 2560 */ 1666, 2041, 1666, 1666, 1666, 1666, 1666, 1666, 1666, 1666, + /* 2570 */ 1666, 1666, 1666, 1666, 1666, 2059, 1666, 1666, 2041, 1666, + /* 2580 */ 1666, 1666, 1666, 648, 1666, 1666, 1666, 1666, 2009, 1666, + /* 2590 */ 647, 1666, 2059, 1666, 1666, 1666, 1666, 1666, 1666, 1666, + /* 2600 */ 648, 1666, 1666, 1666, 1666, 2009, 1666, 647, 1666, 2059, + /* 2610 */ 2041, 1666, 1666, 1666, 1666, 1666, 1666, 648, 1666, 2040, + /* 2620 */ 1666, 1666, 2009, 2076, 647, 1666, 341, 2042, 651, 2044, + /* 2630 */ 2045, 646, 1666, 641, 1666, 2041, 2040, 1666, 1666, 1666, + /* 2640 */ 2076, 2059, 1666, 342, 2042, 651, 2044, 2045, 646, 648, + /* 2650 */ 641, 1666, 1666, 2040, 2009, 1666, 647, 2076, 1666, 2041, + /* 2660 */ 338, 2042, 651, 2044, 2045, 646, 2059, 641, 
1666, 1666, + /* 2670 */ 1666, 1666, 1666, 1666, 648, 1666, 1666, 1666, 1666, 2009, + /* 2680 */ 1666, 647, 1666, 1666, 1666, 2040, 1666, 1666, 1666, 2076, + /* 2690 */ 2059, 1666, 343, 2042, 651, 2044, 2045, 646, 648, 641, + /* 2700 */ 1666, 1666, 1666, 2009, 1666, 647, 1666, 1666, 1666, 1666, + /* 2710 */ 649, 1666, 1666, 1666, 2076, 1666, 1666, 320, 2042, 651, + /* 2720 */ 2044, 2045, 646, 1666, 641, 1666, 1666, 1666, 1666, 1666, + /* 2730 */ 1666, 1666, 1666, 1666, 2040, 1666, 1666, 1666, 2076, 1666, + /* 2740 */ 1666, 319, 2042, 651, 2044, 2045, 646, 1666, 641, }; static const YYCODETYPE yy_lookahead[] = { /* 0 */ 377, 439, 0, 339, 335, 443, 337, 338, 335, 363, @@ -542,179 +542,179 @@ static const YYCODETYPE yy_lookahead[] = { /* 440 */ 232, 370, 24, 25, 26, 27, 28, 29, 30, 31, /* 450 */ 32, 353, 375, 454, 455, 58, 163, 164, 360, 362, /* 460 */ 343, 96, 169, 170, 166, 424, 369, 426, 370, 171, - /* 470 */ 163, 164, 66, 67, 68, 378, 97, 184, 47, 186, + /* 470 */ 163, 164, 66, 67, 68, 378, 97, 184, 0, 186, /* 480 */ 74, 75, 365, 118, 96, 79, 188, 80, 190, 0, /* 490 */ 84, 85, 95, 133, 134, 98, 90, 127, 133, 134, /* 500 */ 376, 377, 20, 210, 211, 176, 213, 214, 215, 216, /* 510 */ 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, - /* 520 */ 227, 228, 229, 230, 66, 67, 68, 96, 163, 164, + /* 520 */ 227, 228, 229, 230, 66, 67, 68, 71, 163, 164, /* 530 */ 0, 362, 74, 75, 169, 170, 339, 79, 369, 339, - /* 540 */ 248, 20, 84, 85, 165, 138, 139, 378, 90, 184, + /* 540 */ 248, 63, 84, 85, 165, 138, 139, 378, 90, 184, /* 550 */ 353, 186, 339, 353, 24, 25, 26, 27, 28, 29, - /* 560 */ 30, 31, 32, 193, 194, 394, 35, 370, 161, 398, + /* 560 */ 30, 31, 32, 193, 194, 394, 107, 370, 161, 398, /* 570 */ 370, 14, 15, 16, 331, 210, 211, 339, 213, 214, /* 580 */ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, - /* 590 */ 225, 226, 227, 228, 229, 230, 12, 13, 35, 386, - /* 600 */ 370, 388, 71, 274, 20, 362, 22, 339, 439, 339, + /* 590 */ 225, 226, 227, 228, 229, 230, 12, 13, 20, 386, + /* 600 */ 370, 388, 345, 274, 20, 362, 22, 339, 439, 339, /* 610 */ 439, 381, 443, 370, 443, 133, 134, 33, 375, 35, - /* 620 */ 377, 353, 33, 353, 386, 96, 388, 458, 459, 458, + /* 620 */ 377, 353, 33, 353, 386, 368, 388, 458, 459, 458, /* 630 */ 459, 96, 463, 464, 463, 464, 248, 48, 370, 331, /* 640 */ 370, 152, 58, 54, 55, 56, 57, 58, 339, 406, /* 650 */ 161, 169, 170, 410, 210, 71, 413, 414, 415, 416, - /* 660 */ 417, 418, 353, 420, 22, 354, 14, 331, 12, 13, - /* 670 */ 427, 20, 20, 362, 431, 432, 20, 35, 22, 370, - /* 680 */ 96, 4, 371, 375, 95, 331, 165, 98, 71, 33, + /* 660 */ 417, 418, 353, 420, 96, 354, 14, 331, 12, 13, + /* 670 */ 427, 3, 20, 362, 431, 432, 20, 71, 22, 370, + /* 680 */ 96, 4, 371, 375, 95, 331, 412, 98, 363, 33, /* 690 */ 331, 35, 423, 8, 9, 426, 19, 12, 13, 14, /* 700 */ 15, 16, 118, 259, 260, 261, 262, 263, 264, 265, - /* 710 */ 33, 375, 362, 364, 58, 8, 9, 133, 134, 12, - /* 720 */ 13, 14, 15, 16, 375, 48, 2, 71, 378, 375, + /* 710 */ 33, 375, 438, 331, 58, 8, 9, 133, 134, 12, + /* 720 */ 13, 14, 15, 16, 128, 48, 2, 71, 132, 375, /* 730 */ 53, 0, 8, 9, 375, 58, 12, 13, 14, 15, - /* 740 */ 16, 331, 439, 331, 331, 162, 443, 163, 164, 186, - /* 750 */ 339, 44, 96, 169, 170, 166, 167, 107, 409, 410, - /* 760 */ 171, 458, 459, 174, 353, 363, 463, 464, 184, 420, - /* 770 */ 186, 362, 95, 331, 118, 98, 362, 248, 331, 190, - /* 780 */ 371, 370, 97, 248, 370, 375, 358, 375, 375, 133, - /* 790 */ 134, 331, 1, 2, 210, 211, 71, 213, 214, 215, + /* 740 */ 16, 331, 439, 165, 162, 412, 443, 163, 164, 362, + /* 
750 */ 339, 44, 96, 169, 170, 166, 167, 375, 1, 2, + /* 760 */ 171, 458, 459, 174, 353, 378, 463, 464, 184, 354, + /* 770 */ 186, 438, 95, 4, 118, 98, 362, 362, 331, 190, + /* 780 */ 363, 370, 97, 248, 370, 375, 371, 191, 192, 133, + /* 790 */ 134, 195, 358, 197, 210, 211, 20, 213, 214, 215, /* 800 */ 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, - /* 810 */ 226, 227, 228, 229, 230, 163, 165, 375, 235, 163, - /* 820 */ 164, 18, 375, 20, 108, 169, 170, 128, 245, 415, - /* 830 */ 27, 345, 404, 30, 362, 375, 33, 45, 46, 108, + /* 810 */ 226, 227, 228, 229, 230, 163, 248, 235, 363, 163, + /* 820 */ 164, 18, 375, 20, 108, 169, 170, 245, 362, 415, + /* 830 */ 27, 348, 349, 30, 362, 44, 33, 371, 404, 108, /* 840 */ 184, 369, 186, 127, 128, 129, 130, 131, 132, 331, - /* 850 */ 378, 48, 20, 50, 368, 0, 53, 370, 127, 128, + /* 850 */ 378, 48, 331, 50, 97, 363, 53, 370, 127, 128, /* 860 */ 129, 130, 131, 132, 348, 349, 210, 211, 381, 213, /* 870 */ 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, - /* 880 */ 224, 225, 226, 227, 228, 229, 230, 354, 97, 399, - /* 890 */ 191, 192, 18, 375, 195, 362, 197, 23, 95, 0, - /* 900 */ 339, 8, 9, 0, 371, 12, 13, 14, 15, 16, - /* 910 */ 107, 37, 38, 363, 353, 41, 331, 39, 63, 8, - /* 920 */ 9, 362, 354, 12, 13, 14, 15, 16, 369, 355, - /* 930 */ 362, 370, 358, 59, 60, 61, 62, 378, 412, 371, - /* 940 */ 137, 348, 349, 140, 141, 142, 143, 144, 145, 146, + /* 880 */ 224, 225, 226, 227, 228, 229, 230, 350, 97, 352, + /* 890 */ 339, 372, 18, 375, 375, 331, 375, 23, 95, 0, + /* 900 */ 339, 8, 9, 0, 353, 12, 13, 14, 15, 16, + /* 910 */ 107, 37, 38, 363, 353, 41, 331, 370, 363, 8, + /* 920 */ 9, 370, 362, 12, 13, 14, 15, 16, 381, 369, + /* 930 */ 42, 370, 44, 59, 60, 61, 62, 269, 378, 375, + /* 940 */ 137, 165, 108, 140, 141, 142, 143, 144, 145, 146, /* 950 */ 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, - /* 960 */ 375, 158, 159, 160, 438, 66, 67, 68, 69, 70, + /* 960 */ 375, 158, 159, 160, 130, 66, 67, 68, 69, 70, /* 970 */ 96, 72, 73, 74, 75, 76, 77, 78, 79, 80, /* 980 */ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, - /* 990 */ 91, 92, 339, 339, 331, 8, 9, 165, 394, 12, + /* 990 */ 91, 92, 339, 339, 331, 8, 9, 399, 394, 12, /* 1000 */ 13, 14, 15, 16, 22, 339, 353, 353, 97, 135, - /* 1010 */ 331, 108, 3, 339, 339, 339, 4, 35, 350, 353, - /* 1020 */ 352, 331, 331, 370, 370, 331, 331, 353, 353, 353, - /* 1030 */ 127, 128, 129, 130, 131, 132, 370, 370, 375, 108, - /* 1040 */ 42, 362, 44, 439, 370, 370, 370, 443, 381, 370, + /* 1010 */ 331, 108, 0, 339, 339, 339, 20, 35, 249, 353, + /* 1020 */ 39, 331, 331, 370, 370, 331, 331, 353, 353, 353, + /* 1030 */ 127, 128, 129, 130, 131, 132, 370, 372, 375, 20, + /* 1040 */ 375, 362, 331, 439, 370, 370, 370, 443, 44, 370, /* 1050 */ 176, 177, 178, 71, 375, 181, 377, 339, 165, 331, - /* 1060 */ 412, 130, 458, 459, 44, 375, 375, 463, 464, 375, - /* 1070 */ 375, 353, 372, 364, 200, 375, 20, 203, 107, 205, - /* 1080 */ 206, 207, 208, 209, 375, 406, 438, 58, 370, 410, + /* 1060 */ 48, 355, 458, 459, 358, 375, 375, 463, 464, 375, + /* 1070 */ 375, 353, 44, 364, 200, 45, 46, 203, 35, 205, + /* 1080 */ 206, 207, 208, 209, 375, 406, 375, 332, 370, 410, /* 1090 */ 362, 364, 413, 414, 415, 416, 417, 418, 370, 420, - /* 1100 */ 118, 44, 375, 375, 425, 377, 427, 246, 247, 339, - /* 1110 */ 431, 432, 372, 0, 42, 375, 44, 97, 409, 410, - /* 1120 */ 411, 339, 248, 353, 445, 247, 331, 98, 157, 420, - /* 1130 */ 394, 363, 453, 44, 406, 353, 409, 410, 410, 363, - /* 1140 */ 370, 413, 414, 415, 416, 417, 418, 420, 420, 339, - /* 1150 */ 44, 423, 
370, 425, 426, 427, 363, 362, 331, 431, - /* 1160 */ 432, 48, 0, 353, 100, 370, 184, 103, 186, 0, - /* 1170 */ 375, 100, 377, 100, 103, 439, 103, 44, 100, 443, - /* 1180 */ 370, 103, 44, 44, 22, 196, 97, 198, 64, 362, - /* 1190 */ 332, 22, 210, 211, 458, 459, 0, 370, 363, 463, - /* 1200 */ 464, 406, 375, 97, 377, 410, 35, 331, 413, 414, - /* 1210 */ 415, 416, 417, 418, 165, 420, 133, 134, 22, 163, - /* 1220 */ 425, 172, 427, 44, 44, 0, 431, 432, 44, 385, - /* 1230 */ 97, 44, 44, 406, 44, 97, 97, 410, 362, 351, - /* 1240 */ 413, 414, 415, 416, 417, 418, 370, 420, 453, 1, - /* 1250 */ 2, 375, 425, 377, 427, 96, 44, 394, 431, 432, - /* 1260 */ 44, 249, 44, 44, 44, 106, 331, 467, 456, 442, - /* 1270 */ 341, 44, 44, 13, 49, 450, 97, 97, 269, 44, - /* 1280 */ 362, 97, 406, 341, 97, 97, 410, 97, 338, 413, - /* 1290 */ 414, 415, 416, 417, 418, 35, 420, 362, 13, 385, - /* 1300 */ 331, 425, 439, 427, 374, 370, 443, 431, 432, 97, - /* 1310 */ 375, 385, 377, 97, 441, 97, 97, 97, 442, 460, - /* 1320 */ 35, 458, 459, 433, 97, 97, 463, 464, 271, 444, - /* 1330 */ 250, 362, 97, 408, 210, 48, 182, 396, 42, 370, - /* 1340 */ 407, 406, 382, 20, 375, 410, 377, 385, 413, 414, - /* 1350 */ 415, 416, 417, 418, 382, 420, 162, 186, 380, 20, - /* 1360 */ 425, 339, 427, 339, 380, 382, 431, 432, 380, 339, - /* 1370 */ 94, 339, 347, 331, 339, 406, 339, 442, 20, 410, + /* 1100 */ 118, 97, 375, 375, 425, 377, 427, 351, 354, 339, + /* 1110 */ 431, 432, 246, 247, 22, 64, 362, 364, 409, 410, + /* 1120 */ 411, 339, 248, 353, 445, 371, 331, 35, 375, 420, + /* 1130 */ 394, 42, 453, 44, 406, 353, 409, 410, 410, 0, + /* 1140 */ 370, 413, 414, 415, 416, 417, 418, 420, 420, 385, + /* 1150 */ 44, 423, 370, 425, 426, 427, 58, 362, 331, 431, + /* 1160 */ 432, 165, 409, 410, 107, 370, 184, 47, 186, 0, + /* 1170 */ 375, 100, 377, 420, 103, 439, 0, 100, 100, 443, + /* 1180 */ 103, 103, 163, 100, 0, 196, 103, 198, 49, 362, + /* 1190 */ 467, 22, 210, 211, 458, 459, 98, 370, 22, 463, + /* 1200 */ 464, 406, 375, 97, 377, 410, 22, 331, 413, 414, + /* 1210 */ 415, 416, 417, 418, 157, 420, 96, 44, 44, 44, + /* 1220 */ 425, 44, 427, 133, 134, 165, 431, 432, 247, 186, + /* 1230 */ 44, 44, 172, 406, 44, 1, 2, 410, 362, 35, + /* 1240 */ 413, 414, 415, 416, 417, 418, 370, 420, 453, 35, + /* 1250 */ 456, 375, 425, 377, 427, 96, 44, 394, 431, 432, + /* 1260 */ 44, 210, 44, 44, 44, 106, 331, 341, 450, 442, + /* 1270 */ 97, 97, 97, 13, 97, 44, 44, 44, 362, 44, + /* 1280 */ 44, 341, 406, 97, 97, 71, 410, 97, 338, 413, + /* 1290 */ 414, 415, 416, 417, 418, 35, 420, 362, 13, 271, + /* 1300 */ 331, 425, 439, 427, 385, 370, 443, 431, 432, 97, + /* 1310 */ 375, 374, 377, 97, 385, 97, 97, 97, 442, 441, + /* 1320 */ 35, 458, 459, 433, 460, 444, 463, 464, 97, 97, + /* 1330 */ 97, 362, 97, 97, 250, 408, 48, 407, 182, 370, + /* 1340 */ 396, 406, 42, 382, 375, 410, 377, 20, 413, 414, + /* 1350 */ 415, 416, 417, 418, 382, 420, 385, 162, 380, 20, + /* 1360 */ 425, 339, 427, 339, 380, 382, 431, 432, 380, 94, + /* 1370 */ 339, 339, 347, 331, 339, 406, 339, 442, 20, 410, /* 1380 */ 333, 333, 413, 414, 415, 416, 417, 418, 20, 420, - /* 1390 */ 345, 401, 12, 13, 425, 20, 427, 345, 377, 20, - /* 1400 */ 431, 432, 22, 340, 362, 20, 395, 345, 340, 339, - /* 1410 */ 345, 442, 370, 33, 345, 35, 52, 375, 333, 377, - /* 1420 */ 345, 345, 331, 342, 342, 339, 333, 96, 362, 362, - /* 1430 */ 375, 199, 405, 375, 403, 401, 362, 343, 58, 362, - /* 1440 */ 189, 362, 343, 400, 339, 385, 362, 258, 406, 362, - /* 1450 */ 362, 71, 410, 362, 362, 413, 414, 415, 416, 417, - /* 1460 */ 418, 
370, 420, 362, 362, 449, 375, 425, 377, 427, - /* 1470 */ 257, 375, 377, 431, 432, 385, 266, 449, 375, 375, - /* 1480 */ 175, 267, 375, 268, 442, 452, 331, 390, 390, 451, - /* 1490 */ 449, 251, 448, 447, 468, 370, 446, 406, 118, 275, - /* 1500 */ 461, 410, 272, 270, 413, 414, 415, 416, 417, 418, - /* 1510 */ 247, 420, 20, 462, 339, 343, 425, 362, 427, 408, - /* 1520 */ 412, 340, 431, 432, 20, 370, 343, 390, 388, 167, - /* 1530 */ 375, 375, 377, 375, 390, 375, 375, 387, 375, 375, - /* 1540 */ 343, 343, 96, 358, 370, 430, 96, 352, 366, 343, - /* 1550 */ 339, 375, 36, 402, 331, 334, 356, 391, 391, 333, - /* 1560 */ 356, 406, 397, 356, 184, 410, 186, 329, 413, 414, - /* 1570 */ 415, 416, 417, 418, 0, 420, 0, 0, 344, 42, + /* 1390 */ 186, 401, 12, 13, 425, 345, 427, 20, 377, 345, + /* 1400 */ 431, 432, 22, 20, 362, 340, 20, 345, 395, 340, + /* 1410 */ 342, 442, 370, 33, 52, 35, 342, 375, 345, 377, + /* 1420 */ 345, 345, 331, 362, 339, 345, 333, 375, 362, 362, + /* 1430 */ 333, 375, 362, 339, 362, 401, 199, 96, 58, 375, + /* 1440 */ 189, 362, 403, 362, 362, 405, 343, 343, 406, 362, + /* 1450 */ 362, 71, 410, 362, 339, 413, 414, 415, 416, 417, + /* 1460 */ 418, 370, 420, 362, 258, 449, 375, 425, 377, 427, + /* 1470 */ 257, 385, 449, 431, 432, 375, 377, 266, 452, 175, + /* 1480 */ 375, 385, 375, 375, 442, 268, 331, 451, 267, 251, + /* 1490 */ 449, 447, 390, 448, 272, 400, 270, 406, 118, 390, + /* 1500 */ 275, 410, 247, 370, 413, 414, 415, 416, 417, 418, + /* 1510 */ 461, 420, 20, 343, 468, 462, 425, 362, 427, 408, + /* 1520 */ 339, 343, 431, 432, 412, 370, 20, 390, 340, 388, + /* 1530 */ 375, 167, 377, 375, 390, 375, 375, 375, 375, 375, + /* 1540 */ 387, 343, 343, 358, 96, 96, 430, 375, 366, 370, + /* 1550 */ 343, 36, 352, 402, 331, 0, 446, 391, 391, 356, + /* 1560 */ 333, 406, 334, 339, 184, 410, 186, 0, 413, 414, + /* 1570 */ 415, 416, 417, 418, 344, 420, 329, 397, 0, 42, /* 1580 */ 425, 0, 427, 35, 331, 362, 431, 432, 204, 35, - /* 1590 */ 210, 211, 35, 370, 35, 204, 0, 35, 375, 35, + /* 1590 */ 210, 211, 35, 370, 35, 356, 356, 204, 375, 0, /* 1600 */ 377, 204, 0, 223, 224, 225, 226, 227, 228, 229, - /* 1610 */ 204, 0, 0, 35, 331, 362, 22, 0, 191, 35, - /* 1620 */ 186, 184, 0, 370, 0, 0, 0, 180, 375, 406, - /* 1630 */ 377, 179, 0, 410, 47, 0, 413, 414, 415, 416, - /* 1640 */ 417, 418, 0, 420, 331, 362, 0, 42, 425, 0, - /* 1650 */ 427, 0, 0, 370, 431, 432, 0, 0, 375, 406, + /* 1610 */ 35, 35, 204, 0, 331, 362, 0, 35, 22, 0, + /* 1620 */ 35, 191, 186, 370, 184, 0, 0, 0, 375, 406, + /* 1630 */ 377, 180, 179, 410, 0, 0, 413, 414, 415, 416, + /* 1640 */ 417, 418, 47, 420, 331, 362, 0, 0, 425, 0, + /* 1650 */ 427, 42, 0, 370, 431, 432, 0, 0, 375, 406, /* 1660 */ 377, 0, 0, 410, 152, 35, 413, 414, 415, 416, - /* 1670 */ 417, 418, 0, 420, 0, 362, 152, 0, 331, 0, + /* 1670 */ 417, 418, 0, 420, 0, 362, 0, 0, 331, 152, /* 1680 */ 427, 0, 0, 370, 431, 432, 0, 0, 375, 406, /* 1690 */ 377, 0, 0, 410, 0, 0, 413, 414, 415, 416, /* 1700 */ 417, 418, 0, 420, 0, 331, 0, 0, 0, 362, - /* 1710 */ 427, 0, 42, 0, 431, 432, 0, 370, 0, 406, - /* 1720 */ 0, 0, 375, 410, 377, 0, 413, 414, 415, 416, - /* 1730 */ 417, 418, 22, 420, 0, 0, 362, 0, 0, 136, - /* 1740 */ 0, 0, 58, 0, 370, 0, 58, 35, 58, 375, - /* 1750 */ 0, 377, 47, 406, 331, 0, 42, 410, 39, 44, - /* 1760 */ 413, 414, 415, 416, 417, 418, 0, 420, 14, 47, - /* 1770 */ 14, 47, 40, 0, 0, 39, 0, 0, 465, 466, - /* 1780 */ 406, 39, 175, 0, 410, 362, 0, 413, 414, 415, - /* 1790 */ 416, 417, 418, 370, 420, 65, 0, 0, 375, 35, - /* 1800 */ 377, 427, 455, 0, 39, 48, 432, 
35, 48, 39, - /* 1810 */ 0, 48, 0, 35, 39, 35, 331, 48, 0, 0, - /* 1820 */ 39, 0, 0, 35, 22, 0, 35, 35, 105, 406, - /* 1830 */ 103, 44, 0, 410, 35, 35, 413, 414, 415, 416, - /* 1840 */ 417, 418, 44, 420, 35, 22, 35, 362, 331, 35, - /* 1850 */ 22, 0, 22, 0, 50, 370, 22, 0, 0, 35, - /* 1860 */ 375, 35, 377, 0, 35, 22, 20, 96, 35, 0, - /* 1870 */ 35, 35, 35, 0, 165, 0, 22, 0, 96, 362, - /* 1880 */ 457, 97, 165, 187, 367, 3, 44, 370, 252, 96, - /* 1890 */ 96, 406, 375, 167, 377, 410, 165, 331, 413, 414, - /* 1900 */ 415, 416, 417, 418, 97, 420, 173, 231, 44, 256, - /* 1910 */ 97, 172, 44, 97, 331, 44, 172, 97, 252, 47, - /* 1920 */ 44, 47, 3, 406, 97, 252, 35, 410, 362, 44, - /* 1930 */ 413, 414, 415, 416, 417, 418, 370, 420, 97, 35, - /* 1940 */ 96, 375, 35, 377, 96, 362, 331, 35, 96, 35, - /* 1950 */ 367, 466, 96, 370, 44, 35, 97, 97, 375, 0, - /* 1960 */ 377, 0, 0, 47, 47, 0, 96, 39, 47, 0, - /* 1970 */ 39, 331, 406, 168, 246, 96, 410, 362, 44, 413, + /* 1710 */ 427, 0, 0, 0, 431, 432, 0, 370, 0, 406, + /* 1720 */ 42, 0, 375, 410, 377, 0, 413, 414, 415, 416, + /* 1730 */ 417, 418, 0, 420, 0, 0, 362, 22, 0, 0, + /* 1740 */ 136, 0, 0, 0, 370, 35, 58, 0, 58, 375, + /* 1750 */ 0, 377, 47, 406, 331, 58, 0, 410, 0, 42, + /* 1760 */ 413, 414, 415, 416, 417, 418, 39, 420, 44, 14, + /* 1770 */ 47, 14, 0, 47, 0, 0, 40, 0, 465, 466, + /* 1780 */ 406, 175, 0, 39, 410, 362, 0, 413, 414, 415, + /* 1790 */ 416, 417, 418, 370, 420, 0, 65, 0, 375, 39, + /* 1800 */ 377, 427, 455, 0, 35, 39, 432, 48, 0, 48, + /* 1810 */ 39, 35, 0, 35, 0, 48, 331, 0, 35, 39, + /* 1820 */ 39, 48, 0, 0, 0, 105, 35, 22, 0, 406, + /* 1830 */ 0, 103, 44, 410, 35, 35, 413, 414, 415, 416, + /* 1840 */ 417, 418, 35, 420, 35, 35, 44, 362, 331, 22, + /* 1850 */ 35, 35, 0, 22, 22, 370, 50, 0, 22, 0, + /* 1860 */ 375, 35, 377, 0, 35, 0, 22, 20, 35, 35, + /* 1870 */ 35, 0, 35, 35, 165, 0, 22, 0, 0, 362, + /* 1880 */ 457, 97, 3, 96, 367, 96, 187, 370, 252, 167, + /* 1890 */ 44, 406, 375, 97, 377, 410, 231, 331, 413, 414, + /* 1900 */ 415, 416, 417, 418, 256, 420, 173, 96, 44, 96, + /* 1910 */ 165, 165, 172, 44, 331, 97, 97, 96, 44, 47, + /* 1920 */ 172, 96, 252, 406, 44, 3, 35, 410, 362, 47, + /* 1930 */ 413, 414, 415, 416, 417, 418, 370, 420, 96, 44, + /* 1940 */ 97, 375, 97, 377, 96, 362, 331, 97, 252, 35, + /* 1950 */ 367, 466, 35, 370, 35, 35, 35, 97, 375, 97, + /* 1960 */ 377, 0, 44, 47, 0, 246, 0, 0, 39, 96, + /* 1970 */ 47, 331, 406, 47, 0, 39, 410, 362, 47, 413, /* 1980 */ 414, 415, 416, 417, 418, 370, 420, 97, 422, 406, /* 1990 */ 375, 97, 377, 410, 106, 331, 413, 414, 415, 416, - /* 2000 */ 417, 418, 362, 420, 96, 96, 166, 367, 2, 96, - /* 2010 */ 370, 47, 22, 47, 96, 375, 231, 377, 210, 96, - /* 2020 */ 331, 406, 233, 97, 97, 410, 362, 97, 413, 414, - /* 2030 */ 415, 416, 417, 418, 370, 420, 96, 231, 96, 375, - /* 2040 */ 96, 377, 96, 212, 47, 97, 406, 22, 96, 35, - /* 2050 */ 410, 362, 107, 413, 414, 415, 416, 417, 418, 370, - /* 2060 */ 420, 35, 35, 96, 375, 97, 377, 97, 97, 96, - /* 2070 */ 406, 35, 96, 35, 410, 97, 96, 413, 414, 415, - /* 2080 */ 416, 417, 418, 331, 420, 97, 35, 96, 22, 96, - /* 2090 */ 108, 44, 35, 22, 120, 406, 96, 120, 96, 410, - /* 2100 */ 65, 120, 413, 414, 415, 416, 417, 418, 64, 420, - /* 2110 */ 331, 35, 120, 35, 362, 35, 35, 35, 35, 71, - /* 2120 */ 35, 35, 370, 35, 35, 93, 35, 375, 35, 377, - /* 2130 */ 35, 44, 331, 35, 22, 35, 35, 35, 71, 35, - /* 2140 */ 35, 362, 35, 35, 22, 35, 0, 48, 35, 370, - /* 2150 */ 0, 35, 0, 39, 375, 0, 377, 48, 406, 39, - /* 2160 */ 35, 0, 410, 362, 35, 413, 414, 415, 
416, 417, - /* 2170 */ 418, 370, 420, 39, 48, 48, 375, 39, 377, 35, - /* 2180 */ 35, 0, 22, 21, 21, 406, 22, 22, 20, 410, - /* 2190 */ 469, 469, 413, 414, 415, 416, 417, 418, 469, 420, + /* 2000 */ 417, 418, 362, 420, 96, 96, 96, 367, 168, 96, + /* 2010 */ 370, 2, 44, 22, 210, 375, 231, 377, 47, 166, + /* 2020 */ 331, 406, 97, 96, 231, 410, 362, 233, 413, 414, + /* 2030 */ 415, 416, 417, 418, 370, 420, 96, 96, 96, 375, + /* 2040 */ 97, 377, 97, 97, 96, 96, 406, 47, 22, 96, + /* 2050 */ 410, 362, 35, 413, 414, 415, 416, 417, 418, 370, + /* 2060 */ 420, 212, 107, 35, 375, 96, 377, 97, 35, 96, + /* 2070 */ 406, 35, 97, 97, 410, 96, 35, 413, 414, 415, + /* 2080 */ 416, 417, 418, 331, 420, 97, 96, 35, 22, 97, + /* 2090 */ 96, 108, 44, 96, 96, 406, 35, 120, 120, 410, + /* 2100 */ 120, 120, 413, 414, 415, 416, 417, 418, 96, 420, + /* 2110 */ 331, 22, 65, 64, 362, 35, 35, 35, 35, 44, + /* 2120 */ 35, 35, 370, 35, 71, 35, 35, 375, 35, 377, + /* 2130 */ 93, 35, 331, 35, 35, 22, 35, 35, 35, 71, + /* 2140 */ 35, 362, 35, 35, 35, 35, 22, 35, 0, 370, + /* 2150 */ 35, 0, 48, 39, 375, 35, 377, 39, 406, 0, + /* 2160 */ 35, 39, 410, 362, 48, 413, 414, 415, 416, 417, + /* 2170 */ 418, 370, 420, 0, 48, 35, 375, 48, 377, 0, + /* 2180 */ 39, 35, 35, 0, 22, 406, 21, 20, 22, 410, + /* 2190 */ 22, 21, 413, 414, 415, 416, 417, 418, 469, 420, /* 2200 */ 469, 469, 469, 469, 469, 469, 469, 406, 469, 469, /* 2210 */ 469, 410, 331, 469, 413, 414, 415, 416, 417, 418, /* 2220 */ 469, 420, 469, 469, 469, 469, 469, 469, 469, 331, @@ -771,87 +771,87 @@ static const YYCODETYPE yy_lookahead[] = { /* 2730 */ 469, 469, 469, 469, 406, 469, 469, 469, 410, 469, /* 2740 */ 469, 413, 414, 415, 416, 417, 418, 469, 420, }; -#define YY_SHIFT_COUNT (740) +#define YY_SHIFT_COUNT (741) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (2181) +#define YY_SHIFT_MAX (2183) static const unsigned short int yy_shift_ofst[] = { /* 0 */ 874, 0, 72, 0, 293, 293, 293, 293, 293, 293, /* 10 */ 293, 293, 293, 293, 293, 365, 584, 584, 656, 584, /* 20 */ 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, /* 30 */ 584, 584, 584, 584, 584, 584, 584, 584, 584, 584, /* 40 */ 584, 584, 584, 584, 584, 584, 584, 584, 172, 292, - /* 50 */ 185, 184, 135, 529, 535, 529, 184, 184, 1380, 1380, - /* 60 */ 1380, 529, 1380, 1380, 388, 529, 16, 482, 115, 115, + /* 50 */ 185, 184, 135, 535, 568, 535, 184, 184, 1380, 1380, + /* 60 */ 1380, 535, 1380, 1380, 388, 535, 16, 482, 115, 115, /* 70 */ 482, 86, 86, 307, 360, 346, 346, 115, 115, 115, /* 80 */ 115, 115, 115, 115, 147, 115, 115, 138, 16, 115, /* 90 */ 115, 189, 115, 16, 115, 147, 115, 147, 16, 115, /* 100 */ 115, 16, 115, 16, 16, 16, 115, 176, 803, 34, /* 110 */ 34, 219, 458, 982, 982, 982, 982, 982, 982, 982, /* 120 */ 982, 982, 982, 982, 982, 982, 982, 982, 982, 982, - /* 130 */ 982, 982, 217, 379, 307, 360, 2, 249, 521, 521, - /* 140 */ 521, 855, 152, 152, 249, 245, 245, 245, 260, 208, - /* 150 */ 16, 617, 16, 617, 617, 650, 725, 36, 36, 36, + /* 130 */ 982, 982, 217, 379, 307, 360, 2, 249, 578, 578, + /* 140 */ 578, 478, 152, 152, 249, 245, 245, 245, 260, 208, + /* 150 */ 16, 456, 16, 456, 456, 459, 606, 36, 36, 36, /* 160 */ 36, 36, 36, 36, 36, 298, 418, 406, 47, 329, - /* 170 */ 444, 60, 69, 239, 652, 353, 651, 792, 931, 832, - /* 180 */ 861, 878, 1009, 861, 998, 1012, 1056, 1080, 1287, 1154, - /* 190 */ 1296, 1323, 1296, 1194, 1339, 1339, 1296, 1194, 1194, 1339, - /* 200 */ 1276, 1339, 1339, 1339, 1358, 1358, 1368, 138, 1375, 138, - /* 210 */ 1379, 1385, 138, 1379, 138, 138, 138, 1339, 138, 1364, - /* 220 */ 1364, 
1358, 16, 16, 16, 16, 16, 16, 16, 16, - /* 230 */ 16, 16, 16, 1339, 1358, 617, 617, 1232, 1331, 1368, - /* 240 */ 176, 1251, 1375, 176, 1339, 1323, 1323, 617, 1189, 1213, - /* 250 */ 617, 1189, 1213, 617, 617, 16, 1210, 1305, 1189, 1215, - /* 260 */ 1214, 1240, 1080, 1224, 1230, 1233, 1263, 245, 1492, 1339, - /* 270 */ 1379, 176, 176, 1504, 1213, 617, 617, 617, 617, 617, - /* 280 */ 1213, 617, 1362, 176, 650, 176, 245, 1446, 1450, 617, - /* 290 */ 725, 1339, 176, 1516, 1358, 2749, 2749, 2749, 2749, 2749, - /* 300 */ 2749, 2749, 2749, 2749, 899, 589, 530, 677, 685, 707, - /* 310 */ 911, 731, 15, 724, 893, 903, 987, 987, 987, 987, - /* 320 */ 987, 987, 987, 987, 987, 716, 699, 295, 295, 407, - /* 330 */ 161, 489, 397, 81, 370, 263, 263, 557, 791, 583, - /* 340 */ 557, 557, 557, 1113, 1020, 642, 1072, 971, 1064, 1071, - /* 350 */ 1073, 1078, 1162, 1169, 1196, 989, 1089, 1106, 1029, 1133, - /* 360 */ 1138, 1139, 1083, 1057, 66, 1049, 1179, 1180, 1184, 1187, - /* 370 */ 1188, 1190, 1248, 1212, 563, 1171, 1124, 1216, 431, 1218, - /* 380 */ 1219, 1220, 1227, 1228, 1235, 1159, 1260, 1285, 531, 1225, - /* 390 */ 1574, 1576, 1577, 1537, 1581, 1548, 1384, 1554, 1557, 1559, - /* 400 */ 1391, 1596, 1562, 1564, 1397, 1602, 1406, 1611, 1578, 1612, - /* 410 */ 1594, 1617, 1584, 1427, 1434, 1437, 1622, 1624, 1625, 1447, - /* 420 */ 1452, 1626, 1632, 1587, 1635, 1642, 1646, 1605, 1649, 1651, - /* 430 */ 1652, 1656, 1657, 1661, 1662, 1672, 1512, 1630, 1674, 1524, - /* 440 */ 1677, 1679, 1681, 1682, 1686, 1687, 1691, 1692, 1694, 1695, - /* 450 */ 1702, 1704, 1706, 1707, 1708, 1711, 1670, 1713, 1716, 1718, - /* 460 */ 1720, 1721, 1725, 1710, 1734, 1735, 1737, 1603, 1738, 1740, - /* 470 */ 1741, 1684, 1712, 1743, 1688, 1745, 1690, 1750, 1755, 1714, - /* 480 */ 1719, 1715, 1705, 1754, 1722, 1756, 1724, 1766, 1732, 1736, - /* 490 */ 1773, 1774, 1776, 1742, 1607, 1777, 1783, 1786, 1730, 1796, - /* 500 */ 1797, 1764, 1757, 1765, 1803, 1772, 1760, 1770, 1810, 1778, - /* 510 */ 1763, 1775, 1812, 1780, 1769, 1781, 1818, 1819, 1821, 1822, - /* 520 */ 1723, 1727, 1788, 1802, 1825, 1791, 1792, 1799, 1800, 1787, - /* 530 */ 1798, 1809, 1811, 1823, 1814, 1832, 1828, 1851, 1830, 1804, - /* 540 */ 1853, 1834, 1824, 1857, 1826, 1858, 1829, 1863, 1843, 1846, - /* 550 */ 1833, 1835, 1836, 1784, 1771, 1869, 1709, 1782, 1837, 1873, - /* 560 */ 1696, 1854, 1717, 1726, 1875, 1877, 1731, 1733, 1882, 1842, - /* 570 */ 1636, 1793, 1807, 1794, 1739, 1676, 1744, 1653, 1813, 1864, - /* 580 */ 1868, 1816, 1844, 1848, 1852, 1820, 1871, 1872, 1874, 1856, - /* 590 */ 1876, 1666, 1827, 1841, 1919, 1885, 1673, 1891, 1904, 1907, - /* 600 */ 1912, 1914, 1920, 1859, 1860, 1916, 1728, 1910, 1917, 1959, - /* 610 */ 1961, 1962, 1965, 1870, 1928, 1705, 1921, 1879, 1890, 1894, - /* 620 */ 1908, 1909, 1805, 1913, 1969, 1931, 1840, 1918, 1888, 1705, - /* 630 */ 1964, 1934, 1785, 1789, 1806, 2006, 1990, 1808, 1923, 1926, - /* 640 */ 1940, 1927, 1942, 1930, 1966, 1944, 1946, 1997, 1948, 2025, - /* 650 */ 1831, 1952, 1945, 1968, 2014, 2026, 1967, 1970, 2027, 1973, - /* 660 */ 1971, 2036, 1976, 1978, 2038, 1980, 1988, 2051, 1991, 1974, - /* 670 */ 1977, 1981, 1992, 2066, 1982, 1993, 2047, 2000, 2057, 2002, - /* 680 */ 2047, 2047, 2071, 2035, 2044, 2076, 2078, 2080, 2081, 2082, - /* 690 */ 2083, 2085, 2086, 2088, 2089, 2048, 2032, 2087, 2091, 2093, - /* 700 */ 2095, 2112, 2098, 2100, 2101, 2067, 1787, 2102, 1798, 2104, - /* 710 */ 2105, 2107, 2108, 2122, 2110, 2146, 2113, 2099, 2114, 2150, - /* 720 */ 2116, 2109, 2120, 2152, 2125, 2126, 2134, 2155, 2129, 
2127, - /* 730 */ 2138, 2161, 2144, 2145, 2181, 2160, 2162, 2164, 2165, 2163, - /* 740 */ 2168, + /* 170 */ 444, 60, 69, 239, 652, 353, 776, 1030, 834, 996, + /* 180 */ 866, 981, 668, 866, 888, 769, 1019, 1084, 1288, 1156, + /* 190 */ 1300, 1327, 1300, 1195, 1339, 1339, 1300, 1195, 1195, 1339, + /* 200 */ 1275, 1339, 1339, 1339, 1358, 1358, 1368, 138, 1377, 138, + /* 210 */ 1383, 1386, 138, 1383, 138, 138, 138, 1339, 138, 1362, + /* 220 */ 1362, 1358, 16, 16, 16, 16, 16, 16, 16, 16, + /* 230 */ 16, 16, 16, 1339, 1358, 456, 456, 456, 1237, 1341, + /* 240 */ 1368, 176, 1251, 1377, 176, 1339, 1327, 1327, 456, 1206, + /* 250 */ 1213, 456, 1206, 1213, 456, 456, 16, 1211, 1304, 1206, + /* 260 */ 1217, 1221, 1238, 1084, 1225, 1222, 1226, 1255, 245, 1492, + /* 270 */ 1339, 1383, 176, 176, 1506, 1213, 456, 456, 456, 456, + /* 280 */ 456, 1213, 456, 1364, 176, 459, 176, 245, 1448, 1449, + /* 290 */ 456, 606, 1339, 176, 1515, 1358, 2749, 2749, 2749, 2749, + /* 300 */ 2749, 2749, 2749, 2749, 2749, 899, 589, 530, 677, 685, + /* 310 */ 707, 911, 731, 15, 724, 893, 903, 987, 987, 987, + /* 320 */ 987, 987, 987, 987, 987, 987, 716, 596, 295, 295, + /* 330 */ 407, 161, 489, 397, 81, 370, 263, 263, 557, 757, + /* 340 */ 582, 557, 557, 557, 1012, 791, 1092, 1089, 1057, 1071, + /* 350 */ 1077, 1078, 1083, 1169, 1176, 1184, 989, 1004, 1106, 1098, + /* 360 */ 1173, 1174, 1175, 1090, 1028, 66, 1060, 1177, 1186, 1187, + /* 370 */ 1190, 1212, 1216, 1234, 1218, 1043, 1204, 1051, 1219, 1120, + /* 380 */ 1220, 1231, 1232, 1233, 1235, 1236, 1159, 1260, 1285, 1214, + /* 390 */ 1139, 1555, 1567, 1578, 1537, 1581, 1548, 1384, 1554, 1557, + /* 400 */ 1559, 1393, 1599, 1575, 1576, 1397, 1602, 1408, 1613, 1582, + /* 410 */ 1616, 1596, 1619, 1585, 1430, 1436, 1440, 1625, 1626, 1627, + /* 420 */ 1451, 1453, 1634, 1635, 1595, 1646, 1647, 1649, 1609, 1652, + /* 430 */ 1656, 1657, 1661, 1662, 1672, 1674, 1676, 1512, 1630, 1677, + /* 440 */ 1527, 1681, 1682, 1686, 1687, 1691, 1692, 1694, 1695, 1702, + /* 450 */ 1704, 1706, 1707, 1708, 1711, 1712, 1713, 1678, 1716, 1718, + /* 460 */ 1721, 1725, 1732, 1734, 1715, 1735, 1738, 1739, 1604, 1741, + /* 470 */ 1742, 1743, 1688, 1710, 1747, 1690, 1750, 1697, 1756, 1758, + /* 480 */ 1717, 1727, 1724, 1705, 1755, 1723, 1757, 1726, 1772, 1736, + /* 490 */ 1744, 1774, 1775, 1777, 1760, 1606, 1782, 1786, 1795, 1731, + /* 500 */ 1797, 1803, 1769, 1759, 1766, 1808, 1776, 1761, 1771, 1812, + /* 510 */ 1778, 1767, 1780, 1814, 1783, 1773, 1781, 1817, 1822, 1823, + /* 520 */ 1824, 1720, 1728, 1791, 1805, 1828, 1799, 1800, 1807, 1809, + /* 530 */ 1788, 1802, 1810, 1815, 1827, 1816, 1830, 1831, 1852, 1832, + /* 540 */ 1806, 1857, 1836, 1826, 1859, 1829, 1863, 1833, 1865, 1844, + /* 550 */ 1847, 1834, 1835, 1837, 1784, 1787, 1871, 1709, 1789, 1838, + /* 560 */ 1875, 1699, 1854, 1745, 1722, 1877, 1878, 1746, 1733, 1879, + /* 570 */ 1846, 1636, 1811, 1796, 1813, 1740, 1665, 1748, 1648, 1818, + /* 580 */ 1864, 1869, 1819, 1821, 1825, 1842, 1843, 1874, 1872, 1882, + /* 590 */ 1848, 1880, 1670, 1845, 1850, 1922, 1895, 1696, 1891, 1914, + /* 600 */ 1917, 1919, 1920, 1921, 1860, 1862, 1916, 1719, 1918, 1923, + /* 610 */ 1961, 1964, 1966, 1967, 1873, 1929, 1705, 1926, 1908, 1890, + /* 620 */ 1894, 1909, 1910, 1840, 1913, 1974, 1936, 1853, 1927, 1888, + /* 630 */ 1705, 1931, 1968, 1785, 1794, 1793, 2009, 1991, 1804, 1940, + /* 640 */ 1925, 1941, 1943, 1942, 1945, 1971, 1948, 1949, 2000, 1946, + /* 650 */ 2026, 1849, 1953, 1955, 1970, 2017, 2028, 1969, 1975, 2033, + /* 660 */ 1973, 1976, 2036, 1979, 1988, 2041, 
1990, 1992, 2052, 1994, + /* 670 */ 1977, 1978, 1980, 1981, 2066, 1983, 1997, 2048, 1998, 2061, + /* 680 */ 2012, 2048, 2048, 2089, 2047, 2049, 2080, 2081, 2082, 2083, + /* 690 */ 2085, 2086, 2088, 2090, 2091, 2093, 2053, 2037, 2075, 2096, + /* 700 */ 2098, 2099, 2113, 2101, 2102, 2103, 2068, 1788, 2105, 1802, + /* 710 */ 2107, 2108, 2109, 2110, 2124, 2112, 2148, 2115, 2104, 2114, + /* 720 */ 2151, 2120, 2116, 2118, 2159, 2125, 2126, 2122, 2173, 2140, + /* 730 */ 2129, 2141, 2179, 2146, 2147, 2183, 2162, 2165, 2166, 2168, + /* 740 */ 2170, 2167, }; -#define YY_REDUCE_COUNT (303) +#define YY_REDUCE_COUNT (304) #define YY_REDUCE_MIN (-438) #define YY_REDUCE_MAX (2328) static const short yy_reduce_ofst[] = { @@ -861,108 +861,108 @@ static const short yy_reduce_ofst[] = { /* 30 */ 1779, 1801, 1881, 1898, 1915, 1947, 1972, 1996, 2021, 2084, /* 40 */ 2111, 2133, 2213, 2230, 2247, 2279, 2304, 2328, -264, 169, /* 50 */ 171, -173, -350, 604, 736, 863, -336, -5, -323, -61, - /* 60 */ 709, 303, 349, 727, -438, -386, -180, -212, 68, 98, + /* 60 */ 709, 303, 727, 753, -438, -386, -180, -212, 68, 98, /* 70 */ -377, -331, -327, -344, -307, -199, -7, 71, 197, 200, - /* 80 */ 268, 270, 309, 411, -154, 561, 653, 13, -324, 654, - /* 90 */ 675, 414, 676, 97, 718, 213, 770, 238, 311, 666, - /* 100 */ 674, 472, 782, 533, 559, 568, 810, -105, -288, -93, - /* 110 */ -93, -75, -272, -187, 77, 308, 336, 354, 359, 410, - /* 120 */ 412, 413, 442, 447, 460, 518, 585, 663, 690, 691, - /* 130 */ 694, 695, -342, -41, -206, 124, 486, 516, -41, 526, - /* 140 */ 648, 117, -23, 41, 593, 230, 487, 667, 428, 269, - /* 150 */ 409, 50, 350, 700, 740, 574, 668, -354, 402, 550, - /* 160 */ 768, 776, 793, 835, 776, 490, 858, 888, 844, 800, - /* 170 */ 812, 929, 825, 918, 918, 942, 914, 950, 930, 926, - /* 180 */ 873, 873, 859, 873, 890, 885, 918, 925, 933, 941, - /* 190 */ 960, 962, 972, 978, 1022, 1024, 983, 984, 988, 1030, - /* 200 */ 1025, 1032, 1035, 1037, 1047, 1048, 990, 1045, 1021, 1052, - /* 210 */ 1063, 1011, 1062, 1068, 1065, 1069, 1075, 1070, 1076, 1081, - /* 220 */ 1082, 1085, 1066, 1067, 1074, 1077, 1079, 1084, 1087, 1088, - /* 230 */ 1092, 1101, 1102, 1086, 1093, 1055, 1058, 1027, 1031, 1034, - /* 240 */ 1094, 1043, 1095, 1099, 1105, 1060, 1090, 1096, 1016, 1097, - /* 250 */ 1103, 1028, 1098, 1104, 1107, 918, 1033, 1038, 1041, 1044, - /* 260 */ 1046, 1050, 1111, 1026, 1051, 1039, 873, 1125, 1108, 1175, - /* 270 */ 1181, 1172, 1183, 1140, 1137, 1156, 1158, 1160, 1161, 1163, - /* 280 */ 1144, 1164, 1150, 1197, 1185, 1198, 1174, 1115, 1182, 1176, - /* 290 */ 1195, 1211, 1206, 1221, 1226, 1165, 1151, 1166, 1167, 1200, - /* 300 */ 1204, 1207, 1234, 1238, + /* 80 */ 268, 270, 309, 411, -154, 551, 561, 13, -324, 653, + /* 90 */ 654, 414, 675, 97, 676, 213, 718, 238, 311, 666, + /* 100 */ 674, 472, 782, 415, 560, 754, 770, -105, -288, -93, + /* 110 */ -93, -75, -272, -187, 77, 308, 336, 354, 359, 382, + /* 120 */ 410, 447, 518, 521, 564, 585, 663, 690, 691, 694, + /* 130 */ 695, 711, -342, -41, -206, 124, 257, 516, -41, 274, + /* 140 */ 333, 117, -23, 41, 483, 230, 487, 547, 434, 269, + /* 150 */ 466, 50, 387, 519, 665, 706, 537, -354, 325, 417, + /* 160 */ 455, 492, 550, 555, 492, 598, 755, 756, 764, 723, + /* 170 */ 794, 926, 818, 916, 916, 940, 919, 950, 937, 929, + /* 180 */ 878, 878, 864, 878, 890, 881, 916, 927, 930, 944, + /* 190 */ 961, 971, 972, 978, 1022, 1024, 983, 984, 988, 1031, + /* 200 */ 1025, 1032, 1035, 1037, 1047, 1048, 990, 1050, 1021, 1054, + /* 210 */ 1065, 1013, 1062, 1069, 1073, 1075, 1076, 1085, 1080, 
1068, + /* 220 */ 1074, 1093, 1061, 1066, 1067, 1070, 1072, 1079, 1081, 1082, + /* 230 */ 1087, 1088, 1101, 1094, 1097, 1052, 1056, 1064, 1040, 1039, + /* 240 */ 1034, 1103, 1095, 1099, 1104, 1115, 1086, 1096, 1100, 1016, + /* 250 */ 1102, 1105, 1023, 1109, 1107, 1108, 916, 1026, 1036, 1041, + /* 260 */ 1045, 1044, 1110, 1111, 1046, 1053, 1049, 878, 1133, 1112, + /* 270 */ 1181, 1188, 1170, 1178, 1141, 1137, 1158, 1160, 1161, 1162, + /* 280 */ 1163, 1144, 1164, 1153, 1198, 1185, 1199, 1179, 1116, 1182, + /* 290 */ 1172, 1200, 1224, 1207, 1228, 1227, 1180, 1151, 1166, 1167, + /* 300 */ 1203, 1239, 1240, 1230, 1247, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 10 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 20 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 30 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 40 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 50 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 60 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 70 */ 1661, 1661, 1661, 1919, 1661, 1661, 1661, 1661, 1661, 1661, - /* 80 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1740, 1661, 1661, - /* 90 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 100 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1738, 1912, 2127, - /* 110 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 120 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 130 */ 1661, 1661, 1661, 2139, 1661, 1661, 1740, 1661, 2139, 2139, - /* 140 */ 2139, 1738, 2099, 2099, 1661, 1661, 1661, 1661, 1973, 1661, - /* 150 */ 1661, 1661, 1661, 1661, 1661, 1847, 1661, 1661, 1661, 1661, - /* 160 */ 1661, 1871, 1661, 1661, 1661, 1965, 1661, 1661, 2164, 2220, - /* 170 */ 1661, 1661, 2167, 1661, 1661, 1661, 1924, 1661, 1800, 2154, - /* 180 */ 2131, 2145, 2204, 2132, 2129, 2148, 1661, 2158, 1661, 1958, - /* 190 */ 1917, 1661, 1917, 1914, 1661, 1661, 1917, 1914, 1914, 1661, - /* 200 */ 1791, 1661, 1661, 1661, 1661, 1661, 1661, 1740, 1661, 1740, - /* 210 */ 1661, 1661, 1740, 1661, 1740, 1740, 1740, 1661, 1740, 1718, - /* 220 */ 1718, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 230 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1984, 1971, 1661, - /* 240 */ 1738, 1967, 1661, 1738, 1661, 1661, 1661, 1661, 2175, 2173, - /* 250 */ 1661, 2175, 2173, 1661, 1661, 1661, 2189, 2185, 2175, 2193, - /* 260 */ 2191, 2160, 2158, 2223, 2210, 2206, 2145, 1661, 1661, 1661, - /* 270 */ 1661, 1738, 1738, 1661, 2173, 1661, 1661, 1661, 1661, 1661, - /* 280 */ 2173, 1661, 1661, 1738, 1661, 1738, 1661, 1661, 1816, 1661, - /* 290 */ 1661, 1661, 1738, 1693, 1661, 1960, 1976, 1942, 1942, 1850, - /* 300 */ 1850, 1850, 1741, 1666, 1661, 1661, 1661, 1661, 1661, 1661, - /* 310 */ 1661, 1661, 1661, 1661, 1661, 1661, 2188, 2187, 2054, 1661, - /* 320 */ 2103, 2102, 2101, 2092, 2053, 1812, 1661, 2052, 2051, 1661, - /* 330 */ 1661, 1661, 1661, 1661, 1661, 1933, 1932, 2045, 1661, 1661, - /* 340 */ 2046, 2044, 2043, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 350 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 360 */ 1661, 1661, 1661, 2207, 2211, 1661, 1661, 1661, 1661, 1661, - /* 370 */ 1661, 1661, 2128, 1661, 1661, 1661, 1661, 1661, 2027, 1661, - /* 380 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 390 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 400 */ 1661, 
1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 410 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 420 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 430 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 440 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 450 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 460 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 470 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 480 */ 1661, 1698, 2032, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 490 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 500 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 510 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 520 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1779, - /* 530 */ 1778, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 540 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 550 */ 1661, 1661, 1661, 2036, 1661, 1661, 1661, 1661, 1661, 1661, - /* 560 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 2203, 2161, - /* 570 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 580 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 2027, 1661, - /* 590 */ 2186, 1661, 1661, 2201, 1661, 2205, 1661, 1661, 1661, 1661, - /* 600 */ 1661, 1661, 1661, 2138, 2134, 1661, 1661, 2130, 1661, 1661, - /* 610 */ 1661, 1661, 1661, 1661, 1661, 2035, 1661, 1661, 1661, 1661, - /* 620 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 2026, - /* 630 */ 1661, 2089, 1661, 1661, 1661, 2123, 1661, 1661, 2074, 1661, - /* 640 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 2036, 1661, - /* 650 */ 2039, 1661, 1661, 1661, 1661, 1661, 1844, 1661, 1661, 1661, - /* 660 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1829, - /* 670 */ 1827, 1826, 1825, 1661, 1822, 1661, 1857, 1661, 1661, 1661, - /* 680 */ 1853, 1852, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 690 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1759, 1661, 1661, - /* 700 */ 1661, 1661, 1661, 1661, 1661, 1661, 1751, 1661, 1750, 1661, - /* 710 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 720 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 730 */ 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, 1661, - /* 740 */ 1661, + /* 0 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 10 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 20 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 30 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 40 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 50 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 60 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 70 */ 1664, 1664, 1664, 1922, 1664, 1664, 1664, 1664, 1664, 1664, + /* 80 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1743, 1664, 1664, + /* 90 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 100 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1741, 1915, 2131, + /* 110 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 120 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 130 */ 1664, 1664, 1664, 2143, 1664, 1664, 1743, 1664, 2143, 2143, + /* 140 */ 2143, 1741, 2103, 2103, 1664, 1664, 1664, 1664, 1976, 1664, + /* 150 */ 1664, 1664, 1664, 1664, 
1664, 1850, 1664, 1664, 1664, 1664, + /* 160 */ 1664, 1874, 1664, 1664, 1664, 1968, 1664, 1664, 2168, 2224, + /* 170 */ 1664, 1664, 2171, 1664, 1664, 1664, 1927, 1664, 1803, 2158, + /* 180 */ 2135, 2149, 2208, 2136, 2133, 2152, 1664, 2162, 1664, 1961, + /* 190 */ 1920, 1664, 1920, 1917, 1664, 1664, 1920, 1917, 1917, 1664, + /* 200 */ 1794, 1664, 1664, 1664, 1664, 1664, 1664, 1743, 1664, 1743, + /* 210 */ 1664, 1664, 1743, 1664, 1743, 1743, 1743, 1664, 1743, 1721, + /* 220 */ 1721, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 230 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1988, 1974, + /* 240 */ 1664, 1741, 1970, 1664, 1741, 1664, 1664, 1664, 1664, 2179, + /* 250 */ 2177, 1664, 2179, 2177, 1664, 1664, 1664, 2193, 2189, 2179, + /* 260 */ 2197, 2195, 2164, 2162, 2227, 2214, 2210, 2149, 1664, 1664, + /* 270 */ 1664, 1664, 1741, 1741, 1664, 2177, 1664, 1664, 1664, 1664, + /* 280 */ 1664, 2177, 1664, 1664, 1741, 1664, 1741, 1664, 1664, 1819, + /* 290 */ 1664, 1664, 1664, 1741, 1696, 1664, 1963, 1979, 1945, 1945, + /* 300 */ 1853, 1853, 1853, 1744, 1669, 1664, 1664, 1664, 1664, 1664, + /* 310 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 2192, 2191, 2058, + /* 320 */ 1664, 2107, 2106, 2105, 2096, 2057, 1815, 1664, 2056, 2055, + /* 330 */ 1664, 1664, 1664, 1664, 1664, 1664, 1936, 1935, 2049, 1664, + /* 340 */ 1664, 2050, 2048, 2047, 1664, 1664, 1664, 1664, 1664, 1664, + /* 350 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 360 */ 1664, 1664, 1664, 1664, 2211, 2215, 1664, 1664, 1664, 1664, + /* 370 */ 1664, 1664, 1664, 2132, 1664, 1664, 1664, 1664, 1664, 2031, + /* 380 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 390 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 400 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 410 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 420 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 430 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 440 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 450 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 460 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 470 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 480 */ 1664, 1664, 1701, 2036, 1664, 1664, 1664, 1664, 1664, 1664, + /* 490 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 500 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 510 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 520 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 530 */ 1782, 1781, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 540 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 550 */ 1664, 1664, 1664, 1664, 2040, 1664, 1664, 1664, 1664, 1664, + /* 560 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 2207, + /* 570 */ 2165, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 580 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 2031, + /* 590 */ 1664, 2190, 1664, 1664, 2205, 1664, 2209, 1664, 1664, 1664, + /* 600 */ 1664, 1664, 1664, 1664, 2142, 2138, 1664, 1664, 2134, 1664, + /* 610 */ 1664, 1664, 1664, 1664, 1664, 1664, 2039, 1664, 1664, 1664, + /* 620 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 630 */ 2030, 1664, 2093, 1664, 1664, 1664, 2127, 1664, 1664, 2078, + /* 640 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 
1664, 2040, + /* 650 */ 1664, 2043, 1664, 1664, 1664, 1664, 1664, 1847, 1664, 1664, + /* 660 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 670 */ 1832, 1830, 1829, 1828, 1664, 1825, 1664, 1860, 1664, 1664, + /* 680 */ 1664, 1856, 1855, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 690 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1762, 1664, + /* 700 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1754, 1664, 1753, + /* 710 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 720 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 730 */ 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, 1664, + /* 740 */ 1664, 1664, }; /********** End of lemon-generated parsing tables *****************************/ @@ -2192,249 +2192,250 @@ static const char *const yyRuleName[] = { /* 316 */ "stream_options ::= stream_options WATERMARK duration_literal", /* 317 */ "stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER", /* 318 */ "stream_options ::= stream_options FILL_HISTORY NK_INTEGER", - /* 319 */ "stream_options ::= stream_options IGNORE UPDATE NK_INTEGER", - /* 320 */ "subtable_opt ::=", - /* 321 */ "subtable_opt ::= SUBTABLE NK_LP expression NK_RP", - /* 322 */ "cmd ::= KILL CONNECTION NK_INTEGER", - /* 323 */ "cmd ::= KILL QUERY NK_STRING", - /* 324 */ "cmd ::= KILL TRANSACTION NK_INTEGER", - /* 325 */ "cmd ::= BALANCE VGROUP", - /* 326 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER", - /* 327 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list", - /* 328 */ "cmd ::= SPLIT VGROUP NK_INTEGER", - /* 329 */ "dnode_list ::= DNODE NK_INTEGER", - /* 330 */ "dnode_list ::= dnode_list DNODE NK_INTEGER", - /* 331 */ "cmd ::= DELETE FROM full_table_name where_clause_opt", - /* 332 */ "cmd ::= query_or_subquery", - /* 333 */ "cmd ::= insert_query", - /* 334 */ "insert_query ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery", - /* 335 */ "insert_query ::= INSERT INTO full_table_name query_or_subquery", - /* 336 */ "literal ::= NK_INTEGER", - /* 337 */ "literal ::= NK_FLOAT", - /* 338 */ "literal ::= NK_STRING", - /* 339 */ "literal ::= NK_BOOL", - /* 340 */ "literal ::= TIMESTAMP NK_STRING", - /* 341 */ "literal ::= duration_literal", - /* 342 */ "literal ::= NULL", - /* 343 */ "literal ::= NK_QUESTION", - /* 344 */ "duration_literal ::= NK_VARIABLE", - /* 345 */ "signed ::= NK_INTEGER", - /* 346 */ "signed ::= NK_PLUS NK_INTEGER", - /* 347 */ "signed ::= NK_MINUS NK_INTEGER", - /* 348 */ "signed ::= NK_FLOAT", - /* 349 */ "signed ::= NK_PLUS NK_FLOAT", - /* 350 */ "signed ::= NK_MINUS NK_FLOAT", - /* 351 */ "signed_literal ::= signed", - /* 352 */ "signed_literal ::= NK_STRING", - /* 353 */ "signed_literal ::= NK_BOOL", - /* 354 */ "signed_literal ::= TIMESTAMP NK_STRING", - /* 355 */ "signed_literal ::= duration_literal", - /* 356 */ "signed_literal ::= NULL", - /* 357 */ "signed_literal ::= literal_func", - /* 358 */ "signed_literal ::= NK_QUESTION", - /* 359 */ "literal_list ::= signed_literal", - /* 360 */ "literal_list ::= literal_list NK_COMMA signed_literal", - /* 361 */ "db_name ::= NK_ID", - /* 362 */ "table_name ::= NK_ID", - /* 363 */ "column_name ::= NK_ID", - /* 364 */ "function_name ::= NK_ID", - /* 365 */ "table_alias ::= NK_ID", - /* 366 */ "column_alias ::= NK_ID", - /* 367 */ "user_name ::= NK_ID", - /* 368 */ "topic_name ::= NK_ID", - /* 369 */ "stream_name ::= NK_ID", - /* 370 */ "cgroup_name ::= NK_ID", - /* 371 */ "index_name ::= NK_ID", - /* 372 */ "expr_or_subquery ::= expression", - /* 373 */ 
"expression ::= literal", - /* 374 */ "expression ::= pseudo_column", - /* 375 */ "expression ::= column_reference", - /* 376 */ "expression ::= function_expression", - /* 377 */ "expression ::= case_when_expression", - /* 378 */ "expression ::= NK_LP expression NK_RP", - /* 379 */ "expression ::= NK_PLUS expr_or_subquery", - /* 380 */ "expression ::= NK_MINUS expr_or_subquery", - /* 381 */ "expression ::= expr_or_subquery NK_PLUS expr_or_subquery", - /* 382 */ "expression ::= expr_or_subquery NK_MINUS expr_or_subquery", - /* 383 */ "expression ::= expr_or_subquery NK_STAR expr_or_subquery", - /* 384 */ "expression ::= expr_or_subquery NK_SLASH expr_or_subquery", - /* 385 */ "expression ::= expr_or_subquery NK_REM expr_or_subquery", - /* 386 */ "expression ::= column_reference NK_ARROW NK_STRING", - /* 387 */ "expression ::= expr_or_subquery NK_BITAND expr_or_subquery", - /* 388 */ "expression ::= expr_or_subquery NK_BITOR expr_or_subquery", - /* 389 */ "expression_list ::= expr_or_subquery", - /* 390 */ "expression_list ::= expression_list NK_COMMA expr_or_subquery", - /* 391 */ "column_reference ::= column_name", - /* 392 */ "column_reference ::= table_name NK_DOT column_name", - /* 393 */ "pseudo_column ::= ROWTS", - /* 394 */ "pseudo_column ::= TBNAME", - /* 395 */ "pseudo_column ::= table_name NK_DOT TBNAME", - /* 396 */ "pseudo_column ::= QSTART", - /* 397 */ "pseudo_column ::= QEND", - /* 398 */ "pseudo_column ::= QDURATION", - /* 399 */ "pseudo_column ::= WSTART", - /* 400 */ "pseudo_column ::= WEND", - /* 401 */ "pseudo_column ::= WDURATION", - /* 402 */ "pseudo_column ::= IROWTS", - /* 403 */ "pseudo_column ::= ISFILLED", - /* 404 */ "pseudo_column ::= QTAGS", - /* 405 */ "function_expression ::= function_name NK_LP expression_list NK_RP", - /* 406 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP", - /* 407 */ "function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP", - /* 408 */ "function_expression ::= literal_func", - /* 409 */ "literal_func ::= noarg_func NK_LP NK_RP", - /* 410 */ "literal_func ::= NOW", - /* 411 */ "noarg_func ::= NOW", - /* 412 */ "noarg_func ::= TODAY", - /* 413 */ "noarg_func ::= TIMEZONE", - /* 414 */ "noarg_func ::= DATABASE", - /* 415 */ "noarg_func ::= CLIENT_VERSION", - /* 416 */ "noarg_func ::= SERVER_VERSION", - /* 417 */ "noarg_func ::= SERVER_STATUS", - /* 418 */ "noarg_func ::= CURRENT_USER", - /* 419 */ "noarg_func ::= USER", - /* 420 */ "star_func ::= COUNT", - /* 421 */ "star_func ::= FIRST", - /* 422 */ "star_func ::= LAST", - /* 423 */ "star_func ::= LAST_ROW", - /* 424 */ "star_func_para_list ::= NK_STAR", - /* 425 */ "star_func_para_list ::= other_para_list", - /* 426 */ "other_para_list ::= star_func_para", - /* 427 */ "other_para_list ::= other_para_list NK_COMMA star_func_para", - /* 428 */ "star_func_para ::= expr_or_subquery", - /* 429 */ "star_func_para ::= table_name NK_DOT NK_STAR", - /* 430 */ "case_when_expression ::= CASE when_then_list case_when_else_opt END", - /* 431 */ "case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END", - /* 432 */ "when_then_list ::= when_then_expr", - /* 433 */ "when_then_list ::= when_then_list when_then_expr", - /* 434 */ "when_then_expr ::= WHEN common_expression THEN common_expression", - /* 435 */ "case_when_else_opt ::=", - /* 436 */ "case_when_else_opt ::= ELSE common_expression", - /* 437 */ "predicate ::= expr_or_subquery compare_op expr_or_subquery", - /* 438 */ "predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND 
expr_or_subquery", - /* 439 */ "predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery", - /* 440 */ "predicate ::= expr_or_subquery IS NULL", - /* 441 */ "predicate ::= expr_or_subquery IS NOT NULL", - /* 442 */ "predicate ::= expr_or_subquery in_op in_predicate_value", - /* 443 */ "compare_op ::= NK_LT", - /* 444 */ "compare_op ::= NK_GT", - /* 445 */ "compare_op ::= NK_LE", - /* 446 */ "compare_op ::= NK_GE", - /* 447 */ "compare_op ::= NK_NE", - /* 448 */ "compare_op ::= NK_EQ", - /* 449 */ "compare_op ::= LIKE", - /* 450 */ "compare_op ::= NOT LIKE", - /* 451 */ "compare_op ::= MATCH", - /* 452 */ "compare_op ::= NMATCH", - /* 453 */ "compare_op ::= CONTAINS", - /* 454 */ "in_op ::= IN", - /* 455 */ "in_op ::= NOT IN", - /* 456 */ "in_predicate_value ::= NK_LP literal_list NK_RP", - /* 457 */ "boolean_value_expression ::= boolean_primary", - /* 458 */ "boolean_value_expression ::= NOT boolean_primary", - /* 459 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression", - /* 460 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression", - /* 461 */ "boolean_primary ::= predicate", - /* 462 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP", - /* 463 */ "common_expression ::= expr_or_subquery", - /* 464 */ "common_expression ::= boolean_value_expression", - /* 465 */ "from_clause_opt ::=", - /* 466 */ "from_clause_opt ::= FROM table_reference_list", - /* 467 */ "table_reference_list ::= table_reference", - /* 468 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference", - /* 469 */ "table_reference ::= table_primary", - /* 470 */ "table_reference ::= joined_table", - /* 471 */ "table_primary ::= table_name alias_opt", - /* 472 */ "table_primary ::= db_name NK_DOT table_name alias_opt", - /* 473 */ "table_primary ::= subquery alias_opt", - /* 474 */ "table_primary ::= parenthesized_joined_table", - /* 475 */ "alias_opt ::=", - /* 476 */ "alias_opt ::= table_alias", - /* 477 */ "alias_opt ::= AS table_alias", - /* 478 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP", - /* 479 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP", - /* 480 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition", - /* 481 */ "join_type ::=", - /* 482 */ "join_type ::= INNER", - /* 483 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt", - /* 484 */ "set_quantifier_opt ::=", - /* 485 */ "set_quantifier_opt ::= DISTINCT", - /* 486 */ "set_quantifier_opt ::= ALL", - /* 487 */ "select_list ::= select_item", - /* 488 */ "select_list ::= select_list NK_COMMA select_item", - /* 489 */ "select_item ::= NK_STAR", - /* 490 */ "select_item ::= common_expression", - /* 491 */ "select_item ::= common_expression column_alias", - /* 492 */ "select_item ::= common_expression AS column_alias", - /* 493 */ "select_item ::= table_name NK_DOT NK_STAR", - /* 494 */ "where_clause_opt ::=", - /* 495 */ "where_clause_opt ::= WHERE search_condition", - /* 496 */ "partition_by_clause_opt ::=", - /* 497 */ "partition_by_clause_opt ::= PARTITION BY partition_list", - /* 498 */ "partition_list ::= partition_item", - /* 499 */ "partition_list ::= partition_list NK_COMMA partition_item", - /* 500 */ "partition_item ::= expr_or_subquery", - /* 501 */ "partition_item ::= expr_or_subquery column_alias", - /* 
502 */ "partition_item ::= expr_or_subquery AS column_alias", - /* 503 */ "twindow_clause_opt ::=", - /* 504 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP", - /* 505 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP", - /* 506 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt", - /* 507 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt", - /* 508 */ "twindow_clause_opt ::= EVENT_WINDOW START WITH search_condition END WITH search_condition", - /* 509 */ "sliding_opt ::=", - /* 510 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP", - /* 511 */ "fill_opt ::=", - /* 512 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP", - /* 513 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP", - /* 514 */ "fill_opt ::= FILL NK_LP VALUE_F NK_COMMA literal_list NK_RP", - /* 515 */ "fill_mode ::= NONE", - /* 516 */ "fill_mode ::= PREV", - /* 517 */ "fill_mode ::= NULL", - /* 518 */ "fill_mode ::= NULL_F", - /* 519 */ "fill_mode ::= LINEAR", - /* 520 */ "fill_mode ::= NEXT", - /* 521 */ "group_by_clause_opt ::=", - /* 522 */ "group_by_clause_opt ::= GROUP BY group_by_list", - /* 523 */ "group_by_list ::= expr_or_subquery", - /* 524 */ "group_by_list ::= group_by_list NK_COMMA expr_or_subquery", - /* 525 */ "having_clause_opt ::=", - /* 526 */ "having_clause_opt ::= HAVING search_condition", - /* 527 */ "range_opt ::=", - /* 528 */ "range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP", - /* 529 */ "every_opt ::=", - /* 530 */ "every_opt ::= EVERY NK_LP duration_literal NK_RP", - /* 531 */ "query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt", - /* 532 */ "query_simple ::= query_specification", - /* 533 */ "query_simple ::= union_query_expression", - /* 534 */ "union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery", - /* 535 */ "union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery", - /* 536 */ "query_simple_or_subquery ::= query_simple", - /* 537 */ "query_simple_or_subquery ::= subquery", - /* 538 */ "query_or_subquery ::= query_expression", - /* 539 */ "query_or_subquery ::= subquery", - /* 540 */ "order_by_clause_opt ::=", - /* 541 */ "order_by_clause_opt ::= ORDER BY sort_specification_list", - /* 542 */ "slimit_clause_opt ::=", - /* 543 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER", - /* 544 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER", - /* 545 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER", - /* 546 */ "limit_clause_opt ::=", - /* 547 */ "limit_clause_opt ::= LIMIT NK_INTEGER", - /* 548 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER", - /* 549 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER", - /* 550 */ "subquery ::= NK_LP query_expression NK_RP", - /* 551 */ "subquery ::= NK_LP subquery NK_RP", - /* 552 */ "search_condition ::= common_expression", - /* 553 */ "sort_specification_list ::= sort_specification", - /* 554 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification", - /* 555 */ "sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt", - /* 556 */ "ordering_specification_opt ::=", - /* 557 */ "ordering_specification_opt ::= ASC", - /* 558 */ "ordering_specification_opt ::= DESC", - /* 559 */ "null_ordering_opt ::=", - /* 560 */ "null_ordering_opt ::= NULLS FIRST", - /* 561 */ 
"null_ordering_opt ::= NULLS LAST", + /* 319 */ "stream_options ::= stream_options DELETE_MARK duration_literal", + /* 320 */ "stream_options ::= stream_options IGNORE UPDATE NK_INTEGER", + /* 321 */ "subtable_opt ::=", + /* 322 */ "subtable_opt ::= SUBTABLE NK_LP expression NK_RP", + /* 323 */ "cmd ::= KILL CONNECTION NK_INTEGER", + /* 324 */ "cmd ::= KILL QUERY NK_STRING", + /* 325 */ "cmd ::= KILL TRANSACTION NK_INTEGER", + /* 326 */ "cmd ::= BALANCE VGROUP", + /* 327 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER", + /* 328 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list", + /* 329 */ "cmd ::= SPLIT VGROUP NK_INTEGER", + /* 330 */ "dnode_list ::= DNODE NK_INTEGER", + /* 331 */ "dnode_list ::= dnode_list DNODE NK_INTEGER", + /* 332 */ "cmd ::= DELETE FROM full_table_name where_clause_opt", + /* 333 */ "cmd ::= query_or_subquery", + /* 334 */ "cmd ::= insert_query", + /* 335 */ "insert_query ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery", + /* 336 */ "insert_query ::= INSERT INTO full_table_name query_or_subquery", + /* 337 */ "literal ::= NK_INTEGER", + /* 338 */ "literal ::= NK_FLOAT", + /* 339 */ "literal ::= NK_STRING", + /* 340 */ "literal ::= NK_BOOL", + /* 341 */ "literal ::= TIMESTAMP NK_STRING", + /* 342 */ "literal ::= duration_literal", + /* 343 */ "literal ::= NULL", + /* 344 */ "literal ::= NK_QUESTION", + /* 345 */ "duration_literal ::= NK_VARIABLE", + /* 346 */ "signed ::= NK_INTEGER", + /* 347 */ "signed ::= NK_PLUS NK_INTEGER", + /* 348 */ "signed ::= NK_MINUS NK_INTEGER", + /* 349 */ "signed ::= NK_FLOAT", + /* 350 */ "signed ::= NK_PLUS NK_FLOAT", + /* 351 */ "signed ::= NK_MINUS NK_FLOAT", + /* 352 */ "signed_literal ::= signed", + /* 353 */ "signed_literal ::= NK_STRING", + /* 354 */ "signed_literal ::= NK_BOOL", + /* 355 */ "signed_literal ::= TIMESTAMP NK_STRING", + /* 356 */ "signed_literal ::= duration_literal", + /* 357 */ "signed_literal ::= NULL", + /* 358 */ "signed_literal ::= literal_func", + /* 359 */ "signed_literal ::= NK_QUESTION", + /* 360 */ "literal_list ::= signed_literal", + /* 361 */ "literal_list ::= literal_list NK_COMMA signed_literal", + /* 362 */ "db_name ::= NK_ID", + /* 363 */ "table_name ::= NK_ID", + /* 364 */ "column_name ::= NK_ID", + /* 365 */ "function_name ::= NK_ID", + /* 366 */ "table_alias ::= NK_ID", + /* 367 */ "column_alias ::= NK_ID", + /* 368 */ "user_name ::= NK_ID", + /* 369 */ "topic_name ::= NK_ID", + /* 370 */ "stream_name ::= NK_ID", + /* 371 */ "cgroup_name ::= NK_ID", + /* 372 */ "index_name ::= NK_ID", + /* 373 */ "expr_or_subquery ::= expression", + /* 374 */ "expression ::= literal", + /* 375 */ "expression ::= pseudo_column", + /* 376 */ "expression ::= column_reference", + /* 377 */ "expression ::= function_expression", + /* 378 */ "expression ::= case_when_expression", + /* 379 */ "expression ::= NK_LP expression NK_RP", + /* 380 */ "expression ::= NK_PLUS expr_or_subquery", + /* 381 */ "expression ::= NK_MINUS expr_or_subquery", + /* 382 */ "expression ::= expr_or_subquery NK_PLUS expr_or_subquery", + /* 383 */ "expression ::= expr_or_subquery NK_MINUS expr_or_subquery", + /* 384 */ "expression ::= expr_or_subquery NK_STAR expr_or_subquery", + /* 385 */ "expression ::= expr_or_subquery NK_SLASH expr_or_subquery", + /* 386 */ "expression ::= expr_or_subquery NK_REM expr_or_subquery", + /* 387 */ "expression ::= column_reference NK_ARROW NK_STRING", + /* 388 */ "expression ::= expr_or_subquery NK_BITAND expr_or_subquery", + /* 389 */ "expression ::= expr_or_subquery NK_BITOR 
expr_or_subquery", + /* 390 */ "expression_list ::= expr_or_subquery", + /* 391 */ "expression_list ::= expression_list NK_COMMA expr_or_subquery", + /* 392 */ "column_reference ::= column_name", + /* 393 */ "column_reference ::= table_name NK_DOT column_name", + /* 394 */ "pseudo_column ::= ROWTS", + /* 395 */ "pseudo_column ::= TBNAME", + /* 396 */ "pseudo_column ::= table_name NK_DOT TBNAME", + /* 397 */ "pseudo_column ::= QSTART", + /* 398 */ "pseudo_column ::= QEND", + /* 399 */ "pseudo_column ::= QDURATION", + /* 400 */ "pseudo_column ::= WSTART", + /* 401 */ "pseudo_column ::= WEND", + /* 402 */ "pseudo_column ::= WDURATION", + /* 403 */ "pseudo_column ::= IROWTS", + /* 404 */ "pseudo_column ::= ISFILLED", + /* 405 */ "pseudo_column ::= QTAGS", + /* 406 */ "function_expression ::= function_name NK_LP expression_list NK_RP", + /* 407 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP", + /* 408 */ "function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP", + /* 409 */ "function_expression ::= literal_func", + /* 410 */ "literal_func ::= noarg_func NK_LP NK_RP", + /* 411 */ "literal_func ::= NOW", + /* 412 */ "noarg_func ::= NOW", + /* 413 */ "noarg_func ::= TODAY", + /* 414 */ "noarg_func ::= TIMEZONE", + /* 415 */ "noarg_func ::= DATABASE", + /* 416 */ "noarg_func ::= CLIENT_VERSION", + /* 417 */ "noarg_func ::= SERVER_VERSION", + /* 418 */ "noarg_func ::= SERVER_STATUS", + /* 419 */ "noarg_func ::= CURRENT_USER", + /* 420 */ "noarg_func ::= USER", + /* 421 */ "star_func ::= COUNT", + /* 422 */ "star_func ::= FIRST", + /* 423 */ "star_func ::= LAST", + /* 424 */ "star_func ::= LAST_ROW", + /* 425 */ "star_func_para_list ::= NK_STAR", + /* 426 */ "star_func_para_list ::= other_para_list", + /* 427 */ "other_para_list ::= star_func_para", + /* 428 */ "other_para_list ::= other_para_list NK_COMMA star_func_para", + /* 429 */ "star_func_para ::= expr_or_subquery", + /* 430 */ "star_func_para ::= table_name NK_DOT NK_STAR", + /* 431 */ "case_when_expression ::= CASE when_then_list case_when_else_opt END", + /* 432 */ "case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END", + /* 433 */ "when_then_list ::= when_then_expr", + /* 434 */ "when_then_list ::= when_then_list when_then_expr", + /* 435 */ "when_then_expr ::= WHEN common_expression THEN common_expression", + /* 436 */ "case_when_else_opt ::=", + /* 437 */ "case_when_else_opt ::= ELSE common_expression", + /* 438 */ "predicate ::= expr_or_subquery compare_op expr_or_subquery", + /* 439 */ "predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery", + /* 440 */ "predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery", + /* 441 */ "predicate ::= expr_or_subquery IS NULL", + /* 442 */ "predicate ::= expr_or_subquery IS NOT NULL", + /* 443 */ "predicate ::= expr_or_subquery in_op in_predicate_value", + /* 444 */ "compare_op ::= NK_LT", + /* 445 */ "compare_op ::= NK_GT", + /* 446 */ "compare_op ::= NK_LE", + /* 447 */ "compare_op ::= NK_GE", + /* 448 */ "compare_op ::= NK_NE", + /* 449 */ "compare_op ::= NK_EQ", + /* 450 */ "compare_op ::= LIKE", + /* 451 */ "compare_op ::= NOT LIKE", + /* 452 */ "compare_op ::= MATCH", + /* 453 */ "compare_op ::= NMATCH", + /* 454 */ "compare_op ::= CONTAINS", + /* 455 */ "in_op ::= IN", + /* 456 */ "in_op ::= NOT IN", + /* 457 */ "in_predicate_value ::= NK_LP literal_list NK_RP", + /* 458 */ "boolean_value_expression ::= boolean_primary", + /* 459 */ "boolean_value_expression ::= NOT 
boolean_primary", + /* 460 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression", + /* 461 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression", + /* 462 */ "boolean_primary ::= predicate", + /* 463 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP", + /* 464 */ "common_expression ::= expr_or_subquery", + /* 465 */ "common_expression ::= boolean_value_expression", + /* 466 */ "from_clause_opt ::=", + /* 467 */ "from_clause_opt ::= FROM table_reference_list", + /* 468 */ "table_reference_list ::= table_reference", + /* 469 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference", + /* 470 */ "table_reference ::= table_primary", + /* 471 */ "table_reference ::= joined_table", + /* 472 */ "table_primary ::= table_name alias_opt", + /* 473 */ "table_primary ::= db_name NK_DOT table_name alias_opt", + /* 474 */ "table_primary ::= subquery alias_opt", + /* 475 */ "table_primary ::= parenthesized_joined_table", + /* 476 */ "alias_opt ::=", + /* 477 */ "alias_opt ::= table_alias", + /* 478 */ "alias_opt ::= AS table_alias", + /* 479 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP", + /* 480 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP", + /* 481 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition", + /* 482 */ "join_type ::=", + /* 483 */ "join_type ::= INNER", + /* 484 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt", + /* 485 */ "set_quantifier_opt ::=", + /* 486 */ "set_quantifier_opt ::= DISTINCT", + /* 487 */ "set_quantifier_opt ::= ALL", + /* 488 */ "select_list ::= select_item", + /* 489 */ "select_list ::= select_list NK_COMMA select_item", + /* 490 */ "select_item ::= NK_STAR", + /* 491 */ "select_item ::= common_expression", + /* 492 */ "select_item ::= common_expression column_alias", + /* 493 */ "select_item ::= common_expression AS column_alias", + /* 494 */ "select_item ::= table_name NK_DOT NK_STAR", + /* 495 */ "where_clause_opt ::=", + /* 496 */ "where_clause_opt ::= WHERE search_condition", + /* 497 */ "partition_by_clause_opt ::=", + /* 498 */ "partition_by_clause_opt ::= PARTITION BY partition_list", + /* 499 */ "partition_list ::= partition_item", + /* 500 */ "partition_list ::= partition_list NK_COMMA partition_item", + /* 501 */ "partition_item ::= expr_or_subquery", + /* 502 */ "partition_item ::= expr_or_subquery column_alias", + /* 503 */ "partition_item ::= expr_or_subquery AS column_alias", + /* 504 */ "twindow_clause_opt ::=", + /* 505 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP", + /* 506 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP", + /* 507 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt", + /* 508 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt", + /* 509 */ "twindow_clause_opt ::= EVENT_WINDOW START WITH search_condition END WITH search_condition", + /* 510 */ "sliding_opt ::=", + /* 511 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP", + /* 512 */ "fill_opt ::=", + /* 513 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP", + /* 514 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP", + /* 515 */ "fill_opt ::= FILL NK_LP VALUE_F NK_COMMA literal_list 
NK_RP", + /* 516 */ "fill_mode ::= NONE", + /* 517 */ "fill_mode ::= PREV", + /* 518 */ "fill_mode ::= NULL", + /* 519 */ "fill_mode ::= NULL_F", + /* 520 */ "fill_mode ::= LINEAR", + /* 521 */ "fill_mode ::= NEXT", + /* 522 */ "group_by_clause_opt ::=", + /* 523 */ "group_by_clause_opt ::= GROUP BY group_by_list", + /* 524 */ "group_by_list ::= expr_or_subquery", + /* 525 */ "group_by_list ::= group_by_list NK_COMMA expr_or_subquery", + /* 526 */ "having_clause_opt ::=", + /* 527 */ "having_clause_opt ::= HAVING search_condition", + /* 528 */ "range_opt ::=", + /* 529 */ "range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP", + /* 530 */ "every_opt ::=", + /* 531 */ "every_opt ::= EVERY NK_LP duration_literal NK_RP", + /* 532 */ "query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt", + /* 533 */ "query_simple ::= query_specification", + /* 534 */ "query_simple ::= union_query_expression", + /* 535 */ "union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery", + /* 536 */ "union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery", + /* 537 */ "query_simple_or_subquery ::= query_simple", + /* 538 */ "query_simple_or_subquery ::= subquery", + /* 539 */ "query_or_subquery ::= query_expression", + /* 540 */ "query_or_subquery ::= subquery", + /* 541 */ "order_by_clause_opt ::=", + /* 542 */ "order_by_clause_opt ::= ORDER BY sort_specification_list", + /* 543 */ "slimit_clause_opt ::=", + /* 544 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER", + /* 545 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER", + /* 546 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER", + /* 547 */ "limit_clause_opt ::=", + /* 548 */ "limit_clause_opt ::= LIMIT NK_INTEGER", + /* 549 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER", + /* 550 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER", + /* 551 */ "subquery ::= NK_LP query_expression NK_RP", + /* 552 */ "subquery ::= NK_LP subquery NK_RP", + /* 553 */ "search_condition ::= common_expression", + /* 554 */ "sort_specification_list ::= sort_specification", + /* 555 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification", + /* 556 */ "sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt", + /* 557 */ "ordering_specification_opt ::=", + /* 558 */ "ordering_specification_opt ::= ASC", + /* 559 */ "ordering_specification_opt ::= DESC", + /* 560 */ "null_ordering_opt ::=", + /* 561 */ "null_ordering_opt ::= NULLS FIRST", + /* 562 */ "null_ordering_opt ::= NULLS LAST", }; #endif /* NDEBUG */ @@ -3371,249 +3372,250 @@ static const struct { { 402, -3 }, /* (316) stream_options ::= stream_options WATERMARK duration_literal */ { 402, -4 }, /* (317) stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */ { 402, -3 }, /* (318) stream_options ::= stream_options FILL_HISTORY NK_INTEGER */ - { 402, -4 }, /* (319) stream_options ::= stream_options IGNORE UPDATE NK_INTEGER */ - { 405, 0 }, /* (320) subtable_opt ::= */ - { 405, -4 }, /* (321) subtable_opt ::= SUBTABLE NK_LP expression NK_RP */ - { 328, -3 }, /* (322) cmd ::= KILL CONNECTION NK_INTEGER */ - { 328, -3 }, /* (323) cmd ::= KILL QUERY NK_STRING */ - { 328, -3 }, /* (324) cmd ::= KILL TRANSACTION NK_INTEGER */ - { 328, -2 }, /* (325) cmd ::= BALANCE VGROUP */ - { 328, -4 }, /* (326) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ - { 328, -4 }, /* (327) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER 
dnode_list */ - { 328, -3 }, /* (328) cmd ::= SPLIT VGROUP NK_INTEGER */ - { 407, -2 }, /* (329) dnode_list ::= DNODE NK_INTEGER */ - { 407, -3 }, /* (330) dnode_list ::= dnode_list DNODE NK_INTEGER */ - { 328, -4 }, /* (331) cmd ::= DELETE FROM full_table_name where_clause_opt */ - { 328, -1 }, /* (332) cmd ::= query_or_subquery */ - { 328, -1 }, /* (333) cmd ::= insert_query */ - { 398, -7 }, /* (334) insert_query ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */ - { 398, -4 }, /* (335) insert_query ::= INSERT INTO full_table_name query_or_subquery */ - { 331, -1 }, /* (336) literal ::= NK_INTEGER */ - { 331, -1 }, /* (337) literal ::= NK_FLOAT */ - { 331, -1 }, /* (338) literal ::= NK_STRING */ - { 331, -1 }, /* (339) literal ::= NK_BOOL */ - { 331, -2 }, /* (340) literal ::= TIMESTAMP NK_STRING */ - { 331, -1 }, /* (341) literal ::= duration_literal */ - { 331, -1 }, /* (342) literal ::= NULL */ - { 331, -1 }, /* (343) literal ::= NK_QUESTION */ - { 375, -1 }, /* (344) duration_literal ::= NK_VARIABLE */ - { 409, -1 }, /* (345) signed ::= NK_INTEGER */ - { 409, -2 }, /* (346) signed ::= NK_PLUS NK_INTEGER */ - { 409, -2 }, /* (347) signed ::= NK_MINUS NK_INTEGER */ - { 409, -1 }, /* (348) signed ::= NK_FLOAT */ - { 409, -2 }, /* (349) signed ::= NK_PLUS NK_FLOAT */ - { 409, -2 }, /* (350) signed ::= NK_MINUS NK_FLOAT */ - { 364, -1 }, /* (351) signed_literal ::= signed */ - { 364, -1 }, /* (352) signed_literal ::= NK_STRING */ - { 364, -1 }, /* (353) signed_literal ::= NK_BOOL */ - { 364, -2 }, /* (354) signed_literal ::= TIMESTAMP NK_STRING */ - { 364, -1 }, /* (355) signed_literal ::= duration_literal */ - { 364, -1 }, /* (356) signed_literal ::= NULL */ - { 364, -1 }, /* (357) signed_literal ::= literal_func */ - { 364, -1 }, /* (358) signed_literal ::= NK_QUESTION */ - { 411, -1 }, /* (359) literal_list ::= signed_literal */ - { 411, -3 }, /* (360) literal_list ::= literal_list NK_COMMA signed_literal */ - { 339, -1 }, /* (361) db_name ::= NK_ID */ - { 370, -1 }, /* (362) table_name ::= NK_ID */ - { 362, -1 }, /* (363) column_name ::= NK_ID */ - { 377, -1 }, /* (364) function_name ::= NK_ID */ - { 412, -1 }, /* (365) table_alias ::= NK_ID */ - { 385, -1 }, /* (366) column_alias ::= NK_ID */ - { 333, -1 }, /* (367) user_name ::= NK_ID */ - { 340, -1 }, /* (368) topic_name ::= NK_ID */ - { 401, -1 }, /* (369) stream_name ::= NK_ID */ - { 395, -1 }, /* (370) cgroup_name ::= NK_ID */ - { 388, -1 }, /* (371) index_name ::= NK_ID */ - { 413, -1 }, /* (372) expr_or_subquery ::= expression */ - { 406, -1 }, /* (373) expression ::= literal */ - { 406, -1 }, /* (374) expression ::= pseudo_column */ - { 406, -1 }, /* (375) expression ::= column_reference */ - { 406, -1 }, /* (376) expression ::= function_expression */ - { 406, -1 }, /* (377) expression ::= case_when_expression */ - { 406, -3 }, /* (378) expression ::= NK_LP expression NK_RP */ - { 406, -2 }, /* (379) expression ::= NK_PLUS expr_or_subquery */ - { 406, -2 }, /* (380) expression ::= NK_MINUS expr_or_subquery */ - { 406, -3 }, /* (381) expression ::= expr_or_subquery NK_PLUS expr_or_subquery */ - { 406, -3 }, /* (382) expression ::= expr_or_subquery NK_MINUS expr_or_subquery */ - { 406, -3 }, /* (383) expression ::= expr_or_subquery NK_STAR expr_or_subquery */ - { 406, -3 }, /* (384) expression ::= expr_or_subquery NK_SLASH expr_or_subquery */ - { 406, -3 }, /* (385) expression ::= expr_or_subquery NK_REM expr_or_subquery */ - { 406, -3 }, /* (386) expression ::= column_reference NK_ARROW NK_STRING 
*/ - { 406, -3 }, /* (387) expression ::= expr_or_subquery NK_BITAND expr_or_subquery */ - { 406, -3 }, /* (388) expression ::= expr_or_subquery NK_BITOR expr_or_subquery */ - { 367, -1 }, /* (389) expression_list ::= expr_or_subquery */ - { 367, -3 }, /* (390) expression_list ::= expression_list NK_COMMA expr_or_subquery */ - { 415, -1 }, /* (391) column_reference ::= column_name */ - { 415, -3 }, /* (392) column_reference ::= table_name NK_DOT column_name */ - { 414, -1 }, /* (393) pseudo_column ::= ROWTS */ - { 414, -1 }, /* (394) pseudo_column ::= TBNAME */ - { 414, -3 }, /* (395) pseudo_column ::= table_name NK_DOT TBNAME */ - { 414, -1 }, /* (396) pseudo_column ::= QSTART */ - { 414, -1 }, /* (397) pseudo_column ::= QEND */ - { 414, -1 }, /* (398) pseudo_column ::= QDURATION */ - { 414, -1 }, /* (399) pseudo_column ::= WSTART */ - { 414, -1 }, /* (400) pseudo_column ::= WEND */ - { 414, -1 }, /* (401) pseudo_column ::= WDURATION */ - { 414, -1 }, /* (402) pseudo_column ::= IROWTS */ - { 414, -1 }, /* (403) pseudo_column ::= ISFILLED */ - { 414, -1 }, /* (404) pseudo_column ::= QTAGS */ - { 416, -4 }, /* (405) function_expression ::= function_name NK_LP expression_list NK_RP */ - { 416, -4 }, /* (406) function_expression ::= star_func NK_LP star_func_para_list NK_RP */ - { 416, -6 }, /* (407) function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP */ - { 416, -1 }, /* (408) function_expression ::= literal_func */ - { 410, -3 }, /* (409) literal_func ::= noarg_func NK_LP NK_RP */ - { 410, -1 }, /* (410) literal_func ::= NOW */ - { 420, -1 }, /* (411) noarg_func ::= NOW */ - { 420, -1 }, /* (412) noarg_func ::= TODAY */ - { 420, -1 }, /* (413) noarg_func ::= TIMEZONE */ - { 420, -1 }, /* (414) noarg_func ::= DATABASE */ - { 420, -1 }, /* (415) noarg_func ::= CLIENT_VERSION */ - { 420, -1 }, /* (416) noarg_func ::= SERVER_VERSION */ - { 420, -1 }, /* (417) noarg_func ::= SERVER_STATUS */ - { 420, -1 }, /* (418) noarg_func ::= CURRENT_USER */ - { 420, -1 }, /* (419) noarg_func ::= USER */ - { 418, -1 }, /* (420) star_func ::= COUNT */ - { 418, -1 }, /* (421) star_func ::= FIRST */ - { 418, -1 }, /* (422) star_func ::= LAST */ - { 418, -1 }, /* (423) star_func ::= LAST_ROW */ - { 419, -1 }, /* (424) star_func_para_list ::= NK_STAR */ - { 419, -1 }, /* (425) star_func_para_list ::= other_para_list */ - { 421, -1 }, /* (426) other_para_list ::= star_func_para */ - { 421, -3 }, /* (427) other_para_list ::= other_para_list NK_COMMA star_func_para */ - { 422, -1 }, /* (428) star_func_para ::= expr_or_subquery */ - { 422, -3 }, /* (429) star_func_para ::= table_name NK_DOT NK_STAR */ - { 417, -4 }, /* (430) case_when_expression ::= CASE when_then_list case_when_else_opt END */ - { 417, -5 }, /* (431) case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END */ - { 423, -1 }, /* (432) when_then_list ::= when_then_expr */ - { 423, -2 }, /* (433) when_then_list ::= when_then_list when_then_expr */ - { 426, -4 }, /* (434) when_then_expr ::= WHEN common_expression THEN common_expression */ - { 424, 0 }, /* (435) case_when_else_opt ::= */ - { 424, -2 }, /* (436) case_when_else_opt ::= ELSE common_expression */ - { 427, -3 }, /* (437) predicate ::= expr_or_subquery compare_op expr_or_subquery */ - { 427, -5 }, /* (438) predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */ - { 427, -6 }, /* (439) predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */ - { 427, -3 }, /* (440) predicate ::= expr_or_subquery IS 
NULL */ - { 427, -4 }, /* (441) predicate ::= expr_or_subquery IS NOT NULL */ - { 427, -3 }, /* (442) predicate ::= expr_or_subquery in_op in_predicate_value */ - { 428, -1 }, /* (443) compare_op ::= NK_LT */ - { 428, -1 }, /* (444) compare_op ::= NK_GT */ - { 428, -1 }, /* (445) compare_op ::= NK_LE */ - { 428, -1 }, /* (446) compare_op ::= NK_GE */ - { 428, -1 }, /* (447) compare_op ::= NK_NE */ - { 428, -1 }, /* (448) compare_op ::= NK_EQ */ - { 428, -1 }, /* (449) compare_op ::= LIKE */ - { 428, -2 }, /* (450) compare_op ::= NOT LIKE */ - { 428, -1 }, /* (451) compare_op ::= MATCH */ - { 428, -1 }, /* (452) compare_op ::= NMATCH */ - { 428, -1 }, /* (453) compare_op ::= CONTAINS */ - { 429, -1 }, /* (454) in_op ::= IN */ - { 429, -2 }, /* (455) in_op ::= NOT IN */ - { 430, -3 }, /* (456) in_predicate_value ::= NK_LP literal_list NK_RP */ - { 431, -1 }, /* (457) boolean_value_expression ::= boolean_primary */ - { 431, -2 }, /* (458) boolean_value_expression ::= NOT boolean_primary */ - { 431, -3 }, /* (459) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ - { 431, -3 }, /* (460) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ - { 432, -1 }, /* (461) boolean_primary ::= predicate */ - { 432, -3 }, /* (462) boolean_primary ::= NK_LP boolean_value_expression NK_RP */ - { 425, -1 }, /* (463) common_expression ::= expr_or_subquery */ - { 425, -1 }, /* (464) common_expression ::= boolean_value_expression */ - { 433, 0 }, /* (465) from_clause_opt ::= */ - { 433, -2 }, /* (466) from_clause_opt ::= FROM table_reference_list */ - { 434, -1 }, /* (467) table_reference_list ::= table_reference */ - { 434, -3 }, /* (468) table_reference_list ::= table_reference_list NK_COMMA table_reference */ - { 435, -1 }, /* (469) table_reference ::= table_primary */ - { 435, -1 }, /* (470) table_reference ::= joined_table */ - { 436, -2 }, /* (471) table_primary ::= table_name alias_opt */ - { 436, -4 }, /* (472) table_primary ::= db_name NK_DOT table_name alias_opt */ - { 436, -2 }, /* (473) table_primary ::= subquery alias_opt */ - { 436, -1 }, /* (474) table_primary ::= parenthesized_joined_table */ - { 438, 0 }, /* (475) alias_opt ::= */ - { 438, -1 }, /* (476) alias_opt ::= table_alias */ - { 438, -2 }, /* (477) alias_opt ::= AS table_alias */ - { 440, -3 }, /* (478) parenthesized_joined_table ::= NK_LP joined_table NK_RP */ - { 440, -3 }, /* (479) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ - { 437, -6 }, /* (480) joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ - { 441, 0 }, /* (481) join_type ::= */ - { 441, -1 }, /* (482) join_type ::= INNER */ - { 443, -12 }, /* (483) query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ - { 444, 0 }, /* (484) set_quantifier_opt ::= */ - { 444, -1 }, /* (485) set_quantifier_opt ::= DISTINCT */ - { 444, -1 }, /* (486) set_quantifier_opt ::= ALL */ - { 445, -1 }, /* (487) select_list ::= select_item */ - { 445, -3 }, /* (488) select_list ::= select_list NK_COMMA select_item */ - { 453, -1 }, /* (489) select_item ::= NK_STAR */ - { 453, -1 }, /* (490) select_item ::= common_expression */ - { 453, -2 }, /* (491) select_item ::= common_expression column_alias */ - { 453, -3 }, /* (492) select_item ::= common_expression AS column_alias */ - { 453, -3 }, /* (493) select_item ::= 
table_name NK_DOT NK_STAR */ - { 408, 0 }, /* (494) where_clause_opt ::= */ - { 408, -2 }, /* (495) where_clause_opt ::= WHERE search_condition */ - { 446, 0 }, /* (496) partition_by_clause_opt ::= */ - { 446, -3 }, /* (497) partition_by_clause_opt ::= PARTITION BY partition_list */ - { 454, -1 }, /* (498) partition_list ::= partition_item */ - { 454, -3 }, /* (499) partition_list ::= partition_list NK_COMMA partition_item */ - { 455, -1 }, /* (500) partition_item ::= expr_or_subquery */ - { 455, -2 }, /* (501) partition_item ::= expr_or_subquery column_alias */ - { 455, -3 }, /* (502) partition_item ::= expr_or_subquery AS column_alias */ - { 450, 0 }, /* (503) twindow_clause_opt ::= */ - { 450, -6 }, /* (504) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ - { 450, -4 }, /* (505) twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP */ - { 450, -6 }, /* (506) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ - { 450, -8 }, /* (507) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ - { 450, -7 }, /* (508) twindow_clause_opt ::= EVENT_WINDOW START WITH search_condition END WITH search_condition */ - { 390, 0 }, /* (509) sliding_opt ::= */ - { 390, -4 }, /* (510) sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ - { 449, 0 }, /* (511) fill_opt ::= */ - { 449, -4 }, /* (512) fill_opt ::= FILL NK_LP fill_mode NK_RP */ - { 449, -6 }, /* (513) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ - { 449, -6 }, /* (514) fill_opt ::= FILL NK_LP VALUE_F NK_COMMA literal_list NK_RP */ - { 456, -1 }, /* (515) fill_mode ::= NONE */ - { 456, -1 }, /* (516) fill_mode ::= PREV */ - { 456, -1 }, /* (517) fill_mode ::= NULL */ - { 456, -1 }, /* (518) fill_mode ::= NULL_F */ - { 456, -1 }, /* (519) fill_mode ::= LINEAR */ - { 456, -1 }, /* (520) fill_mode ::= NEXT */ - { 451, 0 }, /* (521) group_by_clause_opt ::= */ - { 451, -3 }, /* (522) group_by_clause_opt ::= GROUP BY group_by_list */ - { 457, -1 }, /* (523) group_by_list ::= expr_or_subquery */ - { 457, -3 }, /* (524) group_by_list ::= group_by_list NK_COMMA expr_or_subquery */ - { 452, 0 }, /* (525) having_clause_opt ::= */ - { 452, -2 }, /* (526) having_clause_opt ::= HAVING search_condition */ - { 447, 0 }, /* (527) range_opt ::= */ - { 447, -6 }, /* (528) range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */ - { 448, 0 }, /* (529) every_opt ::= */ - { 448, -4 }, /* (530) every_opt ::= EVERY NK_LP duration_literal NK_RP */ - { 458, -4 }, /* (531) query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */ - { 459, -1 }, /* (532) query_simple ::= query_specification */ - { 459, -1 }, /* (533) query_simple ::= union_query_expression */ - { 463, -4 }, /* (534) union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */ - { 463, -3 }, /* (535) union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */ - { 464, -1 }, /* (536) query_simple_or_subquery ::= query_simple */ - { 464, -1 }, /* (537) query_simple_or_subquery ::= subquery */ - { 394, -1 }, /* (538) query_or_subquery ::= query_expression */ - { 394, -1 }, /* (539) query_or_subquery ::= subquery */ - { 460, 0 }, /* (540) order_by_clause_opt ::= */ - { 460, -3 }, /* (541) order_by_clause_opt ::= ORDER BY sort_specification_list */ - { 461, 0 }, /* (542) slimit_clause_opt ::= */ - { 461, -2 }, /* (543) slimit_clause_opt ::= 
SLIMIT NK_INTEGER */ - { 461, -4 }, /* (544) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ - { 461, -4 }, /* (545) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - { 462, 0 }, /* (546) limit_clause_opt ::= */ - { 462, -2 }, /* (547) limit_clause_opt ::= LIMIT NK_INTEGER */ - { 462, -4 }, /* (548) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ - { 462, -4 }, /* (549) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - { 439, -3 }, /* (550) subquery ::= NK_LP query_expression NK_RP */ - { 439, -3 }, /* (551) subquery ::= NK_LP subquery NK_RP */ - { 442, -1 }, /* (552) search_condition ::= common_expression */ - { 465, -1 }, /* (553) sort_specification_list ::= sort_specification */ - { 465, -3 }, /* (554) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ - { 466, -3 }, /* (555) sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */ - { 467, 0 }, /* (556) ordering_specification_opt ::= */ - { 467, -1 }, /* (557) ordering_specification_opt ::= ASC */ - { 467, -1 }, /* (558) ordering_specification_opt ::= DESC */ - { 468, 0 }, /* (559) null_ordering_opt ::= */ - { 468, -2 }, /* (560) null_ordering_opt ::= NULLS FIRST */ - { 468, -2 }, /* (561) null_ordering_opt ::= NULLS LAST */ + { 402, -3 }, /* (319) stream_options ::= stream_options DELETE_MARK duration_literal */ + { 402, -4 }, /* (320) stream_options ::= stream_options IGNORE UPDATE NK_INTEGER */ + { 405, 0 }, /* (321) subtable_opt ::= */ + { 405, -4 }, /* (322) subtable_opt ::= SUBTABLE NK_LP expression NK_RP */ + { 328, -3 }, /* (323) cmd ::= KILL CONNECTION NK_INTEGER */ + { 328, -3 }, /* (324) cmd ::= KILL QUERY NK_STRING */ + { 328, -3 }, /* (325) cmd ::= KILL TRANSACTION NK_INTEGER */ + { 328, -2 }, /* (326) cmd ::= BALANCE VGROUP */ + { 328, -4 }, /* (327) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ + { 328, -4 }, /* (328) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ + { 328, -3 }, /* (329) cmd ::= SPLIT VGROUP NK_INTEGER */ + { 407, -2 }, /* (330) dnode_list ::= DNODE NK_INTEGER */ + { 407, -3 }, /* (331) dnode_list ::= dnode_list DNODE NK_INTEGER */ + { 328, -4 }, /* (332) cmd ::= DELETE FROM full_table_name where_clause_opt */ + { 328, -1 }, /* (333) cmd ::= query_or_subquery */ + { 328, -1 }, /* (334) cmd ::= insert_query */ + { 398, -7 }, /* (335) insert_query ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */ + { 398, -4 }, /* (336) insert_query ::= INSERT INTO full_table_name query_or_subquery */ + { 331, -1 }, /* (337) literal ::= NK_INTEGER */ + { 331, -1 }, /* (338) literal ::= NK_FLOAT */ + { 331, -1 }, /* (339) literal ::= NK_STRING */ + { 331, -1 }, /* (340) literal ::= NK_BOOL */ + { 331, -2 }, /* (341) literal ::= TIMESTAMP NK_STRING */ + { 331, -1 }, /* (342) literal ::= duration_literal */ + { 331, -1 }, /* (343) literal ::= NULL */ + { 331, -1 }, /* (344) literal ::= NK_QUESTION */ + { 375, -1 }, /* (345) duration_literal ::= NK_VARIABLE */ + { 409, -1 }, /* (346) signed ::= NK_INTEGER */ + { 409, -2 }, /* (347) signed ::= NK_PLUS NK_INTEGER */ + { 409, -2 }, /* (348) signed ::= NK_MINUS NK_INTEGER */ + { 409, -1 }, /* (349) signed ::= NK_FLOAT */ + { 409, -2 }, /* (350) signed ::= NK_PLUS NK_FLOAT */ + { 409, -2 }, /* (351) signed ::= NK_MINUS NK_FLOAT */ + { 364, -1 }, /* (352) signed_literal ::= signed */ + { 364, -1 }, /* (353) signed_literal ::= NK_STRING */ + { 364, -1 }, /* (354) signed_literal ::= NK_BOOL */ + { 364, -2 }, /* (355) signed_literal ::= 
TIMESTAMP NK_STRING */ + { 364, -1 }, /* (356) signed_literal ::= duration_literal */ + { 364, -1 }, /* (357) signed_literal ::= NULL */ + { 364, -1 }, /* (358) signed_literal ::= literal_func */ + { 364, -1 }, /* (359) signed_literal ::= NK_QUESTION */ + { 411, -1 }, /* (360) literal_list ::= signed_literal */ + { 411, -3 }, /* (361) literal_list ::= literal_list NK_COMMA signed_literal */ + { 339, -1 }, /* (362) db_name ::= NK_ID */ + { 370, -1 }, /* (363) table_name ::= NK_ID */ + { 362, -1 }, /* (364) column_name ::= NK_ID */ + { 377, -1 }, /* (365) function_name ::= NK_ID */ + { 412, -1 }, /* (366) table_alias ::= NK_ID */ + { 385, -1 }, /* (367) column_alias ::= NK_ID */ + { 333, -1 }, /* (368) user_name ::= NK_ID */ + { 340, -1 }, /* (369) topic_name ::= NK_ID */ + { 401, -1 }, /* (370) stream_name ::= NK_ID */ + { 395, -1 }, /* (371) cgroup_name ::= NK_ID */ + { 388, -1 }, /* (372) index_name ::= NK_ID */ + { 413, -1 }, /* (373) expr_or_subquery ::= expression */ + { 406, -1 }, /* (374) expression ::= literal */ + { 406, -1 }, /* (375) expression ::= pseudo_column */ + { 406, -1 }, /* (376) expression ::= column_reference */ + { 406, -1 }, /* (377) expression ::= function_expression */ + { 406, -1 }, /* (378) expression ::= case_when_expression */ + { 406, -3 }, /* (379) expression ::= NK_LP expression NK_RP */ + { 406, -2 }, /* (380) expression ::= NK_PLUS expr_or_subquery */ + { 406, -2 }, /* (381) expression ::= NK_MINUS expr_or_subquery */ + { 406, -3 }, /* (382) expression ::= expr_or_subquery NK_PLUS expr_or_subquery */ + { 406, -3 }, /* (383) expression ::= expr_or_subquery NK_MINUS expr_or_subquery */ + { 406, -3 }, /* (384) expression ::= expr_or_subquery NK_STAR expr_or_subquery */ + { 406, -3 }, /* (385) expression ::= expr_or_subquery NK_SLASH expr_or_subquery */ + { 406, -3 }, /* (386) expression ::= expr_or_subquery NK_REM expr_or_subquery */ + { 406, -3 }, /* (387) expression ::= column_reference NK_ARROW NK_STRING */ + { 406, -3 }, /* (388) expression ::= expr_or_subquery NK_BITAND expr_or_subquery */ + { 406, -3 }, /* (389) expression ::= expr_or_subquery NK_BITOR expr_or_subquery */ + { 367, -1 }, /* (390) expression_list ::= expr_or_subquery */ + { 367, -3 }, /* (391) expression_list ::= expression_list NK_COMMA expr_or_subquery */ + { 415, -1 }, /* (392) column_reference ::= column_name */ + { 415, -3 }, /* (393) column_reference ::= table_name NK_DOT column_name */ + { 414, -1 }, /* (394) pseudo_column ::= ROWTS */ + { 414, -1 }, /* (395) pseudo_column ::= TBNAME */ + { 414, -3 }, /* (396) pseudo_column ::= table_name NK_DOT TBNAME */ + { 414, -1 }, /* (397) pseudo_column ::= QSTART */ + { 414, -1 }, /* (398) pseudo_column ::= QEND */ + { 414, -1 }, /* (399) pseudo_column ::= QDURATION */ + { 414, -1 }, /* (400) pseudo_column ::= WSTART */ + { 414, -1 }, /* (401) pseudo_column ::= WEND */ + { 414, -1 }, /* (402) pseudo_column ::= WDURATION */ + { 414, -1 }, /* (403) pseudo_column ::= IROWTS */ + { 414, -1 }, /* (404) pseudo_column ::= ISFILLED */ + { 414, -1 }, /* (405) pseudo_column ::= QTAGS */ + { 416, -4 }, /* (406) function_expression ::= function_name NK_LP expression_list NK_RP */ + { 416, -4 }, /* (407) function_expression ::= star_func NK_LP star_func_para_list NK_RP */ + { 416, -6 }, /* (408) function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP */ + { 416, -1 }, /* (409) function_expression ::= literal_func */ + { 410, -3 }, /* (410) literal_func ::= noarg_func NK_LP NK_RP */ + { 410, -1 }, /* (411) literal_func ::= NOW */ + { 420, 
-1 }, /* (412) noarg_func ::= NOW */ + { 420, -1 }, /* (413) noarg_func ::= TODAY */ + { 420, -1 }, /* (414) noarg_func ::= TIMEZONE */ + { 420, -1 }, /* (415) noarg_func ::= DATABASE */ + { 420, -1 }, /* (416) noarg_func ::= CLIENT_VERSION */ + { 420, -1 }, /* (417) noarg_func ::= SERVER_VERSION */ + { 420, -1 }, /* (418) noarg_func ::= SERVER_STATUS */ + { 420, -1 }, /* (419) noarg_func ::= CURRENT_USER */ + { 420, -1 }, /* (420) noarg_func ::= USER */ + { 418, -1 }, /* (421) star_func ::= COUNT */ + { 418, -1 }, /* (422) star_func ::= FIRST */ + { 418, -1 }, /* (423) star_func ::= LAST */ + { 418, -1 }, /* (424) star_func ::= LAST_ROW */ + { 419, -1 }, /* (425) star_func_para_list ::= NK_STAR */ + { 419, -1 }, /* (426) star_func_para_list ::= other_para_list */ + { 421, -1 }, /* (427) other_para_list ::= star_func_para */ + { 421, -3 }, /* (428) other_para_list ::= other_para_list NK_COMMA star_func_para */ + { 422, -1 }, /* (429) star_func_para ::= expr_or_subquery */ + { 422, -3 }, /* (430) star_func_para ::= table_name NK_DOT NK_STAR */ + { 417, -4 }, /* (431) case_when_expression ::= CASE when_then_list case_when_else_opt END */ + { 417, -5 }, /* (432) case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END */ + { 423, -1 }, /* (433) when_then_list ::= when_then_expr */ + { 423, -2 }, /* (434) when_then_list ::= when_then_list when_then_expr */ + { 426, -4 }, /* (435) when_then_expr ::= WHEN common_expression THEN common_expression */ + { 424, 0 }, /* (436) case_when_else_opt ::= */ + { 424, -2 }, /* (437) case_when_else_opt ::= ELSE common_expression */ + { 427, -3 }, /* (438) predicate ::= expr_or_subquery compare_op expr_or_subquery */ + { 427, -5 }, /* (439) predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */ + { 427, -6 }, /* (440) predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */ + { 427, -3 }, /* (441) predicate ::= expr_or_subquery IS NULL */ + { 427, -4 }, /* (442) predicate ::= expr_or_subquery IS NOT NULL */ + { 427, -3 }, /* (443) predicate ::= expr_or_subquery in_op in_predicate_value */ + { 428, -1 }, /* (444) compare_op ::= NK_LT */ + { 428, -1 }, /* (445) compare_op ::= NK_GT */ + { 428, -1 }, /* (446) compare_op ::= NK_LE */ + { 428, -1 }, /* (447) compare_op ::= NK_GE */ + { 428, -1 }, /* (448) compare_op ::= NK_NE */ + { 428, -1 }, /* (449) compare_op ::= NK_EQ */ + { 428, -1 }, /* (450) compare_op ::= LIKE */ + { 428, -2 }, /* (451) compare_op ::= NOT LIKE */ + { 428, -1 }, /* (452) compare_op ::= MATCH */ + { 428, -1 }, /* (453) compare_op ::= NMATCH */ + { 428, -1 }, /* (454) compare_op ::= CONTAINS */ + { 429, -1 }, /* (455) in_op ::= IN */ + { 429, -2 }, /* (456) in_op ::= NOT IN */ + { 430, -3 }, /* (457) in_predicate_value ::= NK_LP literal_list NK_RP */ + { 431, -1 }, /* (458) boolean_value_expression ::= boolean_primary */ + { 431, -2 }, /* (459) boolean_value_expression ::= NOT boolean_primary */ + { 431, -3 }, /* (460) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ + { 431, -3 }, /* (461) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ + { 432, -1 }, /* (462) boolean_primary ::= predicate */ + { 432, -3 }, /* (463) boolean_primary ::= NK_LP boolean_value_expression NK_RP */ + { 425, -1 }, /* (464) common_expression ::= expr_or_subquery */ + { 425, -1 }, /* (465) common_expression ::= boolean_value_expression */ + { 433, 0 }, /* (466) from_clause_opt ::= */ + { 433, -2 }, /* (467) 
from_clause_opt ::= FROM table_reference_list */ + { 434, -1 }, /* (468) table_reference_list ::= table_reference */ + { 434, -3 }, /* (469) table_reference_list ::= table_reference_list NK_COMMA table_reference */ + { 435, -1 }, /* (470) table_reference ::= table_primary */ + { 435, -1 }, /* (471) table_reference ::= joined_table */ + { 436, -2 }, /* (472) table_primary ::= table_name alias_opt */ + { 436, -4 }, /* (473) table_primary ::= db_name NK_DOT table_name alias_opt */ + { 436, -2 }, /* (474) table_primary ::= subquery alias_opt */ + { 436, -1 }, /* (475) table_primary ::= parenthesized_joined_table */ + { 438, 0 }, /* (476) alias_opt ::= */ + { 438, -1 }, /* (477) alias_opt ::= table_alias */ + { 438, -2 }, /* (478) alias_opt ::= AS table_alias */ + { 440, -3 }, /* (479) parenthesized_joined_table ::= NK_LP joined_table NK_RP */ + { 440, -3 }, /* (480) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ + { 437, -6 }, /* (481) joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ + { 441, 0 }, /* (482) join_type ::= */ + { 441, -1 }, /* (483) join_type ::= INNER */ + { 443, -12 }, /* (484) query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ + { 444, 0 }, /* (485) set_quantifier_opt ::= */ + { 444, -1 }, /* (486) set_quantifier_opt ::= DISTINCT */ + { 444, -1 }, /* (487) set_quantifier_opt ::= ALL */ + { 445, -1 }, /* (488) select_list ::= select_item */ + { 445, -3 }, /* (489) select_list ::= select_list NK_COMMA select_item */ + { 453, -1 }, /* (490) select_item ::= NK_STAR */ + { 453, -1 }, /* (491) select_item ::= common_expression */ + { 453, -2 }, /* (492) select_item ::= common_expression column_alias */ + { 453, -3 }, /* (493) select_item ::= common_expression AS column_alias */ + { 453, -3 }, /* (494) select_item ::= table_name NK_DOT NK_STAR */ + { 408, 0 }, /* (495) where_clause_opt ::= */ + { 408, -2 }, /* (496) where_clause_opt ::= WHERE search_condition */ + { 446, 0 }, /* (497) partition_by_clause_opt ::= */ + { 446, -3 }, /* (498) partition_by_clause_opt ::= PARTITION BY partition_list */ + { 454, -1 }, /* (499) partition_list ::= partition_item */ + { 454, -3 }, /* (500) partition_list ::= partition_list NK_COMMA partition_item */ + { 455, -1 }, /* (501) partition_item ::= expr_or_subquery */ + { 455, -2 }, /* (502) partition_item ::= expr_or_subquery column_alias */ + { 455, -3 }, /* (503) partition_item ::= expr_or_subquery AS column_alias */ + { 450, 0 }, /* (504) twindow_clause_opt ::= */ + { 450, -6 }, /* (505) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ + { 450, -4 }, /* (506) twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP */ + { 450, -6 }, /* (507) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ + { 450, -8 }, /* (508) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ + { 450, -7 }, /* (509) twindow_clause_opt ::= EVENT_WINDOW START WITH search_condition END WITH search_condition */ + { 390, 0 }, /* (510) sliding_opt ::= */ + { 390, -4 }, /* (511) sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ + { 449, 0 }, /* (512) fill_opt ::= */ + { 449, -4 }, /* (513) fill_opt ::= FILL NK_LP fill_mode NK_RP */ + { 449, -6 }, /* (514) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ + { 
449, -6 }, /* (515) fill_opt ::= FILL NK_LP VALUE_F NK_COMMA literal_list NK_RP */ + { 456, -1 }, /* (516) fill_mode ::= NONE */ + { 456, -1 }, /* (517) fill_mode ::= PREV */ + { 456, -1 }, /* (518) fill_mode ::= NULL */ + { 456, -1 }, /* (519) fill_mode ::= NULL_F */ + { 456, -1 }, /* (520) fill_mode ::= LINEAR */ + { 456, -1 }, /* (521) fill_mode ::= NEXT */ + { 451, 0 }, /* (522) group_by_clause_opt ::= */ + { 451, -3 }, /* (523) group_by_clause_opt ::= GROUP BY group_by_list */ + { 457, -1 }, /* (524) group_by_list ::= expr_or_subquery */ + { 457, -3 }, /* (525) group_by_list ::= group_by_list NK_COMMA expr_or_subquery */ + { 452, 0 }, /* (526) having_clause_opt ::= */ + { 452, -2 }, /* (527) having_clause_opt ::= HAVING search_condition */ + { 447, 0 }, /* (528) range_opt ::= */ + { 447, -6 }, /* (529) range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */ + { 448, 0 }, /* (530) every_opt ::= */ + { 448, -4 }, /* (531) every_opt ::= EVERY NK_LP duration_literal NK_RP */ + { 458, -4 }, /* (532) query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */ + { 459, -1 }, /* (533) query_simple ::= query_specification */ + { 459, -1 }, /* (534) query_simple ::= union_query_expression */ + { 463, -4 }, /* (535) union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */ + { 463, -3 }, /* (536) union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */ + { 464, -1 }, /* (537) query_simple_or_subquery ::= query_simple */ + { 464, -1 }, /* (538) query_simple_or_subquery ::= subquery */ + { 394, -1 }, /* (539) query_or_subquery ::= query_expression */ + { 394, -1 }, /* (540) query_or_subquery ::= subquery */ + { 460, 0 }, /* (541) order_by_clause_opt ::= */ + { 460, -3 }, /* (542) order_by_clause_opt ::= ORDER BY sort_specification_list */ + { 461, 0 }, /* (543) slimit_clause_opt ::= */ + { 461, -2 }, /* (544) slimit_clause_opt ::= SLIMIT NK_INTEGER */ + { 461, -4 }, /* (545) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ + { 461, -4 }, /* (546) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + { 462, 0 }, /* (547) limit_clause_opt ::= */ + { 462, -2 }, /* (548) limit_clause_opt ::= LIMIT NK_INTEGER */ + { 462, -4 }, /* (549) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ + { 462, -4 }, /* (550) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + { 439, -3 }, /* (551) subquery ::= NK_LP query_expression NK_RP */ + { 439, -3 }, /* (552) subquery ::= NK_LP subquery NK_RP */ + { 442, -1 }, /* (553) search_condition ::= common_expression */ + { 465, -1 }, /* (554) sort_specification_list ::= sort_specification */ + { 465, -3 }, /* (555) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ + { 466, -3 }, /* (556) sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */ + { 467, 0 }, /* (557) ordering_specification_opt ::= */ + { 467, -1 }, /* (558) ordering_specification_opt ::= ASC */ + { 467, -1 }, /* (559) ordering_specification_opt ::= DESC */ + { 468, 0 }, /* (560) null_ordering_opt ::= */ + { 468, -2 }, /* (561) null_ordering_opt ::= NULLS FIRST */ + { 468, -2 }, /* (562) null_ordering_opt ::= NULLS LAST */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -3807,7 +3809,7 @@ static YYACTIONTYPE yy_reduce( break; case 42: /* priv_level ::= topic_name */ case 273: /* sma_func_name ::= function_name */ yytestcase(yyruleno==273); - case 476: /* alias_opt ::= 
table_alias */ yytestcase(yyruleno==476); + case 477: /* alias_opt ::= table_alias */ yytestcase(yyruleno==477); { yylhsminor.yy225 = yymsp[0].minor.yy225; } yymsp[0].minor.yy225 = yylhsminor.yy225; break; @@ -3842,30 +3844,30 @@ static YYACTIONTYPE yy_reduce( case 275: /* sma_func_name ::= FIRST */ yytestcase(yyruleno==275); case 276: /* sma_func_name ::= LAST */ yytestcase(yyruleno==276); case 277: /* sma_func_name ::= LAST_ROW */ yytestcase(yyruleno==277); - case 361: /* db_name ::= NK_ID */ yytestcase(yyruleno==361); - case 362: /* table_name ::= NK_ID */ yytestcase(yyruleno==362); - case 363: /* column_name ::= NK_ID */ yytestcase(yyruleno==363); - case 364: /* function_name ::= NK_ID */ yytestcase(yyruleno==364); - case 365: /* table_alias ::= NK_ID */ yytestcase(yyruleno==365); - case 366: /* column_alias ::= NK_ID */ yytestcase(yyruleno==366); - case 367: /* user_name ::= NK_ID */ yytestcase(yyruleno==367); - case 368: /* topic_name ::= NK_ID */ yytestcase(yyruleno==368); - case 369: /* stream_name ::= NK_ID */ yytestcase(yyruleno==369); - case 370: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==370); - case 371: /* index_name ::= NK_ID */ yytestcase(yyruleno==371); - case 411: /* noarg_func ::= NOW */ yytestcase(yyruleno==411); - case 412: /* noarg_func ::= TODAY */ yytestcase(yyruleno==412); - case 413: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==413); - case 414: /* noarg_func ::= DATABASE */ yytestcase(yyruleno==414); - case 415: /* noarg_func ::= CLIENT_VERSION */ yytestcase(yyruleno==415); - case 416: /* noarg_func ::= SERVER_VERSION */ yytestcase(yyruleno==416); - case 417: /* noarg_func ::= SERVER_STATUS */ yytestcase(yyruleno==417); - case 418: /* noarg_func ::= CURRENT_USER */ yytestcase(yyruleno==418); - case 419: /* noarg_func ::= USER */ yytestcase(yyruleno==419); - case 420: /* star_func ::= COUNT */ yytestcase(yyruleno==420); - case 421: /* star_func ::= FIRST */ yytestcase(yyruleno==421); - case 422: /* star_func ::= LAST */ yytestcase(yyruleno==422); - case 423: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==423); + case 362: /* db_name ::= NK_ID */ yytestcase(yyruleno==362); + case 363: /* table_name ::= NK_ID */ yytestcase(yyruleno==363); + case 364: /* column_name ::= NK_ID */ yytestcase(yyruleno==364); + case 365: /* function_name ::= NK_ID */ yytestcase(yyruleno==365); + case 366: /* table_alias ::= NK_ID */ yytestcase(yyruleno==366); + case 367: /* column_alias ::= NK_ID */ yytestcase(yyruleno==367); + case 368: /* user_name ::= NK_ID */ yytestcase(yyruleno==368); + case 369: /* topic_name ::= NK_ID */ yytestcase(yyruleno==369); + case 370: /* stream_name ::= NK_ID */ yytestcase(yyruleno==370); + case 371: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==371); + case 372: /* index_name ::= NK_ID */ yytestcase(yyruleno==372); + case 412: /* noarg_func ::= NOW */ yytestcase(yyruleno==412); + case 413: /* noarg_func ::= TODAY */ yytestcase(yyruleno==413); + case 414: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==414); + case 415: /* noarg_func ::= DATABASE */ yytestcase(yyruleno==415); + case 416: /* noarg_func ::= CLIENT_VERSION */ yytestcase(yyruleno==416); + case 417: /* noarg_func ::= SERVER_VERSION */ yytestcase(yyruleno==417); + case 418: /* noarg_func ::= SERVER_STATUS */ yytestcase(yyruleno==418); + case 419: /* noarg_func ::= CURRENT_USER */ yytestcase(yyruleno==419); + case 420: /* noarg_func ::= USER */ yytestcase(yyruleno==420); + case 421: /* star_func ::= COUNT */ yytestcase(yyruleno==421); + case 422: /* star_func ::= FIRST */ 
yytestcase(yyruleno==422); + case 423: /* star_func ::= LAST */ yytestcase(yyruleno==423); + case 424: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==424); { yylhsminor.yy225 = yymsp[0].minor.yy0; } yymsp[0].minor.yy225 = yylhsminor.yy225; break; @@ -3874,13 +3876,13 @@ static YYACTIONTYPE yy_reduce( case 76: /* exists_opt ::= */ yytestcase(yyruleno==76); case 294: /* analyze_opt ::= */ yytestcase(yyruleno==294); case 301: /* agg_func_opt ::= */ yytestcase(yyruleno==301); - case 484: /* set_quantifier_opt ::= */ yytestcase(yyruleno==484); + case 485: /* set_quantifier_opt ::= */ yytestcase(yyruleno==485); { yymsp[1].minor.yy103 = false; } break; case 55: /* force_opt ::= FORCE */ case 295: /* analyze_opt ::= ANALYZE */ yytestcase(yyruleno==295); case 302: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==302); - case 485: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==485); + case 486: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==486); { yymsp[0].minor.yy103 = true; } break; case 56: /* cmd ::= ALTER LOCAL NK_STRING */ @@ -4106,7 +4108,7 @@ static YYACTIONTYPE yy_reduce( yymsp[0].minor.yy110 = yylhsminor.yy110; break; case 121: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */ - case 330: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==330); + case 331: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==331); { yylhsminor.yy110 = addNodeToList(pCxt, yymsp[-2].minor.yy110, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } yymsp[-2].minor.yy110 = yylhsminor.yy110; break; @@ -4126,12 +4128,12 @@ static YYACTIONTYPE yy_reduce( case 205: /* col_name_list ::= col_name */ yytestcase(yyruleno==205); case 256: /* tag_list_opt ::= tag_item */ yytestcase(yyruleno==256); case 270: /* func_list ::= func */ yytestcase(yyruleno==270); - case 359: /* literal_list ::= signed_literal */ yytestcase(yyruleno==359); - case 426: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==426); - case 432: /* when_then_list ::= when_then_expr */ yytestcase(yyruleno==432); - case 487: /* select_list ::= select_item */ yytestcase(yyruleno==487); - case 498: /* partition_list ::= partition_item */ yytestcase(yyruleno==498); - case 553: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==553); + case 360: /* literal_list ::= signed_literal */ yytestcase(yyruleno==360); + case 427: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==427); + case 433: /* when_then_list ::= when_then_expr */ yytestcase(yyruleno==433); + case 488: /* select_list ::= select_item */ yytestcase(yyruleno==488); + case 499: /* partition_list ::= partition_item */ yytestcase(yyruleno==499); + case 554: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==554); { yylhsminor.yy110 = createNodeList(pCxt, yymsp[0].minor.yy42); } yymsp[0].minor.yy110 = yylhsminor.yy110; break; @@ -4141,11 +4143,11 @@ static YYACTIONTYPE yy_reduce( case 206: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==206); case 257: /* tag_list_opt ::= tag_list_opt NK_COMMA tag_item */ yytestcase(yyruleno==257); case 271: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==271); - case 360: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==360); - case 427: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==427); - case 488: /* select_list ::= select_list NK_COMMA select_item */ yytestcase(yyruleno==488); - case 499: 
/* partition_list ::= partition_list NK_COMMA partition_item */ yytestcase(yyruleno==499); - case 554: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==554); + case 361: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==361); + case 428: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==428); + case 489: /* select_list ::= select_list NK_COMMA select_item */ yytestcase(yyruleno==489); + case 500: /* partition_list ::= partition_list NK_COMMA partition_item */ yytestcase(yyruleno==500); + case 555: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==555); { yylhsminor.yy110 = addNodeToList(pCxt, yymsp[-2].minor.yy110, yymsp[0].minor.yy42); } yymsp[-2].minor.yy110 = yylhsminor.yy110; break; @@ -4175,8 +4177,8 @@ static YYACTIONTYPE yy_reduce( { pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy103, yymsp[0].minor.yy42); } break; case 134: /* cmd ::= ALTER TABLE alter_table_clause */ - case 332: /* cmd ::= query_or_subquery */ yytestcase(yyruleno==332); - case 333: /* cmd ::= insert_query */ yytestcase(yyruleno==333); + case 333: /* cmd ::= query_or_subquery */ yytestcase(yyruleno==333); + case 334: /* cmd ::= insert_query */ yytestcase(yyruleno==334); { pCxt->pRootNode = yymsp[0].minor.yy42; } break; case 135: /* cmd ::= ALTER STABLE alter_table_clause */ @@ -4224,7 +4226,7 @@ static YYACTIONTYPE yy_reduce( break; case 147: /* multi_create_clause ::= multi_create_clause create_subtable_clause */ case 150: /* multi_drop_clause ::= multi_drop_clause drop_table_clause */ yytestcase(yyruleno==150); - case 433: /* when_then_list ::= when_then_list when_then_expr */ yytestcase(yyruleno==433); + case 434: /* when_then_list ::= when_then_list when_then_expr */ yytestcase(yyruleno==434); { yylhsminor.yy110 = addNodeToList(pCxt, yymsp[-1].minor.yy110, yymsp[0].minor.yy42); } yymsp[-1].minor.yy110 = yylhsminor.yy110; break; @@ -4241,9 +4243,9 @@ static YYACTIONTYPE yy_reduce( case 255: /* tag_list_opt ::= */ yytestcase(yyruleno==255); case 307: /* col_list_opt ::= */ yytestcase(yyruleno==307); case 309: /* tag_def_or_ref_opt ::= */ yytestcase(yyruleno==309); - case 496: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==496); - case 521: /* group_by_clause_opt ::= */ yytestcase(yyruleno==521); - case 540: /* order_by_clause_opt ::= */ yytestcase(yyruleno==540); + case 497: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==497); + case 522: /* group_by_clause_opt ::= */ yytestcase(yyruleno==522); + case 541: /* order_by_clause_opt ::= */ yytestcase(yyruleno==541); { yymsp[1].minor.yy110 = NULL; } break; case 153: /* specific_cols_opt ::= NK_LP col_name_list NK_RP */ @@ -4335,7 +4337,7 @@ static YYACTIONTYPE yy_reduce( break; case 184: /* tags_def_opt ::= tags_def */ case 310: /* tag_def_or_ref_opt ::= tags_def */ yytestcase(yyruleno==310); - case 425: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==425); + case 426: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==426); { yylhsminor.yy110 = yymsp[0].minor.yy110; } yymsp[0].minor.yy110 = yylhsminor.yy110; break; @@ -4389,12 +4391,12 @@ static YYACTIONTYPE yy_reduce( { yymsp[-1].minor.yy459.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy459.val = yymsp[0].minor.yy0; } break; case 198: /* duration_list ::= duration_literal */ - case 389: /* expression_list ::= expr_or_subquery */ yytestcase(yyruleno==389); + case 390: /* 
expression_list ::= expr_or_subquery */ yytestcase(yyruleno==390); { yylhsminor.yy110 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy42)); } yymsp[0].minor.yy110 = yylhsminor.yy110; break; case 199: /* duration_list ::= duration_list NK_COMMA duration_literal */ - case 390: /* expression_list ::= expression_list NK_COMMA expr_or_subquery */ yytestcase(yyruleno==390); + case 391: /* expression_list ::= expression_list NK_COMMA expr_or_subquery */ yytestcase(yyruleno==391); { yylhsminor.yy110 = addNodeToList(pCxt, yymsp[-2].minor.yy110, releaseRawExprNode(pCxt, yymsp[0].minor.yy42)); } yymsp[-2].minor.yy110 = yylhsminor.yy110; break; @@ -4538,18 +4540,18 @@ static YYACTIONTYPE yy_reduce( yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 250: /* like_pattern_opt ::= */ - case 320: /* subtable_opt ::= */ yytestcase(yyruleno==320); - case 435: /* case_when_else_opt ::= */ yytestcase(yyruleno==435); - case 465: /* from_clause_opt ::= */ yytestcase(yyruleno==465); - case 494: /* where_clause_opt ::= */ yytestcase(yyruleno==494); - case 503: /* twindow_clause_opt ::= */ yytestcase(yyruleno==503); - case 509: /* sliding_opt ::= */ yytestcase(yyruleno==509); - case 511: /* fill_opt ::= */ yytestcase(yyruleno==511); - case 525: /* having_clause_opt ::= */ yytestcase(yyruleno==525); - case 527: /* range_opt ::= */ yytestcase(yyruleno==527); - case 529: /* every_opt ::= */ yytestcase(yyruleno==529); - case 542: /* slimit_clause_opt ::= */ yytestcase(yyruleno==542); - case 546: /* limit_clause_opt ::= */ yytestcase(yyruleno==546); + case 321: /* subtable_opt ::= */ yytestcase(yyruleno==321); + case 436: /* case_when_else_opt ::= */ yytestcase(yyruleno==436); + case 466: /* from_clause_opt ::= */ yytestcase(yyruleno==466); + case 495: /* where_clause_opt ::= */ yytestcase(yyruleno==495); + case 504: /* twindow_clause_opt ::= */ yytestcase(yyruleno==504); + case 510: /* sliding_opt ::= */ yytestcase(yyruleno==510); + case 512: /* fill_opt ::= */ yytestcase(yyruleno==512); + case 526: /* having_clause_opt ::= */ yytestcase(yyruleno==526); + case 528: /* range_opt ::= */ yytestcase(yyruleno==528); + case 530: /* every_opt ::= */ yytestcase(yyruleno==530); + case 543: /* slimit_clause_opt ::= */ yytestcase(yyruleno==543); + case 547: /* limit_clause_opt ::= */ yytestcase(yyruleno==547); { yymsp[1].minor.yy42 = NULL; } break; case 251: /* like_pattern_opt ::= LIKE NK_STRING */ @@ -4615,6 +4617,7 @@ static YYACTIONTYPE yy_reduce( yymsp[-2].minor.yy42 = yylhsminor.yy42; break; case 281: /* sma_stream_opt ::= sma_stream_opt DELETE_MARK duration_literal */ + case 319: /* stream_options ::= stream_options DELETE_MARK duration_literal */ yytestcase(yyruleno==319); { ((SStreamOptions*)yymsp[-2].minor.yy42)->pDeleteMark = releaseRawExprNode(pCxt, yymsp[0].minor.yy42); yylhsminor.yy42 = yymsp[-2].minor.yy42; } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; @@ -4693,112 +4696,112 @@ static YYACTIONTYPE yy_reduce( { ((SStreamOptions*)yymsp[-2].minor.yy42)->fillHistory = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); yylhsminor.yy42 = yymsp[-2].minor.yy42; } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 319: /* stream_options ::= stream_options IGNORE UPDATE NK_INTEGER */ + case 320: /* stream_options ::= stream_options IGNORE UPDATE NK_INTEGER */ { ((SStreamOptions*)yymsp[-3].minor.yy42)->ignoreUpdate = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); yylhsminor.yy42 = yymsp[-3].minor.yy42; } yymsp[-3].minor.yy42 = yylhsminor.yy42; break; - case 321: /* subtable_opt ::= SUBTABLE NK_LP expression 
NK_RP */ - case 510: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ yytestcase(yyruleno==510); - case 530: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==530); + case 322: /* subtable_opt ::= SUBTABLE NK_LP expression NK_RP */ + case 511: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ yytestcase(yyruleno==511); + case 531: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==531); { yymsp[-3].minor.yy42 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy42); } break; - case 322: /* cmd ::= KILL CONNECTION NK_INTEGER */ + case 323: /* cmd ::= KILL CONNECTION NK_INTEGER */ { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &yymsp[0].minor.yy0); } break; - case 323: /* cmd ::= KILL QUERY NK_STRING */ + case 324: /* cmd ::= KILL QUERY NK_STRING */ { pCxt->pRootNode = createKillQueryStmt(pCxt, &yymsp[0].minor.yy0); } break; - case 324: /* cmd ::= KILL TRANSACTION NK_INTEGER */ + case 325: /* cmd ::= KILL TRANSACTION NK_INTEGER */ { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_TRANSACTION_STMT, &yymsp[0].minor.yy0); } break; - case 325: /* cmd ::= BALANCE VGROUP */ + case 326: /* cmd ::= BALANCE VGROUP */ { pCxt->pRootNode = createBalanceVgroupStmt(pCxt); } break; - case 326: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ + case 327: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ { pCxt->pRootNode = createMergeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 327: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ + case 328: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ { pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy110); } break; - case 328: /* cmd ::= SPLIT VGROUP NK_INTEGER */ + case 329: /* cmd ::= SPLIT VGROUP NK_INTEGER */ { pCxt->pRootNode = createSplitVgroupStmt(pCxt, &yymsp[0].minor.yy0); } break; - case 329: /* dnode_list ::= DNODE NK_INTEGER */ + case 330: /* dnode_list ::= DNODE NK_INTEGER */ { yymsp[-1].minor.yy110 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } break; - case 331: /* cmd ::= DELETE FROM full_table_name where_clause_opt */ + case 332: /* cmd ::= DELETE FROM full_table_name where_clause_opt */ { pCxt->pRootNode = createDeleteStmt(pCxt, yymsp[-1].minor.yy42, yymsp[0].minor.yy42); } break; - case 334: /* insert_query ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */ + case 335: /* insert_query ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */ { yymsp[-6].minor.yy42 = createInsertStmt(pCxt, yymsp[-4].minor.yy42, yymsp[-2].minor.yy110, yymsp[0].minor.yy42); } break; - case 335: /* insert_query ::= INSERT INTO full_table_name query_or_subquery */ + case 336: /* insert_query ::= INSERT INTO full_table_name query_or_subquery */ { yymsp[-3].minor.yy42 = createInsertStmt(pCxt, yymsp[-1].minor.yy42, NULL, yymsp[0].minor.yy42); } break; - case 336: /* literal ::= NK_INTEGER */ + case 337: /* literal ::= NK_INTEGER */ { yylhsminor.yy42 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0)); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 337: /* literal ::= NK_FLOAT */ + case 338: /* literal ::= NK_FLOAT */ { yylhsminor.yy42 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 338: /* literal ::= 
NK_STRING */ + case 339: /* literal ::= NK_STRING */ { yylhsminor.yy42 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 339: /* literal ::= NK_BOOL */ + case 340: /* literal ::= NK_BOOL */ { yylhsminor.yy42 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 340: /* literal ::= TIMESTAMP NK_STRING */ + case 341: /* literal ::= TIMESTAMP NK_STRING */ { yylhsminor.yy42 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); } yymsp[-1].minor.yy42 = yylhsminor.yy42; break; - case 341: /* literal ::= duration_literal */ - case 351: /* signed_literal ::= signed */ yytestcase(yyruleno==351); - case 372: /* expr_or_subquery ::= expression */ yytestcase(yyruleno==372); - case 373: /* expression ::= literal */ yytestcase(yyruleno==373); - case 374: /* expression ::= pseudo_column */ yytestcase(yyruleno==374); - case 375: /* expression ::= column_reference */ yytestcase(yyruleno==375); - case 376: /* expression ::= function_expression */ yytestcase(yyruleno==376); - case 377: /* expression ::= case_when_expression */ yytestcase(yyruleno==377); - case 408: /* function_expression ::= literal_func */ yytestcase(yyruleno==408); - case 457: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==457); - case 461: /* boolean_primary ::= predicate */ yytestcase(yyruleno==461); - case 463: /* common_expression ::= expr_or_subquery */ yytestcase(yyruleno==463); - case 464: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==464); - case 467: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==467); - case 469: /* table_reference ::= table_primary */ yytestcase(yyruleno==469); - case 470: /* table_reference ::= joined_table */ yytestcase(yyruleno==470); - case 474: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==474); - case 532: /* query_simple ::= query_specification */ yytestcase(yyruleno==532); - case 533: /* query_simple ::= union_query_expression */ yytestcase(yyruleno==533); - case 536: /* query_simple_or_subquery ::= query_simple */ yytestcase(yyruleno==536); - case 538: /* query_or_subquery ::= query_expression */ yytestcase(yyruleno==538); + case 342: /* literal ::= duration_literal */ + case 352: /* signed_literal ::= signed */ yytestcase(yyruleno==352); + case 373: /* expr_or_subquery ::= expression */ yytestcase(yyruleno==373); + case 374: /* expression ::= literal */ yytestcase(yyruleno==374); + case 375: /* expression ::= pseudo_column */ yytestcase(yyruleno==375); + case 376: /* expression ::= column_reference */ yytestcase(yyruleno==376); + case 377: /* expression ::= function_expression */ yytestcase(yyruleno==377); + case 378: /* expression ::= case_when_expression */ yytestcase(yyruleno==378); + case 409: /* function_expression ::= literal_func */ yytestcase(yyruleno==409); + case 458: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==458); + case 462: /* boolean_primary ::= predicate */ yytestcase(yyruleno==462); + case 464: /* common_expression ::= expr_or_subquery */ yytestcase(yyruleno==464); + case 465: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==465); + case 468: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==468); + 
case 470: /* table_reference ::= table_primary */ yytestcase(yyruleno==470); + case 471: /* table_reference ::= joined_table */ yytestcase(yyruleno==471); + case 475: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==475); + case 533: /* query_simple ::= query_specification */ yytestcase(yyruleno==533); + case 534: /* query_simple ::= union_query_expression */ yytestcase(yyruleno==534); + case 537: /* query_simple_or_subquery ::= query_simple */ yytestcase(yyruleno==537); + case 539: /* query_or_subquery ::= query_expression */ yytestcase(yyruleno==539); { yylhsminor.yy42 = yymsp[0].minor.yy42; } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 342: /* literal ::= NULL */ + case 343: /* literal ::= NULL */ { yylhsminor.yy42 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 343: /* literal ::= NK_QUESTION */ + case 344: /* literal ::= NK_QUESTION */ { yylhsminor.yy42 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 344: /* duration_literal ::= NK_VARIABLE */ + case 345: /* duration_literal ::= NK_VARIABLE */ { yylhsminor.yy42 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 345: /* signed ::= NK_INTEGER */ + case 346: /* signed ::= NK_INTEGER */ { yylhsminor.yy42 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 346: /* signed ::= NK_PLUS NK_INTEGER */ + case 347: /* signed ::= NK_PLUS NK_INTEGER */ { yymsp[-1].minor.yy42 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); } break; - case 347: /* signed ::= NK_MINUS NK_INTEGER */ + case 348: /* signed ::= NK_MINUS NK_INTEGER */ { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; @@ -4806,14 +4809,14 @@ static YYACTIONTYPE yy_reduce( } yymsp[-1].minor.yy42 = yylhsminor.yy42; break; - case 348: /* signed ::= NK_FLOAT */ + case 349: /* signed ::= NK_FLOAT */ { yylhsminor.yy42 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 349: /* signed ::= NK_PLUS NK_FLOAT */ + case 350: /* signed ::= NK_PLUS NK_FLOAT */ { yymsp[-1].minor.yy42 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } break; - case 350: /* signed ::= NK_MINUS NK_FLOAT */ + case 351: /* signed ::= NK_MINUS NK_FLOAT */ { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; @@ -4821,57 +4824,57 @@ static YYACTIONTYPE yy_reduce( } yymsp[-1].minor.yy42 = yylhsminor.yy42; break; - case 352: /* signed_literal ::= NK_STRING */ + case 353: /* signed_literal ::= NK_STRING */ { yylhsminor.yy42 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 353: /* signed_literal ::= NK_BOOL */ + case 354: /* signed_literal ::= NK_BOOL */ { yylhsminor.yy42 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 354: /* signed_literal ::= TIMESTAMP NK_STRING */ + case 355: /* signed_literal ::= TIMESTAMP NK_STRING */ { yymsp[-1].minor.yy42 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); } break; - 
case 355: /* signed_literal ::= duration_literal */ - case 357: /* signed_literal ::= literal_func */ yytestcase(yyruleno==357); - case 428: /* star_func_para ::= expr_or_subquery */ yytestcase(yyruleno==428); - case 490: /* select_item ::= common_expression */ yytestcase(yyruleno==490); - case 500: /* partition_item ::= expr_or_subquery */ yytestcase(yyruleno==500); - case 537: /* query_simple_or_subquery ::= subquery */ yytestcase(yyruleno==537); - case 539: /* query_or_subquery ::= subquery */ yytestcase(yyruleno==539); - case 552: /* search_condition ::= common_expression */ yytestcase(yyruleno==552); + case 356: /* signed_literal ::= duration_literal */ + case 358: /* signed_literal ::= literal_func */ yytestcase(yyruleno==358); + case 429: /* star_func_para ::= expr_or_subquery */ yytestcase(yyruleno==429); + case 491: /* select_item ::= common_expression */ yytestcase(yyruleno==491); + case 501: /* partition_item ::= expr_or_subquery */ yytestcase(yyruleno==501); + case 538: /* query_simple_or_subquery ::= subquery */ yytestcase(yyruleno==538); + case 540: /* query_or_subquery ::= subquery */ yytestcase(yyruleno==540); + case 553: /* search_condition ::= common_expression */ yytestcase(yyruleno==553); { yylhsminor.yy42 = releaseRawExprNode(pCxt, yymsp[0].minor.yy42); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 356: /* signed_literal ::= NULL */ + case 357: /* signed_literal ::= NULL */ { yylhsminor.yy42 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 358: /* signed_literal ::= NK_QUESTION */ + case 359: /* signed_literal ::= NK_QUESTION */ { yylhsminor.yy42 = createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 378: /* expression ::= NK_LP expression NK_RP */ - case 462: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==462); - case 551: /* subquery ::= NK_LP subquery NK_RP */ yytestcase(yyruleno==551); + case 379: /* expression ::= NK_LP expression NK_RP */ + case 463: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==463); + case 552: /* subquery ::= NK_LP subquery NK_RP */ yytestcase(yyruleno==552); { yylhsminor.yy42 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy42)); } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 379: /* expression ::= NK_PLUS expr_or_subquery */ + case 380: /* expression ::= NK_PLUS expr_or_subquery */ { SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy42); yylhsminor.yy42 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy42)); } yymsp[-1].minor.yy42 = yylhsminor.yy42; break; - case 380: /* expression ::= NK_MINUS expr_or_subquery */ + case 381: /* expression ::= NK_MINUS expr_or_subquery */ { SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy42); yylhsminor.yy42 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy42), NULL)); } yymsp[-1].minor.yy42 = yylhsminor.yy42; break; - case 381: /* expression ::= expr_or_subquery NK_PLUS expr_or_subquery */ + case 382: /* expression ::= expr_or_subquery NK_PLUS expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy42); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy42); @@ -4879,7 +4882,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy42 = 
yylhsminor.yy42; break; - case 382: /* expression ::= expr_or_subquery NK_MINUS expr_or_subquery */ + case 383: /* expression ::= expr_or_subquery NK_MINUS expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy42); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy42); @@ -4887,7 +4890,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 383: /* expression ::= expr_or_subquery NK_STAR expr_or_subquery */ + case 384: /* expression ::= expr_or_subquery NK_STAR expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy42); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy42); @@ -4895,7 +4898,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 384: /* expression ::= expr_or_subquery NK_SLASH expr_or_subquery */ + case 385: /* expression ::= expr_or_subquery NK_SLASH expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy42); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy42); @@ -4903,7 +4906,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 385: /* expression ::= expr_or_subquery NK_REM expr_or_subquery */ + case 386: /* expression ::= expr_or_subquery NK_REM expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy42); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy42); @@ -4911,14 +4914,14 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 386: /* expression ::= column_reference NK_ARROW NK_STRING */ + case 387: /* expression ::= column_reference NK_ARROW NK_STRING */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy42); yylhsminor.yy42 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy42), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0))); } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 387: /* expression ::= expr_or_subquery NK_BITAND expr_or_subquery */ + case 388: /* expression ::= expr_or_subquery NK_BITAND expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy42); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy42); @@ -4926,7 +4929,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 388: /* expression ::= expr_or_subquery NK_BITOR expr_or_subquery */ + case 389: /* expression ::= expr_or_subquery NK_BITOR expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy42); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy42); @@ -4934,71 +4937,71 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 391: /* column_reference ::= column_name */ + case 392: /* column_reference ::= column_name */ { yylhsminor.yy42 = createRawExprNode(pCxt, &yymsp[0].minor.yy225, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy225)); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 392: /* column_reference ::= table_name NK_DOT column_name */ + case 393: /* column_reference ::= table_name NK_DOT column_name */ { yylhsminor.yy42 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy225, &yymsp[0].minor.yy225, createColumnNode(pCxt, &yymsp[-2].minor.yy225, &yymsp[0].minor.yy225)); } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 393: /* pseudo_column ::= ROWTS */ - case 394: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==394); - case 
396: /* pseudo_column ::= QSTART */ yytestcase(yyruleno==396); - case 397: /* pseudo_column ::= QEND */ yytestcase(yyruleno==397); - case 398: /* pseudo_column ::= QDURATION */ yytestcase(yyruleno==398); - case 399: /* pseudo_column ::= WSTART */ yytestcase(yyruleno==399); - case 400: /* pseudo_column ::= WEND */ yytestcase(yyruleno==400); - case 401: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==401); - case 402: /* pseudo_column ::= IROWTS */ yytestcase(yyruleno==402); - case 403: /* pseudo_column ::= ISFILLED */ yytestcase(yyruleno==403); - case 404: /* pseudo_column ::= QTAGS */ yytestcase(yyruleno==404); - case 410: /* literal_func ::= NOW */ yytestcase(yyruleno==410); + case 394: /* pseudo_column ::= ROWTS */ + case 395: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==395); + case 397: /* pseudo_column ::= QSTART */ yytestcase(yyruleno==397); + case 398: /* pseudo_column ::= QEND */ yytestcase(yyruleno==398); + case 399: /* pseudo_column ::= QDURATION */ yytestcase(yyruleno==399); + case 400: /* pseudo_column ::= WSTART */ yytestcase(yyruleno==400); + case 401: /* pseudo_column ::= WEND */ yytestcase(yyruleno==401); + case 402: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==402); + case 403: /* pseudo_column ::= IROWTS */ yytestcase(yyruleno==403); + case 404: /* pseudo_column ::= ISFILLED */ yytestcase(yyruleno==404); + case 405: /* pseudo_column ::= QTAGS */ yytestcase(yyruleno==405); + case 411: /* literal_func ::= NOW */ yytestcase(yyruleno==411); { yylhsminor.yy42 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 395: /* pseudo_column ::= table_name NK_DOT TBNAME */ + case 396: /* pseudo_column ::= table_name NK_DOT TBNAME */ { yylhsminor.yy42 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy225, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy225)))); } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 405: /* function_expression ::= function_name NK_LP expression_list NK_RP */ - case 406: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==406); + case 406: /* function_expression ::= function_name NK_LP expression_list NK_RP */ + case 407: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==407); { yylhsminor.yy42 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy225, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy225, yymsp[-1].minor.yy110)); } yymsp[-3].minor.yy42 = yylhsminor.yy42; break; - case 407: /* function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP */ + case 408: /* function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP */ { yylhsminor.yy42 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy42), yymsp[-1].minor.yy448)); } yymsp[-5].minor.yy42 = yylhsminor.yy42; break; - case 409: /* literal_func ::= noarg_func NK_LP NK_RP */ + case 410: /* literal_func ::= noarg_func NK_LP NK_RP */ { yylhsminor.yy42 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy225, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy225, NULL)); } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 424: /* star_func_para_list ::= NK_STAR */ + case 425: /* star_func_para_list ::= NK_STAR */ { yylhsminor.yy110 = createNodeList(pCxt, 
createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); } yymsp[0].minor.yy110 = yylhsminor.yy110; break; - case 429: /* star_func_para ::= table_name NK_DOT NK_STAR */ - case 493: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==493); + case 430: /* star_func_para ::= table_name NK_DOT NK_STAR */ + case 494: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==494); { yylhsminor.yy42 = createColumnNode(pCxt, &yymsp[-2].minor.yy225, &yymsp[0].minor.yy0); } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 430: /* case_when_expression ::= CASE when_then_list case_when_else_opt END */ + case 431: /* case_when_expression ::= CASE when_then_list case_when_else_opt END */ { yylhsminor.yy42 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, createCaseWhenNode(pCxt, NULL, yymsp[-2].minor.yy110, yymsp[-1].minor.yy42)); } yymsp[-3].minor.yy42 = yylhsminor.yy42; break; - case 431: /* case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END */ + case 432: /* case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END */ { yylhsminor.yy42 = createRawExprNodeExt(pCxt, &yymsp[-4].minor.yy0, &yymsp[0].minor.yy0, createCaseWhenNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy42), yymsp[-2].minor.yy110, yymsp[-1].minor.yy42)); } yymsp[-4].minor.yy42 = yylhsminor.yy42; break; - case 434: /* when_then_expr ::= WHEN common_expression THEN common_expression */ + case 435: /* when_then_expr ::= WHEN common_expression THEN common_expression */ { yymsp[-3].minor.yy42 = createWhenThenNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy42), releaseRawExprNode(pCxt, yymsp[0].minor.yy42)); } break; - case 436: /* case_when_else_opt ::= ELSE common_expression */ + case 437: /* case_when_else_opt ::= ELSE common_expression */ { yymsp[-1].minor.yy42 = releaseRawExprNode(pCxt, yymsp[0].minor.yy42); } break; - case 437: /* predicate ::= expr_or_subquery compare_op expr_or_subquery */ - case 442: /* predicate ::= expr_or_subquery in_op in_predicate_value */ yytestcase(yyruleno==442); + case 438: /* predicate ::= expr_or_subquery compare_op expr_or_subquery */ + case 443: /* predicate ::= expr_or_subquery in_op in_predicate_value */ yytestcase(yyruleno==443); { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy42); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy42); @@ -5006,7 +5009,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 438: /* predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */ + case 439: /* predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy42); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy42); @@ -5014,7 +5017,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-4].minor.yy42 = yylhsminor.yy42; break; - case 439: /* predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */ + case 440: /* predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy42); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy42); @@ -5022,71 +5025,71 @@ static YYACTIONTYPE yy_reduce( } yymsp[-5].minor.yy42 = yylhsminor.yy42; break; - case 440: /* predicate ::= expr_or_subquery IS NULL */ + case 441: /* predicate ::= expr_or_subquery IS NULL */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy42); 
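/*
 * Reviewer aside (not part of the generated patch): sql.c is emitted by the Lemon
 * parser generator from sql.y, so the long run of "case N -> case N+1" edits in this
 * region is purely mechanical -- presumably one grammar rule was added earlier in
 * sql.y, shifting every later rule number by one while the action bodies stay
 * identical. Each binary-operator/predicate reduce action follows the same shape,
 * sketched here with OP_TYPE_XXX as a placeholder for whichever operator the rule
 * maps to; every other identifier is taken from the surrounding generated code.
 *
 *   SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy42);  // first token of the left operand
 *   SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy42);   // last token of the right operand
 *   yylhsminor.yy42 = createRawExprNodeExt(pCxt, &s, &e,
 *       createOperatorNode(pCxt, OP_TYPE_XXX,
 *                          releaseRawExprNode(pCxt, yymsp[-2].minor.yy42),
 *                          releaseRawExprNode(pCxt, yymsp[0].minor.yy42)));
 */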
yylhsminor.yy42 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy42), NULL)); } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 441: /* predicate ::= expr_or_subquery IS NOT NULL */ + case 442: /* predicate ::= expr_or_subquery IS NOT NULL */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy42); yylhsminor.yy42 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy42), NULL)); } yymsp[-3].minor.yy42 = yylhsminor.yy42; break; - case 443: /* compare_op ::= NK_LT */ + case 444: /* compare_op ::= NK_LT */ { yymsp[0].minor.yy2 = OP_TYPE_LOWER_THAN; } break; - case 444: /* compare_op ::= NK_GT */ + case 445: /* compare_op ::= NK_GT */ { yymsp[0].minor.yy2 = OP_TYPE_GREATER_THAN; } break; - case 445: /* compare_op ::= NK_LE */ + case 446: /* compare_op ::= NK_LE */ { yymsp[0].minor.yy2 = OP_TYPE_LOWER_EQUAL; } break; - case 446: /* compare_op ::= NK_GE */ + case 447: /* compare_op ::= NK_GE */ { yymsp[0].minor.yy2 = OP_TYPE_GREATER_EQUAL; } break; - case 447: /* compare_op ::= NK_NE */ + case 448: /* compare_op ::= NK_NE */ { yymsp[0].minor.yy2 = OP_TYPE_NOT_EQUAL; } break; - case 448: /* compare_op ::= NK_EQ */ + case 449: /* compare_op ::= NK_EQ */ { yymsp[0].minor.yy2 = OP_TYPE_EQUAL; } break; - case 449: /* compare_op ::= LIKE */ + case 450: /* compare_op ::= LIKE */ { yymsp[0].minor.yy2 = OP_TYPE_LIKE; } break; - case 450: /* compare_op ::= NOT LIKE */ + case 451: /* compare_op ::= NOT LIKE */ { yymsp[-1].minor.yy2 = OP_TYPE_NOT_LIKE; } break; - case 451: /* compare_op ::= MATCH */ + case 452: /* compare_op ::= MATCH */ { yymsp[0].minor.yy2 = OP_TYPE_MATCH; } break; - case 452: /* compare_op ::= NMATCH */ + case 453: /* compare_op ::= NMATCH */ { yymsp[0].minor.yy2 = OP_TYPE_NMATCH; } break; - case 453: /* compare_op ::= CONTAINS */ + case 454: /* compare_op ::= CONTAINS */ { yymsp[0].minor.yy2 = OP_TYPE_JSON_CONTAINS; } break; - case 454: /* in_op ::= IN */ + case 455: /* in_op ::= IN */ { yymsp[0].minor.yy2 = OP_TYPE_IN; } break; - case 455: /* in_op ::= NOT IN */ + case 456: /* in_op ::= NOT IN */ { yymsp[-1].minor.yy2 = OP_TYPE_NOT_IN; } break; - case 456: /* in_predicate_value ::= NK_LP literal_list NK_RP */ + case 457: /* in_predicate_value ::= NK_LP literal_list NK_RP */ { yylhsminor.yy42 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy110)); } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 458: /* boolean_value_expression ::= NOT boolean_primary */ + case 459: /* boolean_value_expression ::= NOT boolean_primary */ { SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy42); yylhsminor.yy42 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy42), NULL)); } yymsp[-1].minor.yy42 = yylhsminor.yy42; break; - case 459: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ + case 460: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy42); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy42); @@ -5094,7 +5097,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 460: /* boolean_value_expression ::= boolean_value_expression AND 
boolean_value_expression */ + case 461: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy42); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy42); @@ -5102,48 +5105,48 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 466: /* from_clause_opt ::= FROM table_reference_list */ - case 495: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==495); - case 526: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==526); + case 467: /* from_clause_opt ::= FROM table_reference_list */ + case 496: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==496); + case 527: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==527); { yymsp[-1].minor.yy42 = yymsp[0].minor.yy42; } break; - case 468: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */ + case 469: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */ { yylhsminor.yy42 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy42, yymsp[0].minor.yy42, NULL); } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 471: /* table_primary ::= table_name alias_opt */ + case 472: /* table_primary ::= table_name alias_opt */ { yylhsminor.yy42 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy225, &yymsp[0].minor.yy225); } yymsp[-1].minor.yy42 = yylhsminor.yy42; break; - case 472: /* table_primary ::= db_name NK_DOT table_name alias_opt */ + case 473: /* table_primary ::= db_name NK_DOT table_name alias_opt */ { yylhsminor.yy42 = createRealTableNode(pCxt, &yymsp[-3].minor.yy225, &yymsp[-1].minor.yy225, &yymsp[0].minor.yy225); } yymsp[-3].minor.yy42 = yylhsminor.yy42; break; - case 473: /* table_primary ::= subquery alias_opt */ + case 474: /* table_primary ::= subquery alias_opt */ { yylhsminor.yy42 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy42), &yymsp[0].minor.yy225); } yymsp[-1].minor.yy42 = yylhsminor.yy42; break; - case 475: /* alias_opt ::= */ + case 476: /* alias_opt ::= */ { yymsp[1].minor.yy225 = nil_token; } break; - case 477: /* alias_opt ::= AS table_alias */ + case 478: /* alias_opt ::= AS table_alias */ { yymsp[-1].minor.yy225 = yymsp[0].minor.yy225; } break; - case 478: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */ - case 479: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==479); + case 479: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */ + case 480: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==480); { yymsp[-2].minor.yy42 = yymsp[-1].minor.yy42; } break; - case 480: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ + case 481: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */ { yylhsminor.yy42 = createJoinTableNode(pCxt, yymsp[-4].minor.yy638, yymsp[-5].minor.yy42, yymsp[-2].minor.yy42, yymsp[0].minor.yy42); } yymsp[-5].minor.yy42 = yylhsminor.yy42; break; - case 481: /* join_type ::= */ + case 482: /* join_type ::= */ { yymsp[1].minor.yy638 = JOIN_TYPE_INNER; } break; - case 482: /* join_type ::= INNER */ + case 483: /* join_type ::= INNER */ { yymsp[0].minor.yy638 = JOIN_TYPE_INNER; } break; - case 483: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt 
range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ + case 484: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ { yymsp[-11].minor.yy42 = createSelectStmt(pCxt, yymsp[-10].minor.yy103, yymsp[-9].minor.yy110, yymsp[-8].minor.yy42); yymsp[-11].minor.yy42 = addWhereClause(pCxt, yymsp[-11].minor.yy42, yymsp[-7].minor.yy42); @@ -5156,82 +5159,82 @@ static YYACTIONTYPE yy_reduce( yymsp[-11].minor.yy42 = addFillClause(pCxt, yymsp[-11].minor.yy42, yymsp[-3].minor.yy42); } break; - case 486: /* set_quantifier_opt ::= ALL */ + case 487: /* set_quantifier_opt ::= ALL */ { yymsp[0].minor.yy103 = false; } break; - case 489: /* select_item ::= NK_STAR */ + case 490: /* select_item ::= NK_STAR */ { yylhsminor.yy42 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0); } yymsp[0].minor.yy42 = yylhsminor.yy42; break; - case 491: /* select_item ::= common_expression column_alias */ - case 501: /* partition_item ::= expr_or_subquery column_alias */ yytestcase(yyruleno==501); + case 492: /* select_item ::= common_expression column_alias */ + case 502: /* partition_item ::= expr_or_subquery column_alias */ yytestcase(yyruleno==502); { yylhsminor.yy42 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy42), &yymsp[0].minor.yy225); } yymsp[-1].minor.yy42 = yylhsminor.yy42; break; - case 492: /* select_item ::= common_expression AS column_alias */ - case 502: /* partition_item ::= expr_or_subquery AS column_alias */ yytestcase(yyruleno==502); + case 493: /* select_item ::= common_expression AS column_alias */ + case 503: /* partition_item ::= expr_or_subquery AS column_alias */ yytestcase(yyruleno==503); { yylhsminor.yy42 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy42), &yymsp[0].minor.yy225); } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 497: /* partition_by_clause_opt ::= PARTITION BY partition_list */ - case 522: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==522); - case 541: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==541); + case 498: /* partition_by_clause_opt ::= PARTITION BY partition_list */ + case 523: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==523); + case 542: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==542); { yymsp[-2].minor.yy110 = yymsp[0].minor.yy110; } break; - case 504: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ + case 505: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */ { yymsp[-5].minor.yy42 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy42), releaseRawExprNode(pCxt, yymsp[-1].minor.yy42)); } break; - case 505: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP */ + case 506: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP */ { yymsp[-3].minor.yy42 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy42)); } break; - case 506: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ + case 507: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */ { yymsp[-5].minor.yy42 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy42), NULL, 
yymsp[-1].minor.yy42, yymsp[0].minor.yy42); } break; - case 507: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ + case 508: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */ { yymsp[-7].minor.yy42 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy42), releaseRawExprNode(pCxt, yymsp[-3].minor.yy42), yymsp[-1].minor.yy42, yymsp[0].minor.yy42); } break; - case 508: /* twindow_clause_opt ::= EVENT_WINDOW START WITH search_condition END WITH search_condition */ + case 509: /* twindow_clause_opt ::= EVENT_WINDOW START WITH search_condition END WITH search_condition */ { yymsp[-6].minor.yy42 = createEventWindowNode(pCxt, yymsp[-3].minor.yy42, yymsp[0].minor.yy42); } break; - case 512: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */ + case 513: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */ { yymsp[-3].minor.yy42 = createFillNode(pCxt, yymsp[-1].minor.yy410, NULL); } break; - case 513: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ + case 514: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */ { yymsp[-5].minor.yy42 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy110)); } break; - case 514: /* fill_opt ::= FILL NK_LP VALUE_F NK_COMMA literal_list NK_RP */ + case 515: /* fill_opt ::= FILL NK_LP VALUE_F NK_COMMA literal_list NK_RP */ { yymsp[-5].minor.yy42 = createFillNode(pCxt, FILL_MODE_VALUE_F, createNodeListNode(pCxt, yymsp[-1].minor.yy110)); } break; - case 515: /* fill_mode ::= NONE */ + case 516: /* fill_mode ::= NONE */ { yymsp[0].minor.yy410 = FILL_MODE_NONE; } break; - case 516: /* fill_mode ::= PREV */ + case 517: /* fill_mode ::= PREV */ { yymsp[0].minor.yy410 = FILL_MODE_PREV; } break; - case 517: /* fill_mode ::= NULL */ + case 518: /* fill_mode ::= NULL */ { yymsp[0].minor.yy410 = FILL_MODE_NULL; } break; - case 518: /* fill_mode ::= NULL_F */ + case 519: /* fill_mode ::= NULL_F */ { yymsp[0].minor.yy410 = FILL_MODE_NULL_F; } break; - case 519: /* fill_mode ::= LINEAR */ + case 520: /* fill_mode ::= LINEAR */ { yymsp[0].minor.yy410 = FILL_MODE_LINEAR; } break; - case 520: /* fill_mode ::= NEXT */ + case 521: /* fill_mode ::= NEXT */ { yymsp[0].minor.yy410 = FILL_MODE_NEXT; } break; - case 523: /* group_by_list ::= expr_or_subquery */ + case 524: /* group_by_list ::= expr_or_subquery */ { yylhsminor.yy110 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy42))); } yymsp[0].minor.yy110 = yylhsminor.yy110; break; - case 524: /* group_by_list ::= group_by_list NK_COMMA expr_or_subquery */ + case 525: /* group_by_list ::= group_by_list NK_COMMA expr_or_subquery */ { yylhsminor.yy110 = addNodeToList(pCxt, yymsp[-2].minor.yy110, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy42))); } yymsp[-2].minor.yy110 = yylhsminor.yy110; break; - case 528: /* range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */ + case 529: /* range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */ { yymsp[-5].minor.yy42 = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy42), releaseRawExprNode(pCxt, yymsp[-1].minor.yy42)); } break; - case 531: /* query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */ + case 532: /* query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */ { yylhsminor.yy42 = 
addOrderByClause(pCxt, yymsp[-3].minor.yy42, yymsp[-2].minor.yy110); yylhsminor.yy42 = addSlimitClause(pCxt, yylhsminor.yy42, yymsp[-1].minor.yy42); @@ -5239,50 +5242,50 @@ static YYACTIONTYPE yy_reduce( } yymsp[-3].minor.yy42 = yylhsminor.yy42; break; - case 534: /* union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */ + case 535: /* union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */ { yylhsminor.yy42 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy42, yymsp[0].minor.yy42); } yymsp[-3].minor.yy42 = yylhsminor.yy42; break; - case 535: /* union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */ + case 536: /* union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */ { yylhsminor.yy42 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy42, yymsp[0].minor.yy42); } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 543: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */ - case 547: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==547); + case 544: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */ + case 548: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==548); { yymsp[-1].minor.yy42 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); } break; - case 544: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ - case 548: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==548); + case 545: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ + case 549: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==549); { yymsp[-3].minor.yy42 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } break; - case 545: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ - case 549: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==549); + case 546: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ + case 550: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==550); { yymsp[-3].minor.yy42 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); } break; - case 550: /* subquery ::= NK_LP query_expression NK_RP */ + case 551: /* subquery ::= NK_LP query_expression NK_RP */ { yylhsminor.yy42 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy42); } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 555: /* sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */ + case 556: /* sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */ { yylhsminor.yy42 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy42), yymsp[-1].minor.yy106, yymsp[0].minor.yy599); } yymsp[-2].minor.yy42 = yylhsminor.yy42; break; - case 556: /* ordering_specification_opt ::= */ + case 557: /* ordering_specification_opt ::= */ { yymsp[1].minor.yy106 = ORDER_ASC; } break; - case 557: /* ordering_specification_opt ::= ASC */ + case 558: /* ordering_specification_opt ::= ASC */ { yymsp[0].minor.yy106 = ORDER_ASC; } break; - case 558: /* ordering_specification_opt ::= DESC */ + case 559: /* ordering_specification_opt ::= DESC */ { yymsp[0].minor.yy106 = ORDER_DESC; } break; - case 559: /* null_ordering_opt ::= */ + case 560: /* null_ordering_opt ::= */ { yymsp[1].minor.yy599 = NULL_ORDER_DEFAULT; } break; - case 560: /* 
null_ordering_opt ::= NULLS FIRST */ + case 561: /* null_ordering_opt ::= NULLS FIRST */ { yymsp[-1].minor.yy599 = NULL_ORDER_FIRST; } break; - case 561: /* null_ordering_opt ::= NULLS LAST */ + case 562: /* null_ordering_opt ::= NULLS LAST */ { yymsp[-1].minor.yy599 = NULL_ORDER_LAST; } break; default: diff --git a/source/libs/qworker/inc/qwInt.h b/source/libs/qworker/inc/qwInt.h index 9553cb97b5..bd06572cb4 100644 --- a/source/libs/qworker/inc/qwInt.h +++ b/source/libs/qworker/inc/qwInt.h @@ -206,6 +206,8 @@ typedef struct SQWorkerMgmt { int32_t paramIdx; } SQWorkerMgmt; +#define QW_CTX_NOT_EXISTS_ERR_CODE(mgmt) (atomic_load_8(&(mgmt)->nodeStopped) ? TSDB_CODE_VND_STOPPED : TSDB_CODE_QRY_TASK_CTX_NOT_EXIST) + #define QW_FPARAMS_DEF SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId #define QW_IDS() sId, qId, tId, rId, eId #define QW_FPARAMS() mgmt, QW_IDS() diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c index 7ee7c50c96..a342e48cc1 100644 --- a/source/libs/qworker/src/qwUtil.c +++ b/source/libs/qworker/src/qwUtil.c @@ -213,15 +213,9 @@ int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { QW_SET_QTID(id, qId, tId, eId); *ctx = taosHashAcquire(mgmt->ctxHash, id, sizeof(id)); - int8_t nodeStopped = atomic_load_8(&mgmt->nodeStopped); if (NULL == (*ctx)) { - if (!nodeStopped) { - QW_TASK_DLOG_E("task ctx not exist, may be dropped"); - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); - } else { - QW_TASK_DLOG_E("node stopped"); - QW_ERR_RET(TSDB_CODE_VND_STOPPED); - } + QW_TASK_DLOG_E("acquired task ctx not exist, may be dropped"); + QW_ERR_RET(QW_CTX_NOT_EXISTS_ERR_CODE(mgmt)); } return TSDB_CODE_SUCCESS; @@ -232,16 +226,9 @@ int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { QW_SET_QTID(id, qId, tId, eId); *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id)); - int8_t nodeStopped = atomic_load_8(&mgmt->nodeStopped); - if (NULL == (*ctx)) { - if (!nodeStopped) { - QW_TASK_DLOG_E("task ctx not exist, may be dropped"); - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); - } else { - QW_TASK_DLOG_E("node stopped"); - QW_ERR_RET(TSDB_CODE_VND_STOPPED); - } + QW_TASK_DLOG_E("get task ctx not exist, may be dropped"); + QW_ERR_RET(QW_CTX_NOT_EXISTS_ERR_CODE(mgmt)); } return TSDB_CODE_SUCCESS; @@ -334,7 +321,8 @@ int32_t qwDropTaskCtx(QW_FPARAMS_DEF) { SQWTaskCtx *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id)); if (NULL == ctx) { - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); + QW_TASK_DLOG_E("drop task ctx not exist, may be dropped"); + QW_ERR_RET(QW_CTX_NOT_EXISTS_ERR_CODE(mgmt)); } octx = *ctx; @@ -346,7 +334,7 @@ int32_t qwDropTaskCtx(QW_FPARAMS_DEF) { if (taosHashRemove(mgmt->ctxHash, id, sizeof(id))) { QW_TASK_ELOG_E("taosHashRemove from ctx hash failed"); - QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST); + QW_ERR_RET(QW_CTX_NOT_EXISTS_ERR_CODE(mgmt)); } qwFreeTaskCtx(&octx); diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 5645e969a2..b120fecd9d 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -262,6 +262,7 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, SOutputData output = {0}; if (NULL == ctx->sinkHandle) { + pOutput->queryEnd = true; return TSDB_CODE_SUCCESS; } @@ -757,7 +758,7 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) { } QW_LOCK(QW_WRITE, &ctx->lock); - if (qComplete || (queryStop && (0 == atomic_load_8((int8_t *)&ctx->queryContinue))) || code) { + if 
(atomic_load_8((int8_t*)&ctx->queryEnd) || (queryStop && (0 == atomic_load_8((int8_t *)&ctx->queryContinue))) || code) { // Note: query is not running anymore QW_SET_PHASE(ctx, QW_PHASE_POST_CQUERY); QW_UNLOCK(QW_WRITE, &ctx->lock); @@ -846,6 +847,9 @@ _return: qwBuildAndSendFetchRsp(qwMsg->msgType + 1, &qwMsg->connInfo, rsp, dataLen, code); QW_TASK_DLOG("%s send, handle:%p, code:%x - %s, dataLen:%d", TMSG_INFO(qwMsg->msgType + 1), qwMsg->connInfo.handle, code, tstrerror(code), dataLen); + } else { + qwFreeFetchRsp(rsp); + rsp = NULL; } } @@ -1216,7 +1220,7 @@ void qWorkerStopAllTasks(void *qWorkerMgmt) { QW_UPDATE_RSP_CODE(ctx, TSDB_CODE_VND_STOPPED); QW_SET_EVENT_RECEIVED(ctx, QW_EVENT_DROP); } else { - qwDropTask(QW_FPARAMS()); + (void)qwDropTask(QW_FPARAMS()); } QW_UNLOCK(QW_WRITE, &ctx->lock); diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 74d555af77..25e65d2588 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -1057,7 +1057,7 @@ static FORCE_INLINE int32_t filterAddColFieldFromField(SFilterInfo *info, SFilte int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *fid) { if (node == NULL) { - fltError("empty node"); + fltDebug("empty node"); FLT_ERR_RET(TSDB_CODE_APP_ERROR); } diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h index 2a5eeecd36..85b952937f 100644 --- a/source/libs/scheduler/inc/schInt.h +++ b/source/libs/scheduler/inc/schInt.h @@ -230,6 +230,7 @@ typedef struct SSchTask { SSchRedirectCtx redirectCtx; // task redirect context bool waitRetry; // wait for retry int32_t execId; // task current execute index + int32_t failedExecId; // last failed task execute index SSchLevel *level; // level SRWLatch planLock; // task update plan lock SSubplan *plan; // subplan diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index ad675cf383..4e05e22474 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -34,12 +34,12 @@ int32_t schValidateRspMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgType) { if (lastMsgType != reqMsgType) { SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType), TMSG_INFO(msgType)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + SCH_ERR_RET(TSDB_CODE_QW_MSG_ERROR); } if (taskStatus != JOB_TASK_STATUS_PART_SUCC) { SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), TMSG_INFO(msgType)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + SCH_ERR_RET(TSDB_CODE_QW_MSG_ERROR); } return TSDB_CODE_SUCCESS; @@ -60,13 +60,13 @@ int32_t schValidateRspMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgType) { if (lastMsgType != reqMsgType) { SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType), TMSG_INFO(msgType)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + SCH_ERR_RET(TSDB_CODE_QW_MSG_ERROR); } if (taskStatus != JOB_TASK_STATUS_EXEC) { SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), TMSG_INFO(msgType)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + SCH_ERR_RET(TSDB_CODE_QW_MSG_ERROR); } return TSDB_CODE_SUCCESS; diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index 8e60222ca6..bdab739327 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -64,6 +64,7 @@ int32_t schInitTask(SSchJob *pJob, 
SSchTask *pTask, SSubplan *pPlan, SSchLevel * pTask->plan = pPlan; pTask->level = pLevel; pTask->execId = -1; + pTask->failedExecId = -2; pTask->timeoutUsec = SCH_DEFAULT_TASK_TIMEOUT_USEC; pTask->taskId = schGenTaskId(); @@ -166,7 +167,7 @@ int32_t schUpdateTaskHandle(SSchJob *pJob, SSchTask *pTask, bool dropExecNode, v schUpdateTaskExecNode(pJob, pTask, handle, execId); - if ((execId != pTask->execId) || pTask->waitRetry) { // ignore it + if ((execId != pTask->execId || execId <= pTask->failedExecId) || pTask->waitRetry) { // ignore it SCH_TASK_DLOG("handle not updated since execId %d is already not current execId %d, waitRetry %d", execId, pTask->execId, pTask->waitRetry); SCH_ERR_RET(TSDB_CODE_SCH_IGNORE_ERROR); @@ -182,6 +183,8 @@ int32_t schProcessOnTaskFailure(SSchJob *pJob, SSchTask *pTask, int32_t errCode) return TSDB_CODE_SCH_IGNORE_ERROR; } + pTask->failedExecId = pTask->execId; + int8_t jobStatus = 0; if (schJobNeedToStop(pJob, &jobStatus)) { SCH_TASK_DLOG("no more task failure processing cause of job status %s", jobTaskStatusStr(jobStatus)); diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index efd19074da..670cfbead1 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -13,6 +13,7 @@ * along with this program. If not, see . */ +#if 0 #include "streamInc.h" int32_t tEncodeSStreamCheckpointSourceReq(SEncoder* pEncoder, const SStreamCheckpointSourceReq* pReq) { @@ -192,3 +193,4 @@ int32_t streamProcessCheckpointRsp(SStreamMeta* pMeta, SStreamTask* pTask, SStre // set status normal return 0; } +#endif diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c index 3f2feb9d28..ffb600be6e 100644 --- a/source/libs/stream/src/streamData.c +++ b/source/libs/stream/src/streamData.c @@ -17,17 +17,16 @@ int32_t streamDispatchReqToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData) { int32_t blockNum = pReq->blockNum; - SArray* pArray = taosArrayInit(blockNum, sizeof(SSDataBlock)); + SArray* pArray = taosArrayInit_s(blockNum, sizeof(SSDataBlock), blockNum); if (pArray == NULL) { return -1; } - taosArraySetSize(pArray, blockNum); ASSERT(pReq->blockNum == taosArrayGetSize(pReq->data)); ASSERT(pReq->blockNum == taosArrayGetSize(pReq->dataLen)); for (int32_t i = 0; i < blockNum; i++) { - SRetrieveTableRsp* pRetrieve = taosArrayGetP(pReq->data, i); + SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*) taosArrayGetP(pReq->data, i); SSDataBlock* pDataBlock = taosArrayGet(pArray, i); blockDecode(pDataBlock, pRetrieve->data); // TODO: refactor @@ -49,7 +48,7 @@ int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock if (pArray == NULL) { return -1; } - taosArraySetSize(pArray, 1); + taosArrayPush(pArray, &(SSDataBlock){0}); SRetrieveTableRsp* pRetrieve = pReq->pRetrieve; SSDataBlock* pDataBlock = taosArrayGet(pArray, 0); blockDecode(pDataBlock, pRetrieve->data); diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 8f63cbbd99..9226d6ebb8 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -15,6 +15,8 @@ #include "streamInc.h" +#define STREAM_EXEC_MAX_BATCH_NUM 100 + static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray* pRes) { int32_t code; void* exec = pTask->exec.executor; @@ -112,12 +114,14 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { int32_t batchCnt = 0; while (1) { if (atomic_load_8(&pTask->taskStatus) 
== TASK_STATUS__DROPPING) { + taosArrayDestroy(pRes); return 0; } SSDataBlock* output = NULL; uint64_t ts = 0; if (qExecTask(exec, &output, &ts) < 0) { + taosArrayDestroy(pRes); return -1; } if (output == NULL) { @@ -227,6 +231,9 @@ int32_t streamExecForAll(SStreamTask* pTask) { batchCnt++; input = newRet; streamQueueProcessSuccess(pTask->inputQueue); + if (batchCnt > STREAM_EXEC_MAX_BATCH_NUM) { + break; + } } } } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 2f991288ff..518ace8630 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -44,7 +44,7 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF goto _err; } - pMeta->pTasks = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + pMeta->pTasks = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); if (pMeta->pTasks == NULL) { goto _err; } @@ -129,13 +129,8 @@ FAIL: } #endif -#if 1 -int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask) { - void* buf = NULL; - if (pMeta->expandFunc(pMeta->ahandle, pTask, ver) < 0) { - return -1; - } - +int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask) { + void* buf = NULL; int32_t len; int32_t code; tEncodeSize(tEncodeSStreamTask, pTask, len, code); @@ -153,17 +148,30 @@ int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask) { tEncoderClear(&encoder); if (tdbTbUpsert(pMeta->pTaskDb, &pTask->taskId, sizeof(int32_t), buf, len, pMeta->txn) < 0) { - ASSERT(0); return -1; } taosMemoryFree(buf); + return 0; +} + +#if 1 +int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask) { + if (pMeta->expandFunc(pMeta->ahandle, pTask, ver) < 0) { + return -1; + } + + if (streamMetaSaveTask(pMeta, pTask) < 0) { + return -1; + } + taosHashPut(pMeta->pTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*)); return 0; } #endif +#if 0 SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId) { SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, &taskId, sizeof(int32_t)); if (ppTask) { @@ -173,6 +181,7 @@ SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId) { return NULL; } } +#endif SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId) { taosRLockLatch(&pMeta->lock); @@ -255,10 +264,9 @@ int32_t streamMetaAbort(SStreamMeta* pMeta) { return 0; } -int32_t streamLoadTasks(SStreamMeta* pMeta) { +int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver) { TBC* pCur = NULL; if (tdbTbcOpen(pMeta->pTaskDb, &pCur, NULL) < 0) { - ASSERT(0); return -1; } @@ -295,7 +303,11 @@ int32_t streamLoadTasks(SStreamMeta* pMeta) { tdbTbcClose(pCur); return -1; } - pTask->taskStatus = TASK_STATUS__NORMAL; + /*pTask->taskStatus = TASK_STATUS__NORMAL;*/ + if (pTask->fillHistory) { + pTask->taskStatus = TASK_STATUS__WAIT_DOWNSTREAM; + streamTaskCheckDownstream(pTask, ver); + } } tdbFree(pKey); diff --git a/source/libs/stream/src/streamQueue.c b/source/libs/stream/src/streamQueue.c index 7eafcdc93e..882fba718b 100644 --- a/source/libs/stream/src/streamQueue.c +++ b/source/libs/stream/src/streamQueue.c @@ -46,6 +46,7 @@ void streamQueueClose(SStreamQueue* queue) { taosMemoryFree(queue); } +#if 0 bool streamQueueResEmpty(const SStreamQueueRes* pRes) { // return true; @@ -101,3 +102,4 @@ SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue) { if (pNode) return streamQueueBuildRes(pNode); return (SStreamQueueRes){0}; } 
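/*
 * Reviewer aside (not part of the patch): the SStreamQueueRes helpers above are being
 * fenced off with #if 0 ... #endif rather than deleted, which keeps the prototype in
 * the tree while dropping it from the build. A related executor change earlier in this
 * patch caps how many queued items one streamExecForAll() pass may merge
 * (STREAM_EXEC_MAX_BATCH_NUM), so a busy task yields the worker thread periodically.
 * A minimal, self-contained sketch of that bounded-drain idiom follows; the Queue type
 * and all names in it are illustrative stand-ins, not TDengine APIs.
 *
 *   #define EXEC_MAX_BATCH_NUM 100
 *
 *   typedef struct { int items[1024]; int head; int tail; } Queue;  // toy ring buffer
 *
 *   static int drainBounded(Queue *q, long *batchSum) {
 *     int cnt = 0;
 *     while (q->head != q->tail) {          // stop when the queue is empty ...
 *       *batchSum += q->items[q->head];     // ... after folding one more item in
 *       q->head = (q->head + 1) % 1024;
 *       if (++cnt > EXEC_MAX_BATCH_NUM) {   // ... or when the batch cap is hit,
 *         break;                            // so one task cannot hog the worker
 *       }
 *     }
 *     return cnt;                           // items merged in this pass
 *   }
 */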
+#endif diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 2ef351cbb0..a2b3e20dbf 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -107,8 +107,6 @@ static inline int stateKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, } SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int32_t szPage, int32_t pages) { - szPage = szPage < 0 ? 4096 : szPage; - pages = pages < 0 ? 256 : pages; SStreamState* pState = taosMemoryCalloc(1, sizeof(SStreamState)); if (pState == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -128,6 +126,28 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int memset(statePath, 0, 1024); tstrncpy(statePath, path, 1024); } + + char cfgPath[1030]; + sprintf(cfgPath, "%s/cfg", statePath); + + char cfg[1024]; + memset(cfg, 0, 1024); + TdFilePtr pCfgFile = taosOpenFile(cfgPath, TD_FILE_READ); + if (pCfgFile != NULL) { + int64_t size; + taosFStatFile(pCfgFile, &size, NULL); + taosReadFile(pCfgFile, cfg, size); + sscanf(cfg, "%d\n%d\n", &szPage, &pages); + } else { + taosMulModeMkDir(statePath, 0755); + pCfgFile = taosOpenFile(cfgPath, TD_FILE_WRITE | TD_FILE_CREATE); + szPage = szPage < 0 ? 4096 : szPage; + pages = pages < 0 ? 256 : pages; + sprintf(cfg, "%d\n%d\n", szPage, pages); + taosWriteFile(pCfgFile, cfg, strlen(cfg)); + } + taosCloseFile(&pCfgFile); + if (tdbOpen(statePath, szPage, pages, &pState->pTdbState->db, 1) < 0) { goto _err; } @@ -893,4 +913,47 @@ char* streamStateSessionDump(SStreamState* pState) { streamStateFreeCur(pCur); return dumpBuf; } + +char* streamStateIntervalDump(SStreamState* pState) { + SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur)); + if (pCur == NULL) { + return NULL; + } + pCur->number = pState->number; + if (tdbTbcOpen(pState->pTdbState->pStateDb, &pCur->pCur, NULL) < 0) { + streamStateFreeCur(pCur); + return NULL; + } + tdbTbcMoveToFirst(pCur->pCur); + + SWinKey key = {0}; + void* buf = NULL; + int32_t bufSize = 0; + int32_t code = streamStateGetKVByCur(pCur, &key, (const void **)&buf, &bufSize); + if (code != 0) { + streamStateFreeCur(pCur); + return NULL; + } + + int32_t size = 2048; + char* dumpBuf = taosMemoryCalloc(size, 1); + int64_t len = 0; + len += snprintf(dumpBuf + len, size - len, "||s:%15" PRId64 ",", key.ts); + // len += snprintf(dumpBuf + len, size - len, "e:%15" PRId64 ",", key.win.ekey); + len += snprintf(dumpBuf + len, size - len, "g:%15" PRId64 "||", key.groupId); + while (1) { + tdbTbcMoveToNext(pCur->pCur); + key = (SWinKey){0}; + code = streamStateGetKVByCur(pCur, &key, NULL, 0); + if (code != 0) { + streamStateFreeCur(pCur); + return dumpBuf; + } + len += snprintf(dumpBuf + len, size - len, "||s:%15" PRId64 ",", key.ts); + // len += snprintf(dumpBuf + len, size - len, "e:%15" PRId64 ",", key.win.ekey); + len += snprintf(dumpBuf + len, size - len, "g:%15" PRId64 "||", key.groupId); + } + streamStateFreeCur(pCur); + return dumpBuf; +} #endif diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index 7e08e195c1..6f2c1a1ad0 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -71,6 +71,7 @@ typedef struct SRaftId { typedef struct SRaftStore { SyncTerm currentTerm; SRaftId voteFor; + TdThreadMutex mutex; } SRaftStore; typedef struct SSyncHbTimerData { @@ -282,7 +283,7 @@ void syncNodeCandidate2Follower(SSyncNode* pSyncNode); // raft vote -------------- void syncNodeVoteForTerm(SSyncNode* pSyncNode, SyncTerm 
term, SRaftId* pRaftId); -void syncNodeVoteForSelf(SSyncNode* pSyncNode); +void syncNodeVoteForSelf(SSyncNode* pSyncNode, SyncTerm term); // log replication SSyncLogReplMgr* syncNodeGetLogReplMgr(SSyncNode* pNode, SRaftId* pDestId); diff --git a/source/libs/sync/inc/syncRaftStore.h b/source/libs/sync/inc/syncRaftStore.h index 21a8fc64a8..38a8ed234b 100644 --- a/source/libs/sync/inc/syncRaftStore.h +++ b/source/libs/sync/inc/syncRaftStore.h @@ -26,14 +26,15 @@ extern "C" { #define RAFT_STORE_PATH_LEN (TSDB_FILENAME_LEN * 2) #define EMPTY_RAFT_ID ((SRaftId){.addr = 0, .vgId = 0}) -int32_t raftStoreReadFile(SSyncNode *pNode); -int32_t raftStoreWriteFile(SSyncNode *pNode); +int32_t raftStoreOpen(SSyncNode *pNode); +void raftStoreClose(SSyncNode *pNode); bool raftStoreHasVoted(SSyncNode *pNode); void raftStoreVote(SSyncNode *pNode, SRaftId *pRaftId); void raftStoreClearVote(SSyncNode *pNode); void raftStoreNextTerm(SSyncNode *pNode); void raftStoreSetTerm(SSyncNode *pNode, SyncTerm term); +SyncTerm raftStoreGetTerm(SSyncNode *pNode); #ifdef __cplusplus } diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index 835e5d248e..b04bcb86c6 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -120,17 +120,17 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) { // prepare response msg pReply->srcId = ths->myRaftId; pReply->destId = pMsg->srcId; - pReply->term = ths->raftStore.currentTerm; + pReply->term = raftStoreGetTerm(ths); pReply->success = false; pReply->matchIndex = SYNC_INDEX_INVALID; pReply->lastSendIndex = pMsg->prevLogIndex + 1; pReply->startTime = ths->startTime; - if (pMsg->term < ths->raftStore.currentTerm) { + if (pMsg->term < raftStoreGetTerm(ths)) { goto _SEND_RESPONSE; } - if (pMsg->term > ths->raftStore.currentTerm) { + if (pMsg->term > raftStoreGetTerm(ths)) { pReply->term = pMsg->term; } diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c index 44a29da3ea..f81699b9f6 100644 --- a/source/libs/sync/src/syncAppendEntriesReply.c +++ b/source/libs/sync/src/syncAppendEntriesReply.c @@ -50,19 +50,19 @@ int32_t syncNodeOnAppendEntriesReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) { } // drop stale response - if (pMsg->term < ths->raftStore.currentTerm) { + if (pMsg->term < raftStoreGetTerm(ths)) { syncLogRecvAppendEntriesReply(ths, pMsg, "drop stale response"); return 0; } if (ths->state == TAOS_SYNC_STATE_LEADER) { - if (pMsg->term > ths->raftStore.currentTerm) { + if (pMsg->term > raftStoreGetTerm(ths)) { syncLogRecvAppendEntriesReply(ths, pMsg, "error term"); syncNodeStepDown(ths, pMsg->term); return -1; } - ASSERT(pMsg->term == ths->raftStore.currentTerm); + ASSERT(pMsg->term == raftStoreGetTerm(ths)); sTrace("vgId:%d, received append entries reply. srcId:0x%016" PRIx64 ", term:%" PRId64 ", matchIndex:%" PRId64 "", pMsg->vgId, pMsg->srcId.addr, pMsg->term, pMsg->matchIndex); diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c index 6d256a735d..2501b4df8b 100644 --- a/source/libs/sync/src/syncCommit.c +++ b/source/libs/sync/src/syncCommit.c @@ -111,7 +111,7 @@ int64_t syncNodeCheckCommitIndex(SSyncNode* ths, SyncIndex indexLikely) { SyncIndex commitIndex = indexLikely; syncNodeUpdateCommitIndex(ths, commitIndex); sTrace("vgId:%d, agreed upon. 
role:%d, term:%" PRId64 ", index:%" PRId64 "", ths->vgId, ths->state, - ths->raftStore.currentTerm, commitIndex); + raftStoreGetTerm(ths), commitIndex); } return ths->commitIndex; } diff --git a/source/libs/sync/src/syncElection.c b/source/libs/sync/src/syncElection.c index 682ace83ec..e53b8ade1c 100644 --- a/source/libs/sync/src/syncElection.c +++ b/source/libs/sync/src/syncElection.c @@ -51,7 +51,7 @@ static int32_t syncNodeRequestVotePeers(SSyncNode* pNode) { SyncRequestVote* pMsg = rpcMsg.pCont; pMsg->srcId = pNode->myRaftId; pMsg->destId = pNode->peersId[i]; - pMsg->term = pNode->raftStore.currentTerm; + pMsg->term = raftStoreGetTerm(pNode); ret = syncNodeGetLastIndexTerm(pNode, &pMsg->lastLogIndex, &pMsg->lastLogTerm); if (ret < 0) { @@ -85,10 +85,12 @@ int32_t syncNodeElect(SSyncNode* pSyncNode) { // start election raftStoreNextTerm(pSyncNode); raftStoreClearVote(pSyncNode); - voteGrantedReset(pSyncNode->pVotesGranted, pSyncNode->raftStore.currentTerm); - votesRespondReset(pSyncNode->pVotesRespond, pSyncNode->raftStore.currentTerm); - syncNodeVoteForSelf(pSyncNode); + SyncTerm currentTerm = raftStoreGetTerm(pSyncNode); + voteGrantedReset(pSyncNode->pVotesGranted, currentTerm); + votesRespondReset(pSyncNode->pVotesRespond, currentTerm); + syncNodeVoteForSelf(pSyncNode, currentTerm); + if (voteGrantedMajority(pSyncNode->pVotesGranted)) { // only myself, to leader ASSERT(!pSyncNode->pVotesGranted->toLeader); diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 1d96412ba3..5935f8393a 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -41,7 +41,6 @@ static void syncNodeEqPingTimer(void* param, void* tmrId); static void syncNodeEqElectTimer(void* param, void* tmrId); static void syncNodeEqHeartbeatTimer(void* param, void* tmrId); -static int32_t syncNodeEqNoop(SSyncNode* ths); static int32_t syncNodeAppendNoop(SSyncNode* ths); static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId); static bool syncIsConfigChanged(const SSyncCfg* pOldCfg, const SSyncCfg* pNewCfg); @@ -437,55 +436,12 @@ bool syncNodeIsReadyForRead(SSyncNode* pSyncNode) { return false; } - if (pSyncNode->restoreFinish) { - return true; - } - - bool ready = false; - if (!pSyncNode->pFsm->FpApplyQueueEmptyCb(pSyncNode->pFsm)) { - // apply queue not empty - ready = false; - - } else { - if (!pSyncNode->pLogStore->syncLogIsEmpty(pSyncNode->pLogStore)) { - SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore); - SSyncRaftEntry* pEntry = NULL; - SLRUCache* pCache = pSyncNode->pLogStore->pCache; - LRUHandle* h = taosLRUCacheLookup(pCache, &lastIndex, sizeof(lastIndex)); - int32_t code = 0; - if (h) { - pEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, h); - code = 0; - - pSyncNode->pLogStore->cacheHit++; - sNTrace(pSyncNode, "hit cache index:%" PRId64 ", bytes:%u, %p", lastIndex, pEntry->bytes, pEntry); - - } else { - pSyncNode->pLogStore->cacheMiss++; - sNTrace(pSyncNode, "miss cache index:%" PRId64, lastIndex); - - code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, lastIndex, &pEntry); - } - - if (code == 0 && pEntry != NULL) { - if (pEntry->originalRpcType == TDMT_SYNC_NOOP && pEntry->term == pSyncNode->raftStore.currentTerm) { - ready = true; - } - - if (h) { - taosLRUCacheRelease(pCache, h, false); - } else { - syncEntryDestroy(pEntry); - } - } - } - } - - if (!ready) { + if (!pSyncNode->restoreFinish) { terrno = TSDB_CODE_SYN_RESTORING; + return false; } - return ready; + return true; } bool 
syncIsReadyForRead(int64_t rid) { @@ -664,7 +620,7 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_ int32_t code = syncNodeOnClientRequest(pSyncNode, pMsg, &retIndex); if (code == 0) { pMsg->info.conn.applyIndex = retIndex; - pMsg->info.conn.applyTerm = pSyncNode->raftStore.currentTerm; + pMsg->info.conn.applyTerm = raftStoreGetTerm(pSyncNode); sTrace("vgId:%d, propose optimized msg, index:%" PRId64 " type:%s", pSyncNode->vgId, retIndex, TMSG_INFO(pMsg->msgType)); return 1; @@ -922,7 +878,7 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) { // init TLA+ server vars pSyncNode->state = TAOS_SYNC_STATE_FOLLOWER; - if (raftStoreReadFile(pSyncNode) != 0) { + if (raftStoreOpen(pSyncNode) != 0) { sError("vgId:%d, failed to open raft store at path %s", pSyncNode->vgId, pSyncNode->raftStorePath); goto _error; } @@ -1223,7 +1179,12 @@ void syncNodeClose(SSyncNode* pSyncNode) { if (pSyncNode == NULL) return; sNInfo(pSyncNode, "sync close, node:%p", pSyncNode); + syncNodeStopPingTimer(pSyncNode); + syncNodeStopElectTimer(pSyncNode); + syncNodeStopHeartbeatTimer(pSyncNode); + syncNodeLogReplMgrDestroy(pSyncNode); + syncRespMgrDestroy(pSyncNode->pSyncRespMgr); pSyncNode->pSyncRespMgr = NULL; voteGrantedDestroy(pSyncNode->pVotesGranted); @@ -1239,10 +1200,6 @@ void syncNodeClose(SSyncNode* pSyncNode) { syncLogBufferDestroy(pSyncNode->pLogBuf); pSyncNode->pLogBuf = NULL; - syncNodeStopPingTimer(pSyncNode); - syncNodeStopElectTimer(pSyncNode); - syncNodeStopHeartbeatTimer(pSyncNode); - for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) { if (pSyncNode->senders[i] != NULL) { sDebug("vgId:%d, snapshot sender destroy while close, data:%p", pSyncNode->vgId, pSyncNode->senders[i]); @@ -1270,6 +1227,8 @@ void syncNodeClose(SSyncNode* pSyncNode) { taosMemoryFree(pSyncNode->pFsm); } + raftStoreClose(pSyncNode); + taosMemoryFree(pSyncNode); } @@ -1644,7 +1603,7 @@ _END: // raft state change -------------- void syncNodeUpdateTerm(SSyncNode* pSyncNode, SyncTerm term) { - if (term > pSyncNode->raftStore.currentTerm) { + if (term > raftStoreGetTerm(pSyncNode)) { raftStoreSetTerm(pSyncNode, term); char tmpBuf[64]; snprintf(tmpBuf, sizeof(tmpBuf), "update term to %" PRId64, term); @@ -1654,24 +1613,23 @@ void syncNodeUpdateTerm(SSyncNode* pSyncNode, SyncTerm term) { } void syncNodeUpdateTermWithoutStepDown(SSyncNode* pSyncNode, SyncTerm term) { - if (term > pSyncNode->raftStore.currentTerm) { + if (term > raftStoreGetTerm(pSyncNode)) { raftStoreSetTerm(pSyncNode, term); } } void syncNodeStepDown(SSyncNode* pSyncNode, SyncTerm newTerm) { - if (pSyncNode->raftStore.currentTerm > newTerm) { - sNTrace(pSyncNode, "step down, ignore, new-term:%" PRId64 ", current-term:%" PRId64, newTerm, - pSyncNode->raftStore.currentTerm); + SyncTerm currentTerm = raftStoreGetTerm(pSyncNode); + if (currentTerm > newTerm) { + sNTrace(pSyncNode, "step down, ignore, new-term:%" PRId64 ", current-term:%" PRId64, newTerm, currentTerm); return; } do { - sNTrace(pSyncNode, "step down, new-term:%" PRId64 ", current-term:%" PRId64, newTerm, - pSyncNode->raftStore.currentTerm); + sNTrace(pSyncNode, "step down, new-term:%" PRId64 ", current-term:%" PRId64, newTerm, currentTerm); } while (0); - if (pSyncNode->raftStore.currentTerm < newTerm) { + if (currentTerm < newTerm) { raftStoreSetTerm(pSyncNode, newTerm); char tmpBuf[64]; snprintf(tmpBuf, sizeof(tmpBuf), "step down, update term to %" PRId64, newTerm); @@ -1831,8 +1789,8 @@ void syncNodeCandidate2Leader(SSyncNode* pSyncNode) { SyncIndex lastIndex = 
pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore); ASSERT(lastIndex >= 0); - sInfo("vgId:%d, become leader. term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64 "", - pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex); + sInfo("vgId:%d, become leader. term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64 "", pSyncNode->vgId, + raftStoreGetTerm(pSyncNode), pSyncNode->commitIndex, lastIndex); } bool syncNodeIsMnode(SSyncNode* pSyncNode) { return (pSyncNode->vgId == 1); } @@ -1851,7 +1809,7 @@ void syncNodeFollower2Candidate(SSyncNode* pSyncNode) { pSyncNode->state = TAOS_SYNC_STATE_CANDIDATE; SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore); sInfo("vgId:%d, become candidate from follower. term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64, - pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex); + pSyncNode->vgId, raftStoreGetTerm(pSyncNode), pSyncNode->commitIndex, lastIndex); sNTrace(pSyncNode, "follower to candidate"); } @@ -1861,7 +1819,7 @@ void syncNodeLeader2Follower(SSyncNode* pSyncNode) { syncNodeBecomeFollower(pSyncNode, "leader to follower"); SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore); sInfo("vgId:%d, become follower from leader. term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64, - pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex); + pSyncNode->vgId, raftStoreGetTerm(pSyncNode), pSyncNode->commitIndex, lastIndex); sNTrace(pSyncNode, "leader to follower"); } @@ -1871,7 +1829,7 @@ void syncNodeCandidate2Follower(SSyncNode* pSyncNode) { syncNodeBecomeFollower(pSyncNode, "candidate to follower"); SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore); sInfo("vgId:%d, become follower from candidate. 
term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64, - pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex); + pSyncNode->vgId, raftStoreGetTerm(pSyncNode), pSyncNode->commitIndex, lastIndex); sNTrace(pSyncNode, "candidate to follower"); } @@ -1879,7 +1837,7 @@ void syncNodeCandidate2Follower(SSyncNode* pSyncNode) { // just called by syncNodeVoteForSelf // need assert void syncNodeVoteForTerm(SSyncNode* pSyncNode, SyncTerm term, SRaftId* pRaftId) { - ASSERT(term == pSyncNode->raftStore.currentTerm); + ASSERT(term == raftStoreGetTerm(pSyncNode)); bool voted = raftStoreHasVoted(pSyncNode); ASSERT(!voted); @@ -1887,8 +1845,8 @@ void syncNodeVoteForTerm(SSyncNode* pSyncNode, SyncTerm term, SRaftId* pRaftId) } // simulate get vote from outside -void syncNodeVoteForSelf(SSyncNode* pSyncNode) { - syncNodeVoteForTerm(pSyncNode, pSyncNode->raftStore.currentTerm, &pSyncNode->myRaftId); +void syncNodeVoteForSelf(SSyncNode* pSyncNode, SyncTerm currentTerm) { + syncNodeVoteForTerm(pSyncNode, currentTerm, &pSyncNode->myRaftId); SRpcMsg rpcMsg = {0}; int32_t ret = syncBuildRequestVoteReply(&rpcMsg, pSyncNode->vgId); @@ -1897,7 +1855,7 @@ void syncNodeVoteForSelf(SSyncNode* pSyncNode) { SyncRequestVoteReply* pMsg = rpcMsg.pCont; pMsg->srcId = pSyncNode->myRaftId; pMsg->destId = pSyncNode->myRaftId; - pMsg->term = pSyncNode->raftStore.currentTerm; + pMsg->term = currentTerm; pMsg->voteGranted = true; voteGrantedVote(pSyncNode->pVotesGranted, pMsg); @@ -2210,7 +2168,7 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { SyncHeartbeat* pSyncMsg = rpcMsg.pCont; pSyncMsg->srcId = pSyncNode->myRaftId; pSyncMsg->destId = pData->destId; - pSyncMsg->term = pSyncNode->raftStore.currentTerm; + pSyncMsg->term = raftStoreGetTerm(pSyncNode); pSyncMsg->commitIndex = pSyncNode->commitIndex; pSyncMsg->minMatchIndex = syncMinMatchIndex(pSyncNode); pSyncMsg->privateTerm = 0; @@ -2249,30 +2207,6 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) { syncNodeRelease(pSyncNode); } -static int32_t syncNodeEqNoop(SSyncNode* pNode) { - if (pNode->state == TAOS_SYNC_STATE_LEADER) { - terrno = TSDB_CODE_SYN_NOT_LEADER; - return -1; - } - - SyncIndex index = pNode->pLogStore->syncLogWriteIndex(pNode->pLogStore); - SyncTerm term = pNode->raftStore.currentTerm; - SSyncRaftEntry* pEntry = syncEntryBuildNoop(term, index, pNode->vgId); - if (pEntry == NULL) return -1; - - SRpcMsg rpcMsg = {0}; - int32_t code = syncBuildClientRequestFromNoopEntry(&rpcMsg, pEntry, pNode->vgId); - syncEntryDestroy(pEntry); - - sNTrace(pNode, "propose msg, type:noop"); - code = (*pNode->syncEqMsg)(pNode->msgcb, &rpcMsg); - if (code != 0) { - sError("failed to propose noop msg while enqueue since %s", terrstr()); - } - - return code; -} - static void deleteCacheEntry(const void* key, size_t keyLen, void* value) { taosMemoryFree(value); } int32_t syncCacheEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, LRUHandle** h) { @@ -2302,7 +2236,7 @@ int32_t syncNodeAppend(SSyncNode* ths, SSyncRaftEntry* pEntry) { if (syncLogBufferAppend(ths->pLogBuf, ths, pEntry) < 0) { sError("vgId:%d, failed to enqueue sync log buffer, index:%" PRId64, ths->vgId, pEntry->index); terrno = TSDB_CODE_SYN_BUFFER_FULL; - (void)syncLogFsmExecute(ths, ths->pFsm, ths->state, ths->raftStore.currentTerm, pEntry, TSDB_CODE_SYN_BUFFER_FULL); + (void)syncLogFsmExecute(ths, ths->pFsm, ths->state, raftStoreGetTerm(ths), pEntry, TSDB_CODE_SYN_BUFFER_FULL); syncEntryDestroy(pEntry); return -1; } @@ -2375,7 
+2309,7 @@ bool syncNodeSnapshotRecving(SSyncNode* pSyncNode) { static int32_t syncNodeAppendNoop(SSyncNode* ths) { SyncIndex index = syncLogBufferGetEndIndex(ths->pLogBuf); - SyncTerm term = ths->raftStore.currentTerm; + SyncTerm term = raftStoreGetTerm(ths); SSyncRaftEntry* pEntry = syncEntryBuildNoop(term, index, ths->vgId); if (pEntry == NULL) { @@ -2391,7 +2325,7 @@ static int32_t syncNodeAppendNoopOld(SSyncNode* ths) { int32_t ret = 0; SyncIndex index = ths->pLogStore->syncLogWriteIndex(ths->pLogStore); - SyncTerm term = ths->raftStore.currentTerm; + SyncTerm term = raftStoreGetTerm(ths); SSyncRaftEntry* pEntry = syncEntryBuildNoop(term, index, ths->vgId); ASSERT(pEntry != NULL); @@ -2429,16 +2363,17 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) { SRpcMsg rpcMsg = {0}; (void)syncBuildHeartbeatReply(&rpcMsg, ths->vgId); + SyncTerm currentTerm = raftStoreGetTerm(ths); SyncHeartbeatReply* pMsgReply = rpcMsg.pCont; pMsgReply->destId = pMsg->srcId; pMsgReply->srcId = ths->myRaftId; - pMsgReply->term = ths->raftStore.currentTerm; + pMsgReply->term = currentTerm; pMsgReply->privateTerm = 8864; // magic number pMsgReply->startTime = ths->startTime; pMsgReply->timeStamp = tsMs; - if (pMsg->term == ths->raftStore.currentTerm && ths->state != TAOS_SYNC_STATE_LEADER) { + if (pMsg->term == currentTerm && ths->state != TAOS_SYNC_STATE_LEADER) { syncIndexMgrSetRecvTime(ths->pNextIndex, &(pMsg->srcId), tsMs); syncNodeResetElectTimer(ths); @@ -2467,7 +2402,7 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) { } } - if (pMsg->term >= ths->raftStore.currentTerm && ths->state != TAOS_SYNC_STATE_FOLLOWER) { + if (pMsg->term >= currentTerm && ths->state != TAOS_SYNC_STATE_FOLLOWER) { // syncNodeStepDown(ths, pMsg->term); SRpcMsg rpcMsgLocalCmd = {0}; (void)syncBuildLocalCmd(&rpcMsgLocalCmd, ths->vgId); @@ -2576,7 +2511,7 @@ int32_t syncNodeOnClientRequest(SSyncNode* ths, SRpcMsg* pMsg, SyncIndex* pRetIn int32_t code = 0; SyncIndex index = syncLogBufferGetEndIndex(ths->pLogBuf); - SyncTerm term = ths->raftStore.currentTerm; + SyncTerm term = raftStoreGetTerm(ths); SSyncRaftEntry* pEntry = NULL; if (pMsg->msgType == TDMT_SYNC_CLIENT_REQUEST) { pEntry = syncEntryBuildFromClientRequest(pMsg->pCont, term, index); @@ -2620,73 +2555,6 @@ const char* syncStr(ESyncState state) { } } -#if 0 -int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* pEntry) { - if (ths->state != TAOS_SYNC_STATE_FOLLOWER) { - sNTrace(ths, "I am not follower, can not do leader transfer"); - return 0; - } - - if (!ths->restoreFinish) { - sNTrace(ths, "restore not finish, can not do leader transfer"); - return 0; - } - - if (pEntry->term < ths->raftStore.currentTerm) { - sNTrace(ths, "little term:%" PRId64 ", can not do leader transfer", pEntry->term); - return 0; - } - - if (pEntry->index < syncNodeGetLastIndex(ths)) { - sNTrace(ths, "little index:%" PRId64 ", can not do leader transfer", pEntry->index); - return 0; - } - - /* - if (ths->vgId > 1) { - sNTrace(ths, "I am vnode, can not do leader transfer"); - return 0; - } - */ - - SyncLeaderTransfer* pSyncLeaderTransfer = pRpcMsg->pCont; - sNTrace(ths, "do leader transfer, index:%" PRId64, pEntry->index); - - bool sameId = syncUtilSameId(&(pSyncLeaderTransfer->newLeaderId), &(ths->myRaftId)); - bool sameNodeInfo = strcmp(pSyncLeaderTransfer->newNodeInfo.nodeFqdn, ths->myNodeInfo.nodeFqdn) == 0 && - pSyncLeaderTransfer->newNodeInfo.nodePort == ths->myNodeInfo.nodePort; - - bool same = sameId || sameNodeInfo; - if (same) 
{ - // reset elect timer now! - int32_t electMS = 1; - int32_t ret = syncNodeRestartElectTimer(ths, electMS); - ASSERT(ret == 0); - - sNTrace(ths, "maybe leader transfer to %s:%d %" PRId64, pSyncLeaderTransfer->newNodeInfo.nodeFqdn, - pSyncLeaderTransfer->newNodeInfo.nodePort, pSyncLeaderTransfer->newLeaderId.addr); - } - - if (ths->pFsm->FpLeaderTransferCb != NULL) { - SFsmCbMeta cbMeta = { - .code = 0, - .currentTerm = ths->raftStore.currentTerm, - .flag = 0, - .index = pEntry->index, - .lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, pEntry->index), - .isWeak = pEntry->isWeak, - .seqNum = pEntry->seqNum, - .state = ths->state, - .term = pEntry->term, - }; - ths->pFsm->FpLeaderTransferCb(ths->pFsm, pRpcMsg, &cbMeta); - } - - return 0; -} - -#endif - int32_t syncNodeUpdateNewConfigIndex(SSyncNode* ths, SSyncCfg* pNewCfg) { for (int32_t i = 0; i < pNewCfg->replicaNum; ++i) { SRaftId raftId = { diff --git a/source/libs/sync/src/syncMessage.c b/source/libs/sync/src/syncMessage.c index 7d534c671e..2a44588eef 100644 --- a/source/libs/sync/src/syncMessage.c +++ b/source/libs/sync/src/syncMessage.c @@ -176,7 +176,7 @@ int32_t syncBuildAppendEntriesFromRaftEntry(SSyncNode* pNode, SSyncRaftEntry* pE pMsg->prevLogTerm = prevLogTerm; pMsg->vgId = pNode->vgId; pMsg->srcId = pNode->myRaftId; - pMsg->term = pNode->raftStore.currentTerm; + pMsg->term = raftStoreGetTerm(pNode); pMsg->commitIndex = pNode->commitIndex; pMsg->privateTerm = 0; return 0; diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index b3eb5684cf..c9ff2d2dcc 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -61,6 +61,7 @@ int32_t syncLogBufferAppend(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt SSyncRaftEntry* pMatch = pBuf->entries[(index - 1 + pBuf->size) % pBuf->size].pItem; ASSERTS(pMatch != NULL, "no matched log entry"); ASSERT(pMatch->index + 1 == index); + ASSERT(pMatch->term <= pEntry->term); SSyncLogBufEntry tmp = {.pItem = pEntry, .prevLogIndex = pMatch->index, .prevLogTerm = pMatch->term}; pBuf->entries[index % pBuf->size] = tmp; @@ -514,7 +515,7 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm SSyncLogStore* pLogStore = pNode->pLogStore; SSyncFSM* pFsm = pNode->pFsm; ESyncState role = pNode->state; - SyncTerm term = pNode->raftStore.currentTerm; + SyncTerm currentTerm = raftStoreGetTerm(pNode); SyncGroupId vgId = pNode->vgId; int32_t ret = -1; int64_t upperIndex = TMIN(commitIndex, pBuf->matchIndex); @@ -529,7 +530,7 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm } sTrace("vgId:%d, commit. log buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 "), role:%d, term:%" PRId64, - pNode->vgId, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex, role, term); + pNode->vgId, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex, role, currentTerm); // execute in fsm for (int64_t index = pBuf->commitIndex + 1; index <= upperIndex; index++) { @@ -545,16 +546,16 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm pEntry->term, TMSG_INFO(pEntry->originalRpcType)); } - if (syncLogFsmExecute(pNode, pFsm, role, term, pEntry, 0) != 0) { + if (syncLogFsmExecute(pNode, pFsm, role, currentTerm, pEntry, 0) != 0) { sError("vgId:%d, failed to execute sync log entry. 
index:%" PRId64 ", term:%" PRId64 ", role:%d, current term:%" PRId64, - vgId, pEntry->index, pEntry->term, role, term); + vgId, pEntry->index, pEntry->term, role, currentTerm); goto _out; } pBuf->commitIndex = index; sTrace("vgId:%d, committed index:%" PRId64 ", term:%" PRId64 ", role:%d, current term:%" PRId64 "", pNode->vgId, - pEntry->index, pEntry->term, role, term); + pEntry->index, pEntry->term, role, currentTerm); if (!inBuf) { syncEntryDestroy(pEntry); @@ -563,7 +564,7 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm } // recycle - SyncIndex until = pBuf->commitIndex - (pBuf->size >> 4); + SyncIndex until = pBuf->commitIndex - TSDB_SYNC_LOG_BUFFER_RETENTION; for (SyncIndex index = pBuf->startIndex; index < until; index++) { SSyncRaftEntry* pEntry = pBuf->entries[(index + pBuf->size) % pBuf->size].pItem; ASSERT(pEntry != NULL); @@ -576,7 +577,7 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm _out: // mark as restored if needed if (!pNode->restoreFinish && pBuf->commitIndex >= pNode->commitIndex && pEntry != NULL && - pNode->raftStore.currentTerm <= pEntry->term) { + currentTerm <= pEntry->term) { pNode->pFsm->FpRestoreFinishCb(pNode->pFsm); pNode->restoreFinish = true; sInfo("vgId:%d, restore finished. log buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")", pNode->vgId, diff --git a/source/libs/sync/src/syncRaftStore.c b/source/libs/sync/src/syncRaftStore.c index 197d1463fd..68e735cf0d 100644 --- a/source/libs/sync/src/syncRaftStore.c +++ b/source/libs/sync/src/syncRaftStore.c @@ -18,6 +18,9 @@ #include "syncUtil.h" #include "tjson.h" +int32_t raftStoreReadFile(SSyncNode *pNode); +int32_t raftStoreWriteFile(SSyncNode *pNode); + static int32_t raftStoreDecode(const SJson *pJson, SRaftStore *pStore) { int32_t code = 0; @@ -150,27 +153,53 @@ _OVER: return code; } +int32_t raftStoreOpen(SSyncNode *pNode) { + taosThreadMutexInit(&pNode->raftStore.mutex, NULL); + return raftStoreReadFile(pNode); +} + +void raftStoreClose(SSyncNode *pNode) { taosThreadMutexDestroy(&pNode->raftStore.mutex); } + bool raftStoreHasVoted(SSyncNode *pNode) { + taosThreadMutexLock(&pNode->raftStore.mutex); bool b = syncUtilEmptyId(&pNode->raftStore.voteFor); + taosThreadMutexUnlock(&pNode->raftStore.mutex); return (!b); } void raftStoreVote(SSyncNode *pNode, SRaftId *pRaftId) { + taosThreadMutexLock(&pNode->raftStore.mutex); pNode->raftStore.voteFor = *pRaftId; (void)raftStoreWriteFile(pNode); + taosThreadMutexUnlock(&pNode->raftStore.mutex); } void raftStoreClearVote(SSyncNode *pNode) { + taosThreadMutexLock(&pNode->raftStore.mutex); pNode->raftStore.voteFor = EMPTY_RAFT_ID; (void)raftStoreWriteFile(pNode); + taosThreadMutexUnlock(&pNode->raftStore.mutex); } void raftStoreNextTerm(SSyncNode *pNode) { + taosThreadMutexLock(&pNode->raftStore.mutex); pNode->raftStore.currentTerm++; (void)raftStoreWriteFile(pNode); + taosThreadMutexUnlock(&pNode->raftStore.mutex); } void raftStoreSetTerm(SSyncNode *pNode, SyncTerm term) { - pNode->raftStore.currentTerm = term; - (void)raftStoreWriteFile(pNode); + taosThreadMutexLock(&pNode->raftStore.mutex); + if (pNode->raftStore.currentTerm < term) { + pNode->raftStore.currentTerm = term; + (void)raftStoreWriteFile(pNode); + } + taosThreadMutexUnlock(&pNode->raftStore.mutex); +} + +SyncTerm raftStoreGetTerm(SSyncNode *pNode) { + taosThreadMutexLock(&pNode->raftStore.mutex); + SyncTerm term = pNode->raftStore.currentTerm; + taosThreadMutexUnlock(&pNode->raftStore.mutex); + return term; } diff --git 
a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c index 3df203221b..8cdf821cff 100644 --- a/source/libs/sync/src/syncReplication.c +++ b/source/libs/sync/src/syncReplication.c @@ -107,7 +107,7 @@ int32_t syncNodeHeartbeatPeers(SSyncNode* pSyncNode) { SyncHeartbeat* pSyncMsg = rpcMsg.pCont; pSyncMsg->srcId = pSyncNode->myRaftId; pSyncMsg->destId = pSyncNode->peersId[i]; - pSyncMsg->term = pSyncNode->raftStore.currentTerm; + pSyncMsg->term = raftStoreGetTerm(pSyncNode); pSyncMsg->commitIndex = pSyncNode->commitIndex; pSyncMsg->minMatchIndex = syncMinMatchIndex(pSyncNode); pSyncMsg->privateTerm = 0; diff --git a/source/libs/sync/src/syncRequestVote.c b/source/libs/sync/src/syncRequestVote.c index 069ea2ea88..2fda2a19b8 100644 --- a/source/libs/sync/src/syncRequestVote.c +++ b/source/libs/sync/src/syncRequestVote.c @@ -97,15 +97,14 @@ int32_t syncNodeOnRequestVote(SSyncNode* ths, const SRpcMsg* pRpcMsg) { } bool logOK = syncNodeOnRequestVoteLogOK(ths, pMsg); - // maybe update term - if (pMsg->term > ths->raftStore.currentTerm) { + if (pMsg->term > raftStoreGetTerm(ths)) { syncNodeStepDown(ths, pMsg->term); - // syncNodeUpdateTerm(ths, pMsg->term); } - ASSERT(pMsg->term <= ths->raftStore.currentTerm); + SyncTerm currentTerm = raftStoreGetTerm(ths); + ASSERT(pMsg->term <= currentTerm); - bool grant = (pMsg->term == ths->raftStore.currentTerm) && logOK && + bool grant = (pMsg->term == currentTerm) && logOK && ((!raftStoreHasVoted(ths)) || (syncUtilSameId(&ths->raftStore.voteFor, &pMsg->srcId))); if (grant) { // maybe has already voted for pMsg->srcId @@ -113,7 +112,7 @@ int32_t syncNodeOnRequestVote(SSyncNode* ths, const SRpcMsg* pRpcMsg) { raftStoreVote(ths, &(pMsg->srcId)); // candidate ? - syncNodeStepDown(ths, ths->raftStore.currentTerm); + syncNodeStepDown(ths, currentTerm); // forbid elect for this round syncNodeResetElectTimer(ths); @@ -127,8 +126,9 @@ int32_t syncNodeOnRequestVote(SSyncNode* ths, const SRpcMsg* pRpcMsg) { SyncRequestVoteReply* pReply = rpcMsg.pCont; pReply->srcId = ths->myRaftId; pReply->destId = pMsg->srcId; - pReply->term = ths->raftStore.currentTerm; + pReply->term = currentTerm; pReply->voteGranted = grant; + ASSERT(!grant || pMsg->term == pReply->term); // trace log syncLogRecvRequestVote(ths, pMsg, pReply->voteGranted, ""); diff --git a/source/libs/sync/src/syncRequestVoteReply.c b/source/libs/sync/src/syncRequestVoteReply.c index a0d6cbf597..25c9f813a6 100644 --- a/source/libs/sync/src/syncRequestVoteReply.c +++ b/source/libs/sync/src/syncRequestVoteReply.c @@ -47,27 +47,21 @@ int32_t syncNodeOnRequestVoteReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) { syncLogRecvRequestVoteReply(ths, pMsg, "not in my config"); return -1; } - + SyncTerm currentTerm = raftStoreGetTerm(ths); // drop stale response - if (pMsg->term < ths->raftStore.currentTerm) { + if (pMsg->term < currentTerm) { syncLogRecvRequestVoteReply(ths, pMsg, "drop stale response"); return -1; } - // ASSERT(!(pMsg->term > ths->raftStore.currentTerm)); - // no need this code, because if I receive reply.term, then I must have sent for that term. 
- // if (pMsg->term > ths->raftStore.currentTerm) { - // syncNodeUpdateTerm(ths, pMsg->term); - // } - - if (pMsg->term > ths->raftStore.currentTerm) { + if (pMsg->term > currentTerm) { syncLogRecvRequestVoteReply(ths, pMsg, "error term"); syncNodeStepDown(ths, pMsg->term); return -1; } syncLogRecvRequestVoteReply(ths, pMsg, ""); - ASSERT(pMsg->term == ths->raftStore.currentTerm); + ASSERT(pMsg->term == currentTerm); // This tallies votes even when the current state is not Candidate, // but they won't be looked at, so it doesn't matter. diff --git a/source/libs/sync/src/syncRespMgr.c b/source/libs/sync/src/syncRespMgr.c index 9373eccaef..f9f14c2e00 100644 --- a/source/libs/sync/src/syncRespMgr.c +++ b/source/libs/sync/src/syncRespMgr.c @@ -143,7 +143,7 @@ static void syncRespCleanByTTL(SSyncRespMgr *pObj, int64_t ttl, bool rsp) { .state = pNode->state, .seqNum = *pSeqNum, .term = SYNC_TERM_INVALID, - .currentTerm = pNode->raftStore.currentTerm, + .currentTerm = SYNC_TERM_INVALID, .flag = 0, }; diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index 18f263cc95..a83a19928e 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -43,7 +43,7 @@ SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode, int32_t replicaI pSender->sendingMS = SYNC_SNAPSHOT_RETRY_MS; pSender->pSyncNode = pSyncNode; pSender->replicaIndex = replicaIndex; - pSender->term = pSyncNode->raftStore.currentTerm; + pSender->term = raftStoreGetTerm(pSyncNode); pSender->startTime = 0; pSender->endTime = 0; pSender->pSyncNode->pFsm->FpGetSnapshotInfo(pSender->pSyncNode->pFsm, &pSender->snapshot); @@ -90,7 +90,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender) { memset(&pSender->lastConfig, 0, sizeof(pSender->lastConfig)); pSender->sendingMS = 0; - pSender->term = pSender->pSyncNode->raftStore.currentTerm; + pSender->term = raftStoreGetTerm(pSender->pSyncNode); pSender->startTime = taosGetTimestampMs(); pSender->lastSendTime = pSender->startTime; pSender->finish = false; @@ -105,7 +105,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender) { SyncSnapshotSend *pMsg = rpcMsg.pCont; pMsg->srcId = pSender->pSyncNode->myRaftId; pMsg->destId = pSender->pSyncNode->replicasId[pSender->replicaIndex]; - pMsg->term = pSender->pSyncNode->raftStore.currentTerm; + pMsg->term = raftStoreGetTerm(pSender->pSyncNode); pMsg->beginIndex = pSender->snapshotParam.start; pMsg->lastIndex = pSender->snapshot.lastApplyIndex; pMsg->lastTerm = pSender->snapshot.lastApplyTerm; @@ -185,7 +185,7 @@ static int32_t snapshotSend(SSyncSnapshotSender *pSender) { SyncSnapshotSend *pMsg = rpcMsg.pCont; pMsg->srcId = pSender->pSyncNode->myRaftId; pMsg->destId = pSender->pSyncNode->replicasId[pSender->replicaIndex]; - pMsg->term = pSender->pSyncNode->raftStore.currentTerm; + pMsg->term = raftStoreGetTerm(pSender->pSyncNode); pMsg->beginIndex = pSender->snapshotParam.start; pMsg->lastIndex = pSender->snapshot.lastApplyIndex; pMsg->lastTerm = pSender->snapshot.lastApplyTerm; @@ -226,7 +226,7 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) { SyncSnapshotSend *pMsg = rpcMsg.pCont; pMsg->srcId = pSender->pSyncNode->myRaftId; pMsg->destId = pSender->pSyncNode->replicasId[pSender->replicaIndex]; - pMsg->term = pSender->pSyncNode->raftStore.currentTerm; + pMsg->term = raftStoreGetTerm(pSender->pSyncNode); pMsg->beginIndex = pSender->snapshotParam.start; pMsg->lastIndex = pSender->snapshot.lastApplyIndex; pMsg->lastTerm = pSender->snapshot.lastApplyTerm; @@ -314,7 
+314,7 @@ SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode, SRaftId from pReceiver->pWriter = NULL; pReceiver->pSyncNode = pSyncNode; pReceiver->fromId = fromId; - pReceiver->term = pSyncNode->raftStore.currentTerm; + pReceiver->term = raftStoreGetTerm(pSyncNode); pReceiver->snapshot.data = NULL; pReceiver->snapshot.lastApplyIndex = SYNC_INDEX_INVALID; pReceiver->snapshot.lastApplyTerm = 0; @@ -380,7 +380,7 @@ void snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *p pReceiver->start = true; pReceiver->ack = SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT; - pReceiver->term = pReceiver->pSyncNode->raftStore.currentTerm; + pReceiver->term = raftStoreGetTerm(pReceiver->pSyncNode); pReceiver->fromId = pPreMsg->srcId; pReceiver->startTime = pPreMsg->startTime; @@ -437,9 +437,8 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap } // maybe update term - if (pReceiver->snapshot.lastApplyTerm > pReceiver->pSyncNode->raftStore.currentTerm) { - pReceiver->pSyncNode->raftStore.currentTerm = pReceiver->snapshot.lastApplyTerm; - (void)raftStoreWriteFile(pReceiver->pSyncNode); + if (pReceiver->snapshot.lastApplyTerm > raftStoreGetTerm(pReceiver->pSyncNode)) { + raftStoreSetTerm(pReceiver->pSyncNode, pReceiver->snapshot.lastApplyTerm); } // stop writer, apply data @@ -584,7 +583,7 @@ _SEND_REPLY: SyncSnapshotRsp *pRspMsg = rpcMsg.pCont; pRspMsg->srcId = pSyncNode->myRaftId; pRspMsg->destId = pMsg->srcId; - pRspMsg->term = pSyncNode->raftStore.currentTerm; + pRspMsg->term = raftStoreGetTerm(pSyncNode); pRspMsg->lastIndex = pMsg->lastIndex; pRspMsg->lastTerm = pMsg->lastTerm; pRspMsg->startTime = pReceiver->startTime; @@ -640,7 +639,7 @@ _SEND_REPLY: SyncSnapshotRsp *pRspMsg = rpcMsg.pCont; pRspMsg->srcId = pSyncNode->myRaftId; pRspMsg->destId = pMsg->srcId; - pRspMsg->term = pSyncNode->raftStore.currentTerm; + pRspMsg->term = raftStoreGetTerm(pSyncNode); pRspMsg->lastIndex = pMsg->lastIndex; pRspMsg->lastTerm = pMsg->lastTerm; pRspMsg->startTime = pReceiver->startTime; @@ -690,7 +689,7 @@ static int32_t syncNodeOnSnapshotReceive(SSyncNode *pSyncNode, SyncSnapshotSend SyncSnapshotRsp *pRspMsg = rpcMsg.pCont; pRspMsg->srcId = pSyncNode->myRaftId; pRspMsg->destId = pMsg->srcId; - pRspMsg->term = pSyncNode->raftStore.currentTerm; + pRspMsg->term = raftStoreGetTerm(pSyncNode); pRspMsg->lastIndex = pMsg->lastIndex; pRspMsg->lastTerm = pMsg->lastTerm; pRspMsg->startTime = pReceiver->startTime; @@ -737,7 +736,7 @@ static int32_t syncNodeOnSnapshotEnd(SSyncNode *pSyncNode, SyncSnapshotSend *pMs SyncSnapshotRsp *pRspMsg = rpcMsg.pCont; pRspMsg->srcId = pSyncNode->myRaftId; pRspMsg->destId = pMsg->srcId; - pRspMsg->term = pSyncNode->raftStore.currentTerm; + pRspMsg->term = raftStoreGetTerm(pSyncNode); pRspMsg->lastIndex = pMsg->lastIndex; pRspMsg->lastTerm = pMsg->lastTerm; pRspMsg->startTime = pReceiver->startTime; @@ -786,13 +785,13 @@ int32_t syncNodeOnSnapshot(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { return -1; } - if (pMsg->term < pSyncNode->raftStore.currentTerm) { + if (pMsg->term < raftStoreGetTerm(pSyncNode)) { syncLogRecvSyncSnapshotSend(pSyncNode, pMsg, "reject since small term"); terrno = TSDB_CODE_SYN_INTERNAL_ERROR; return -1; } - if (pMsg->term > pSyncNode->raftStore.currentTerm) { + if (pMsg->term > raftStoreGetTerm(pSyncNode)) { syncNodeStepDown(pSyncNode, pMsg->term); } syncNodeResetElectTimer(pSyncNode); @@ -800,7 +799,7 @@ int32_t syncNodeOnSnapshot(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { // state, term, seq/ack int32_t code 
= 0; if (pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER) { - if (pMsg->term == pSyncNode->raftStore.currentTerm) { + if (pMsg->term == raftStoreGetTerm(pSyncNode)) { if (pMsg->seq == SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT) { syncLogRecvSyncSnapshotSend(pSyncNode, pMsg, "process seq pre-snapshot"); code = syncNodeOnSnapshotPrep(pSyncNode, pMsg); @@ -884,7 +883,7 @@ static int32_t syncNodeOnSnapshotPrepRsp(SSyncNode *pSyncNode, SSyncSnapshotSend SyncSnapshotSend *pSendMsg = rpcMsg.pCont; pSendMsg->srcId = pSender->pSyncNode->myRaftId; pSendMsg->destId = pSender->pSyncNode->replicasId[pSender->replicaIndex]; - pSendMsg->term = pSender->pSyncNode->raftStore.currentTerm; + pSendMsg->term = raftStoreGetTerm(pSender->pSyncNode); pSendMsg->beginIndex = pSender->snapshotParam.start; pSendMsg->lastIndex = pSender->snapshot.lastApplyIndex; pSendMsg->lastTerm = pSender->snapshot.lastApplyTerm; @@ -943,10 +942,11 @@ int32_t syncNodeOnSnapshotRsp(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) { goto _ERROR; } - if (pMsg->term != pSyncNode->raftStore.currentTerm) { + SyncTerm currentTerm = raftStoreGetTerm(pSyncNode); + if (pMsg->term != currentTerm) { syncLogRecvSyncSnapshotRsp(pSyncNode, pMsg, "snapshot sender and receiver term not match"); sSError(pSender, "snapshot sender term not equal, msg term:%" PRId64 " currentTerm:%" PRId64, pMsg->term, - pSyncNode->raftStore.currentTerm); + currentTerm); terrno = TSDB_CODE_SYN_INTERNAL_ERROR; goto _ERROR; } diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index 97641b8f41..a519c76cda 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -154,7 +154,7 @@ static void syncPeerState2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) { void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNode* pNode, const char* format, ...) 
{ if (pNode == NULL || pNode->pLogStore == NULL) return; - int64_t currentTerm = pNode->raftStore.currentTerm; + int64_t currentTerm = raftStoreGetTerm(pNode); // save error code, otherwise it will be overwritten int32_t errCode = terrno; @@ -260,7 +260,7 @@ void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dfla pNode->vgId, eventLog, syncStr(pNode->state), pSender, pSender->snapshotParam.start, pSender->snapshotParam.end, pSender->snapshot.lastApplyIndex, pSender->snapshot.lastApplyTerm, pSender->snapshot.lastConfigIndex, pSender->seq, pSender->ack, pSender->finish, pSender->replicaIndex, - DID(&pNode->replicasId[pSender->replicaIndex]), pNode->raftStore.currentTerm, pNode->commitIndex, + DID(&pNode->replicasId[pSender->replicaIndex]), raftStoreGetTerm(pNode), pNode->commitIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, pNode->raftCfg.isStandBy, pNode->raftCfg.batchSize, pNode->replicaNum, pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode), pNode->electTimerLogicClock, @@ -308,7 +308,7 @@ void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t df pNode->vgId, eventLog, syncStr(pNode->state), pReceiver, pReceiver->start, pReceiver->ack, pReceiver->term, pReceiver->startTime, DID(&pReceiver->fromId), pReceiver->snapshotParam.start, pReceiver->snapshotParam.end, pReceiver->snapshot.lastApplyIndex, pReceiver->snapshot.lastApplyTerm, pReceiver->snapshot.lastConfigIndex, - pNode->raftStore.currentTerm, pNode->commitIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, + raftStoreGetTerm(pNode), pNode->commitIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, pNode->raftCfg.isStandBy, pNode->raftCfg.batchSize, pNode->replicaNum, pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode), pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr); diff --git a/source/libs/sync/test/sync_test_lib/src/syncMainDebug.c b/source/libs/sync/test/sync_test_lib/src/syncMainDebug.c index 1dbf4fb4fb..18a75934fd 100644 --- a/source/libs/sync/test/sync_test_lib/src/syncMainDebug.c +++ b/source/libs/sync/test/sync_test_lib/src/syncMainDebug.c @@ -199,7 +199,7 @@ inline char* syncNode2SimpleStr(const SSyncNode* pSyncNode) { ", sby:%d, " "r-num:%d, " "lcfg:%" PRId64 ", chging:%d, rsto:%d", - pSyncNode->vgId, syncStr(pSyncNode->state), pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, + pSyncNode->vgId, syncStr(pSyncNode->state), raftStoreGetTerm(pSyncNode), pSyncNode->commitIndex, logBeginIndex, logLastIndex, snapshot.lastApplyIndex, pSyncNode->raftCfg.isStandBy, pSyncNode->replicaNum, pSyncNode->raftCfg.lastConfigIndex, pSyncNode->changing, pSyncNode->restoreFinish); diff --git a/source/libs/sync/test/sync_test_lib/src/syncSnapshotDebug.c b/source/libs/sync/test/sync_test_lib/src/syncSnapshotDebug.c index d8740de16a..2edcb0ad4d 100644 --- a/source/libs/sync/test/sync_test_lib/src/syncSnapshotDebug.c +++ b/source/libs/sync/test/sync_test_lib/src/syncSnapshotDebug.c @@ -137,7 +137,7 @@ int32_t syncNodeOnPreSnapshot(SSyncNode *ths, SyncPreSnapshot *pMsg) { SyncPreSnapshotReply *pMsgReply = syncPreSnapshotReplyBuild(ths->vgId); pMsgReply->srcId = ths->myRaftId; pMsgReply->destId = pMsg->srcId; - pMsgReply->term = ths->raftStore.currentTerm; + pMsgReply->term = raftStoreGetTerm(ths); SSyncLogStoreData *pData = ths->pLogStore->data; 
SWal *pWal = pData->pWal; diff --git a/source/libs/tfs/src/tfs.c b/source/libs/tfs/src/tfs.c index 1d20b4d43d..bbe8f3eeac 100644 --- a/source/libs/tfs/src/tfs.c +++ b/source/libs/tfs/src/tfs.c @@ -80,7 +80,7 @@ STfs *tfsOpen(SDiskCfg *pCfg, int32_t ndisk) { void tfsClose(STfs *pTfs) { if (pTfs == NULL) return; - for (int32_t level = 0; level < TFS_MAX_LEVEL; level++) { + for (int32_t level = 0; level <= TFS_MAX_LEVEL; level++) { tfsDestroyTier(&pTfs->tiers[level]); } diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h index 5f964f6b1a..a41cc0068c 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -94,8 +94,8 @@ typedef void* queue[2]; /* Return the structure holding the given element. */ #define QUEUE_DATA(e, type, field) ((type*)((void*)((char*)(e)-offsetof(type, field)))) -//#define TRANS_RETRY_COUNT_LIMIT 100 // retry count limit -//#define TRANS_RETRY_INTERVAL 15 // retry interval (ms) +// #define TRANS_RETRY_COUNT_LIMIT 100 // retry count limit +// #define TRANS_RETRY_INTERVAL 15 // retry interval (ms) #define TRANS_CONN_TIMEOUT 3000 // connect timeout (ms) #define TRANS_READ_TIMEOUT 3000 // read timeout (ms) #define TRANS_PACKET_LIMIT 1024 * 1024 * 512 diff --git a/source/libs/transport/inc/transportInt.h b/source/libs/transport/inc/transportInt.h index 2db4a72795..1f3c98ad72 100644 --- a/source/libs/transport/inc/transportInt.h +++ b/source/libs/transport/inc/transportInt.h @@ -64,6 +64,11 @@ typedef struct { void (*destroyFp)(void* ahandle); bool (*failFastFp)(tmsg_t msgType); + int32_t connLimitNum; + int8_t connLimitLock; // 0: no lock. 1. lock + int8_t supportBatch; // 0: no batch, 1: support batch + int32_t batchSize; + int index; void* parent; void* tcphandle; // returned handle from TCP initialization diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c index cd508f6fe9..8e5f79137f 100644 --- a/source/libs/transport/src/thttp.c +++ b/source/libs/transport/src/thttp.c @@ -420,7 +420,13 @@ static void transHttpEnvInit() { uv_loop_init(http->loop); http->asyncPool = transAsyncPoolCreate(http->loop, 1, http, httpAsyncCb); - + if (NULL == http->asyncPool) { + taosMemoryFree(http->loop); + taosMemoryFree(http); + http = NULL; + return; + } + int err = taosThreadCreate(&http->thread, NULL, httpThread, (void*)http); if (err != 0) { taosMemoryFree(http->loop); diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c index 47b1ac5ca7..16ea25a41a 100644 --- a/source/libs/transport/src/trans.c +++ b/source/libs/transport/src/trans.c @@ -67,6 +67,10 @@ void* rpcOpen(const SRpcInit* pInit) { pRpc->startTimer = pInit->tfp; pRpc->destroyFp = pInit->dfp; pRpc->failFastFp = pInit->ffp; + pRpc->connLimitNum = pInit->connLimitNum; + pRpc->connLimitLock = pInit->connLimitLock; + pRpc->supportBatch = pInit->supportBatch; + pRpc->batchSize = pInit->batchSize; pRpc->numOfThreads = pInit->numOfThreads > TSDB_MAX_RPC_THREADS ? TSDB_MAX_RPC_THREADS : pInit->numOfThreads; if (pRpc->numOfThreads <= 0) { diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 111742a6f4..7e1aeafaad 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -12,7 +12,6 @@ * along with this program. If not, see . 
*/ -#ifdef USE_UV #include "transComm.h" typedef struct SConnList { @@ -20,6 +19,30 @@ typedef struct SConnList { int32_t size; } SConnList; +typedef struct { + queue wq; + int32_t len; + + int connMax; + int connCnt; + int batchLenLimit; + int sending; + + char* dst; + char* ip; + uint16_t port; + +} SCliBatchList; + +typedef struct { + queue wq; + queue listq; + int32_t wLen; + int32_t batchSize; // + int32_t batch; + SCliBatchList* pList; +} SCliBatch; + typedef struct SCliConn { T_REF_DECLARE() uv_connect_t connReq; @@ -40,9 +63,10 @@ typedef struct SCliConn { bool broken; // link broken or not ConnStatus status; // - int64_t refId; - char* ip; - uint32_t port; + SCliBatch* pBatch; + + int64_t refId; + char* ip; SDelayTask* task; @@ -80,11 +104,14 @@ typedef struct SCliThrd { uint64_t nextTimeout; // next timeout void* pTransInst; // + int connCount; void (*destroyAhandleFp)(void* ahandle); SHashObj* fqdn2ipCache; SCvtAddr cvtAddr; SHashObj* failFastCache; + SHashObj* connLimitCache; + SHashObj* batchCache; SCliMsg* stopMsg; @@ -131,6 +158,11 @@ static void cliAsyncCb(uv_async_t* handle); static void cliIdleCb(uv_idle_t* handle); static void cliPrepareCb(uv_prepare_t* handle); +static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd); +static void cliSendBatchCb(uv_write_t* req, int status); + +SCliBatch* cliGetHeadFromList(SCliBatchList* pList); + static bool cliRecvReleaseReq(SCliConn* conn, STransMsgHead* pHead); static int32_t allocConnRef(SCliConn* conn, bool update); @@ -141,8 +173,11 @@ static SCliConn* cliCreateConn(SCliThrd* thrd); static void cliDestroyConn(SCliConn* pConn, bool clear /*clear tcp handle or not*/); static void cliDestroy(uv_handle_t* handle); static void cliSend(SCliConn* pConn); +static void cliSendBatch(SCliConn* pConn); static void cliDestroyConnMsgs(SCliConn* conn, bool destroy); +static int32_t cliPreCheckSessionLimit(SCliThrd* pThrd, char* ip, uint16_t port); + // cli util func static FORCE_INLINE bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx); static FORCE_INLINE void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr); @@ -157,6 +192,7 @@ static void cliHandleResp(SCliConn* conn); // handle except about conn static void cliHandleExcept(SCliConn* conn); static void cliReleaseUnfinishedMsg(SCliConn* conn); +static void cliHandleFastFail(SCliConn* pConn, int status); // handle req from app static void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd); @@ -165,6 +201,8 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd); static void cliHandleUpdate(SCliMsg* pMsg, SCliThrd* pThrd); static void (*cliAsyncHandle[])(SCliMsg* pMsg, SCliThrd* pThrd) = {cliHandleReq, cliHandleQuit, cliHandleRelease, NULL, cliHandleUpdate}; +/// static void (*cliAsyncHandle[])(SCliMsg* pMsg, SCliThrd* pThrd) = {cliHandleReq, cliHandleQuit, cliHandleRelease, +/// NULL,cliHandleUpdate}; static FORCE_INLINE void destroyUserdata(STransMsg* userdata); static FORCE_INLINE void destroyCmsg(void* cmsg); @@ -285,6 +323,7 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) { } destroyCmsg(msg); } + transQueueClear(&conn->cliMsgs); memset(&conn->ctx, 0, sizeof(conn->ctx)); } bool cliMaySendCachedMsg(SCliConn* conn) { @@ -487,9 +526,9 @@ void cliConnTimeout(uv_timer_t* handle) { uv_timer_stop(handle); handle->data = NULL; taosArrayPush(pThrd->timerList, &conn->timer); - conn->timer = NULL; - cliHandleExceptImpl(conn, -1); + + cliHandleFastFail(conn, UV_ECANCELED); } void cliReadTimeoutCb(uv_timer_t* handle) { // set up timeout cb @@ -569,17 +608,15 @@ static 
void addConnToPool(void* pool, SCliConn* conn) { conn->status = ConnInPool; if (conn->list == NULL) { - char key[TSDB_FQDN_LEN + 64] = {0}; - CONN_CONSTRUCT_HASH_KEY(key, conn->ip, conn->port); tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap); - conn->list = taosHashGet((SHashObj*)pool, key, strlen(key)); + conn->list = taosHashGet((SHashObj*)pool, conn->ip, strlen(conn->ip)); } else { tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap); } QUEUE_PUSH(&conn->list->conns, &conn->q); conn->list->size += 1; - if (conn->list->size >= 50) { + if (conn->list->size >= 250) { STaskArg* arg = taosMemoryCalloc(1, sizeof(STaskArg)); arg->param1 = conn; arg->param2 = thrd; @@ -671,7 +708,6 @@ static SCliConn* cliCreateConn(SCliThrd* pThrd) { conn->stream = (uv_stream_t*)taosMemoryMalloc(sizeof(uv_tcp_t)); uv_tcp_init(pThrd->loop, (uv_tcp_t*)(conn->stream)); conn->stream->data = conn; - // transSetConnOption((uv_tcp_t*)conn->stream); uv_timer_t* timer = taosArrayGetSize(pThrd->timerList) > 0 ? *(uv_timer_t**)taosArrayPop(pThrd->timerList) : NULL; if (timer == NULL) { @@ -694,6 +730,7 @@ static SCliConn* cliCreateConn(SCliThrd* pThrd) { conn->broken = 0; transRefCliHandle(conn); + atomic_add_fetch_32(&pThrd->connCount, 1); allocConnRef(conn, false); return conn; @@ -737,6 +774,11 @@ static void cliDestroy(uv_handle_t* handle) { conn->timer->data = NULL; conn->timer = NULL; } + int32_t* oVal = taosHashGet(pThrd->connLimitCache, conn->ip, strlen(conn->ip)); + int32_t nVal = oVal == NULL ? 0 : (*oVal) - 1; + taosHashPut(pThrd->connLimitCache, conn->ip, strlen(conn->ip), &nVal, sizeof(nVal)); + + atomic_sub_fetch_32(&pThrd->connCount, 1); transReleaseExHandle(transGetRefMgt(), conn->refId); transRemoveExHandle(transGetRefMgt(), conn->refId); @@ -748,6 +790,7 @@ static void cliDestroy(uv_handle_t* handle) { tTrace("%s conn %p destroy successfully", CONN_GET_INST_LABEL(conn), conn); transReqQueueClear(&conn->wreqQueue); transDestroyBuffer(&conn->readBuf); + taosMemoryFree(conn); } static bool cliHandleNoResp(SCliConn* conn) { @@ -798,7 +841,65 @@ static void cliSendCb(uv_write_t* req, int status) { } uv_read_start((uv_stream_t*)pConn->stream, cliAllocRecvBufferCb, cliRecvCb); } +void cliSendBatch(SCliConn* pConn) { + SCliThrd* pThrd = pConn->hostThrd; + STrans* pTransInst = pThrd->pTransInst; + SCliBatch* pBatch = pConn->pBatch; + SCliBatchList* pList = pBatch->pList; + pList->connCnt += 1; + + int32_t wLen = pBatch->wLen; + + uv_buf_t* wb = taosMemoryCalloc(wLen, sizeof(uv_buf_t)); + int i = 0; + + queue* h = NULL; + QUEUE_FOREACH(h, &pBatch->wq) { + SCliMsg* pCliMsg = QUEUE_DATA(h, SCliMsg, q); + + STransConnCtx* pCtx = pCliMsg->ctx; + + STransMsg* pMsg = (STransMsg*)(&pCliMsg->msg); + if (pMsg->pCont == 0) { + pMsg->pCont = (void*)rpcMallocCont(0); + pMsg->contLen = 0; + } + + int msgLen = transMsgLenFromCont(pMsg->contLen); + STransMsgHead* pHead = transHeadFromCont(pMsg->pCont); + + if (pHead->comp == 0) { + pHead->ahandle = pCtx != NULL ? (uint64_t)pCtx->ahandle : 0; + pHead->noResp = REQUEST_NO_RESP(pMsg) ? 1 : 0; + pHead->persist = REQUEST_PERSIS_HANDLE(pMsg) ? 1 : 0; + pHead->msgType = pMsg->msgType; + pHead->msgLen = (int32_t)htonl((uint32_t)msgLen); + pHead->release = REQUEST_RELEASE_HANDLE(pCliMsg) ? 
1 : 0; + memcpy(pHead->user, pTransInst->user, strlen(pTransInst->user)); + pHead->traceId = pMsg->info.traceId; + pHead->magicNum = htonl(TRANS_MAGIC_NUM); + } + pHead->timestamp = taosHton64(taosGetTimestampUs()); + + if (pHead->comp == 0) { + if (pTransInst->compressSize != -1 && pTransInst->compressSize < pMsg->contLen) { + msgLen = transCompressMsg(pMsg->pCont, pMsg->contLen) + sizeof(STransMsgHead); + pHead->msgLen = (int32_t)htonl((uint32_t)msgLen); + } + } else { + msgLen = (int32_t)ntohl((uint32_t)(pHead->msgLen)); + } + wb[i++] = uv_buf_init((char*)pHead, msgLen); + } + + uv_write_t* req = taosMemoryCalloc(1, sizeof(uv_write_t)); + req->data = pConn; + tDebug("%s conn %p start to send batch msg, batch size:%d, msgLen:%d", CONN_GET_INST_LABEL(pConn), pConn, + pBatch->wLen, pBatch->batchSize); + uv_write(req, (uv_stream_t*)pConn->stream, wb, wLen, cliSendBatchCb); + taosMemoryFree(wb); +} void cliSend(SCliConn* pConn) { SCliThrd* pThrd = pConn->hostThrd; STrans* pTransInst = pThrd->pTransInst; @@ -883,31 +984,137 @@ _RETURN: return; } -void cliConnCb(uv_connect_t* req, int status) { - SCliConn* pConn = req->data; - SCliThrd* pThrd = pConn->hostThrd; +static void cliDestroyBatch(SCliBatch* pBatch) { + while (!QUEUE_IS_EMPTY(&pBatch->wq)) { + queue* h = QUEUE_HEAD(&pBatch->wq); + QUEUE_REMOVE(h); - if (pConn->timer != NULL) { - uv_timer_stop(pConn->timer); - pConn->timer->data = NULL; - taosArrayPush(pThrd->timerList, &pConn->timer); - pConn->timer = NULL; + SCliMsg* p = QUEUE_DATA(h, SCliMsg, q); + destroyCmsg(p); + } + SCliBatchList* p = pBatch->pList; + p->sending -= 1; + taosMemoryFree(pBatch); +} +static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd) { + if (pBatch == NULL || pBatch->wLen == 0 || QUEUE_IS_EMPTY(&pBatch->wq)) { + return; + } + STrans* pTransInst = pThrd->pTransInst; + SCliBatchList* pList = pBatch->pList; + + SCliConn* conn = getConnFromPool(pThrd->pool, pList->ip, pList->port); + + if (conn == NULL && 0 != cliPreCheckSessionLimit(pThrd, pList->ip, pList->port)) { + tError("%s failed to send batch msg, batch size:%d, msgLen: %d", pTransInst->label, pBatch->wLen, + pBatch->batchSize); + cliDestroyBatch(pBatch); + return; + } + if (conn == NULL) { + conn = cliCreateConn(pThrd); + conn->pBatch = pBatch; + conn->ip = strdup(pList->dst); + + uint32_t ipaddr = cliGetIpFromFqdnCache(pThrd->fqdn2ipCache, pList->ip); + if (ipaddr == 0xffffffff) { + uv_timer_stop(conn->timer); + conn->timer->data = NULL; + taosArrayPush(pThrd->timerList, &conn->timer); + conn->timer = NULL; + + cliHandleFastFail(conn, -1); + return; + } + struct sockaddr_in addr; + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = ipaddr; + addr.sin_port = (uint16_t)htons(pList->port); + + tTrace("%s conn %p try to connect to %s", pTransInst->label, conn, pList->dst); + int32_t fd = taosCreateSocketWithTimeout(TRANS_CONN_TIMEOUT * 4); + if (fd == -1) { + tError("%s conn %p failed to create socket, reason:%s", transLabel(pTransInst), conn, + tstrerror(TAOS_SYSTEM_ERROR(errno))); + cliHandleFastFail(conn, -1); + return; + } + int ret = uv_tcp_open((uv_tcp_t*)conn->stream, fd); + if (ret != 0) { + tError("%s conn %p failed to set stream, reason:%s", transLabel(pTransInst), conn, uv_err_name(ret)); + cliHandleFastFail(conn, -1); + return; + } + ret = transSetConnOption((uv_tcp_t*)conn->stream); + if (ret != 0) { + tError("%s conn %p failed to set socket opt, reason:%s", transLabel(pTransInst), conn, uv_err_name(ret)); + cliHandleFastFail(conn, -1); + return; + } + + ret = 
uv_tcp_connect(&conn->connReq, (uv_tcp_t*)(conn->stream), (const struct sockaddr*)&addr, cliConnCb); + if (ret != 0) { + uv_timer_stop(conn->timer); + conn->timer->data = NULL; + taosArrayPush(pThrd->timerList, &conn->timer); + conn->timer = NULL; + cliHandleFastFail(conn, -1); + return; + } + uv_timer_start(conn->timer, cliConnTimeout, TRANS_CONN_TIMEOUT, 0); + return; } - if (status != 0) { - SCliMsg* pMsg = transQueueGet(&pConn->cliMsgs, 0); - STrans* pTransInst = pThrd->pTransInst; + conn->pBatch = pBatch; + cliSendBatch(conn); +} +static void cliSendBatchCb(uv_write_t* req, int status) { + SCliConn* conn = req->data; + SCliThrd* thrd = conn->hostThrd; + SCliBatch* p = conn->pBatch; + + SCliBatchList* pBatchList = p->pList; + SCliBatch* nxtBatch = cliGetHeadFromList(pBatchList); + pBatchList->connCnt -= 1; + + conn->pBatch = NULL; + + if (status != 0) { + tDebug("%s conn %p failed to send batch msg, batch size:%d, msgLen:%d, reason:%s", CONN_GET_INST_LABEL(conn), conn, + p->wLen, p->batchSize, uv_err_name(status)); + cliHandleExcept(conn); + cliHandleBatchReq(nxtBatch, thrd); + } else { + tDebug("%s conn %p succ to send batch msg, batch size:%d, msgLen:%d", CONN_GET_INST_LABEL(conn), conn, p->wLen, + p->batchSize); + + if (nxtBatch != NULL) { + conn->pBatch = nxtBatch; + cliSendBatch(conn); + } else { + addConnToPool(thrd->pool, conn); + } + } + + cliDestroyBatch(p); + taosMemoryFree(req); +} +static void cliHandleFastFail(SCliConn* pConn, int status) { + SCliThrd* pThrd = pConn->hostThrd; + STrans* pTransInst = pThrd->pTransInst; + + if (status == -1) status = ENETUNREACH; + + if (pConn->pBatch == NULL) { + SCliMsg* pMsg = transQueueGet(&pConn->cliMsgs, 0); + + STraceId* trace = &pMsg->msg.info.traceId; + tGError("%s msg %s failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), + TMSG_INFO(pMsg->msg.msgType), pConn, pConn->ip, uv_strerror(status)); - tError("%s msg %s failed to send, conn %p failed to connect to %s:%d, reason: %s", CONN_GET_INST_LABEL(pConn), - pMsg ? 
TMSG_INFO(pMsg->msg.msgType) : 0, pConn, pConn->ip, pConn->port, uv_strerror(status)); if (pMsg != NULL && REQUEST_NO_RESP(&pMsg->msg) && (pTransInst->failFastFp != NULL && pTransInst->failFastFp(pMsg->msg.msgType))) { - char* ip = pConn->ip; - uint32_t port = pConn->port; - char key[TSDB_FQDN_LEN + 64] = {0}; - CONN_CONSTRUCT_HASH_KEY(key, ip, port); - - SFailFastItem* item = taosHashGet(pThrd->failFastCache, key, strlen(key)); + SFailFastItem* item = taosHashGet(pThrd->failFastCache, pConn->ip, strlen(pConn->ip)); int64_t cTimestamp = taosGetTimestampMs(); if (item != NULL) { int32_t elapse = cTimestamp - item->timestamp; @@ -919,15 +1126,47 @@ void cliConnCb(uv_connect_t* req, int status) { } } else { SFailFastItem item = {.count = 1, .timestamp = cTimestamp}; - taosHashPut(pThrd->failFastCache, key, strlen(key), &item, sizeof(SFailFastItem)); + taosHashPut(pThrd->failFastCache, pConn->ip, strlen(pConn->ip), &item, sizeof(SFailFastItem)); } } - cliHandleExcept(pConn); + } else { + tError("%s batch msg failed to send, conn %p failed to connect to %s, reason: %s", CONN_GET_INST_LABEL(pConn), + pConn, pConn->ip, uv_strerror(status)); + cliDestroyBatch(pConn->pBatch); + pConn->pBatch = NULL; + } + cliHandleExcept(pConn); +} + +void cliConnCb(uv_connect_t* req, int status) { + SCliConn* pConn = req->data; + SCliThrd* pThrd = pConn->hostThrd; + bool timeout = false; + + if (pConn->timer == NULL) { + timeout = true; + } else { + uv_timer_stop(pConn->timer); + pConn->timer->data = NULL; + taosArrayPush(pThrd->timerList, &pConn->timer); + pConn->timer = NULL; + } + + if (status != 0) { + if (timeout == false) { + cliHandleFastFail(pConn, status); + } else if (timeout == true) { + // already deal by timeout + } return; } - struct sockaddr peername, sockname; - int addrlen = sizeof(peername); + int32_t* oVal = taosHashGet(pThrd->connLimitCache, pConn->ip, strlen(pConn->ip)); + int32_t nVal = oVal == NULL ? 
0 : (*oVal) + 1; + taosHashPut(pThrd->connLimitCache, pConn->ip, strlen(pConn->ip), &nVal, sizeof(nVal)); + + struct sockaddr peername, sockname; + int addrlen = sizeof(peername); uv_tcp_getpeername((uv_tcp_t*)pConn->stream, &peername, &addrlen); transSockInfo2Str(&peername, pConn->dst); @@ -936,8 +1175,11 @@ void cliConnCb(uv_connect_t* req, int status) { transSockInfo2Str(&sockname, pConn->src); tTrace("%s conn %p connect to server successfully", CONN_GET_INST_LABEL(pConn), pConn); - - cliSend(pConn); + if (pConn->pBatch != NULL) { + cliSendBatch(pConn); + } else { + cliSend(pConn); + } } static void cliHandleQuit(SCliMsg* pMsg, SCliThrd* pThrd) { @@ -1062,12 +1304,32 @@ static FORCE_INLINE void cliUpdateFqdnCache(SHashObj* cache, char* fqdn) { return; } +static int32_t cliPreCheckSessionLimit(SCliThrd* pThrd, char* ip, uint16_t port) { + STrans* pTransInst = pThrd->pTransInst; + + // STransConnCtx* pCtx = pMsg->ctx; + // char* ip = EPSET_GET_INUSE_IP(&pCtx->epSet); + // int32_t port = EPSET_GET_INUSE_PORT(&pCtx->epSet); + + char key[TSDB_FQDN_LEN + 64] = {0}; + CONN_CONSTRUCT_HASH_KEY(key, ip, port); + + int32_t* val = taosHashGet(pThrd->connLimitCache, key, strlen(key)); + if (val == NULL) return 0; + + if (*val >= pTransInst->connLimitNum) { + return -1; + } + return 0; +} void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { STrans* pTransInst = pThrd->pTransInst; STransConnCtx* pCtx = pMsg->ctx; cliMayCvtFqdnToIp(&pCtx->epSet, &pThrd->cvtAddr); STraceId* trace = &pMsg->msg.info.traceId; + char* ip = EPSET_GET_INUSE_IP(&pCtx->epSet); + uint16_t port = EPSET_GET_INUSE_PORT(&pCtx->epSet); if (!EPSET_IS_VALID(&pCtx->epSet)) { tGError("%s, msg %s sent with invalid epset", pTransInst->label, TMSG_INFO(pMsg->msg.msgType)); @@ -1076,16 +1338,13 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { } if (REQUEST_NO_RESP(&pMsg->msg) && (pTransInst->failFastFp != NULL && pTransInst->failFastFp(pMsg->msg.msgType))) { - char* ip = EPSET_GET_INUSE_IP(&pCtx->epSet); - uint32_t port = EPSET_GET_INUSE_PORT(&pCtx->epSet); - char key[TSDB_FQDN_LEN + 64] = {0}; + char key[TSDB_FQDN_LEN + 64] = {0}; CONN_CONSTRUCT_HASH_KEY(key, ip, port); SFailFastItem* item = taosHashGet(pThrd->failFastCache, key, strlen(key)); if (item != NULL) { int32_t elapse = (int32_t)(taosGetTimestampMs() - item->timestamp); if (item->count >= pTransInst->failFastThreshold && (elapse >= 0 && elapse <= pTransInst->failFastInterval)) { - STraceId* trace = &(pMsg->msg.info.traceId); tGTrace("%s, msg %s cancel to send, reason: failed to connect %s:%d: count: %d, at %d", pTransInst->label, TMSG_INFO(pMsg->msg.msgType), ip, port, item->count, elapse); destroyCmsg(pMsg); @@ -1107,6 +1366,13 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { return; } + if (conn == NULL && REQUEST_NO_RESP(&pMsg->msg) && 0 != cliPreCheckSessionLimit(pThrd, ip, port)) { + tGTrace("%s, msg %s cancel to send, reason: %s", pTransInst->label, TMSG_INFO(pMsg->msg.msgType), + tstrerror(TSDB_CODE_RPC_MAX_SESSIONS)); + destroyCmsg(pMsg); + return; + } + if (conn != NULL) { transCtxMerge(&conn->ctx, &pCtx->appCtx); transQueuePush(&conn->cliMsgs, pMsg); @@ -1120,10 +1386,14 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { transCtxMerge(&conn->ctx, &pCtx->appCtx); transQueuePush(&conn->cliMsgs, pMsg); - conn->ip = strdup(EPSET_GET_INUSE_IP(&pCtx->epSet)); - conn->port = EPSET_GET_INUSE_PORT(&pCtx->epSet); + char key[TSDB_FQDN_LEN + 64] = {0}; + char* fqdn = EPSET_GET_INUSE_IP(&pCtx->epSet); + uint16_t port = EPSET_GET_INUSE_PORT(&pCtx->epSet); + 
CONN_CONSTRUCT_HASH_KEY(key, fqdn, port); - uint32_t ipaddr = cliGetIpFromFqdnCache(pThrd->fqdn2ipCache, conn->ip); + conn->ip = strdup(key); + + uint32_t ipaddr = cliGetIpFromFqdnCache(pThrd->fqdn2ipCache, fqdn); if (ipaddr == 0xffffffff) { uv_timer_stop(conn->timer); conn->timer->data = NULL; @@ -1137,9 +1407,9 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { struct sockaddr_in addr; addr.sin_family = AF_INET; addr.sin_addr.s_addr = ipaddr; - addr.sin_port = (uint16_t)htons((uint16_t)conn->port); + addr.sin_port = (uint16_t)htons(port); - tGTrace("%s conn %p try to connect to %s:%d", pTransInst->label, conn, conn->ip, conn->port); + tGTrace("%s conn %p try to connect to %s", pTransInst->label, conn, conn->ip); int32_t fd = taosCreateSocketWithTimeout(TRANS_CONN_TIMEOUT * 4); if (fd == -1) { tGError("%s conn %p failed to create socket, reason:%s", transLabel(pTransInst), conn, @@ -1163,45 +1433,168 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { ret = uv_tcp_connect(&conn->connReq, (uv_tcp_t*)(conn->stream), (const struct sockaddr*)&addr, cliConnCb); if (ret != 0) { - tGError("%s conn %p failed to connect to %s:%d, reason:%s", pTransInst->label, conn, conn->ip, conn->port, - uv_err_name(ret)); - uv_timer_stop(conn->timer); conn->timer->data = NULL; taosArrayPush(pThrd->timerList, &conn->timer); conn->timer = NULL; - cliHandleExcept(conn); + cliHandleFastFail(conn, ret); return; } uv_timer_start(conn->timer, cliConnTimeout, TRANS_CONN_TIMEOUT, 0); } tGTrace("%s conn %p ready", pTransInst->label, conn); } + +static void cliNoBatchDealReq(queue* wq, SCliThrd* pThrd) { + int count = 0; + + while (!QUEUE_IS_EMPTY(wq)) { + queue* h = QUEUE_HEAD(wq); + QUEUE_REMOVE(h); + + SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q); + (*cliAsyncHandle[pMsg->type])(pMsg, pThrd); + + count++; + } + if (count >= 2) { + tTrace("cli process batch size:%d", count); + } +} +SCliBatch* cliGetHeadFromList(SCliBatchList* pList) { + if (QUEUE_IS_EMPTY(&pList->wq) || pList->connCnt > pList->connMax || pList->sending > pList->connMax) { + return NULL; + } + queue* hr = QUEUE_HEAD(&pList->wq); + QUEUE_REMOVE(hr); + pList->sending += 1; + + pList->len -= 1; + + SCliBatch* batch = QUEUE_DATA(hr, SCliBatch, listq); + return batch; +} + +static void cliBatchDealReq(queue* wq, SCliThrd* pThrd) { + STrans* pInst = pThrd->pTransInst; + + int count = 0; + while (!QUEUE_IS_EMPTY(wq)) { + queue* h = QUEUE_HEAD(wq); + QUEUE_REMOVE(h); + + SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q); + if (pMsg->type == Normal && REQUEST_NO_RESP(&pMsg->msg)) { + STransConnCtx* pCtx = pMsg->ctx; + + char* ip = EPSET_GET_INUSE_IP(&pCtx->epSet); + uint32_t port = EPSET_GET_INUSE_PORT(&pCtx->epSet); + char key[TSDB_FQDN_LEN + 64] = {0}; + CONN_CONSTRUCT_HASH_KEY(key, ip, port); + + // SCliBatch** ppBatch = taosHashGet(pThrd->batchCache, key, sizeof(key)); + SCliBatchList** ppBatchList = taosHashGet(pThrd->batchCache, key, sizeof(key)); + if (ppBatchList == NULL || *ppBatchList == NULL) { + SCliBatchList* pBatchList = taosMemoryCalloc(1, sizeof(SCliBatchList)); + QUEUE_INIT(&pBatchList->wq); + pBatchList->connMax = pInst->connLimitNum; + pBatchList->connCnt = 0; + pBatchList->batchLenLimit = pInst->batchSize; + pBatchList->len += 1; + + pBatchList->ip = strdup(ip); + pBatchList->dst = strdup(key); + pBatchList->port = port; + + SCliBatch* pBatch = taosMemoryCalloc(1, sizeof(SCliBatch)); + QUEUE_INIT(&pBatch->wq); + QUEUE_INIT(&pBatch->listq); + + QUEUE_PUSH(&pBatch->wq, h); + pBatch->wLen += 1; + pBatch->batchSize += pMsg->msg.contLen; + 
pBatch->pList = pBatchList; + + QUEUE_PUSH(&pBatchList->wq, &pBatch->listq); + + taosHashPut(pThrd->batchCache, key, sizeof(key), &pBatchList, sizeof(void*)); + } else { + if (QUEUE_IS_EMPTY(&(*ppBatchList)->wq)) { + SCliBatch* pBatch = taosMemoryCalloc(1, sizeof(SCliBatch)); + QUEUE_INIT(&pBatch->wq); + QUEUE_INIT(&pBatch->listq); + + QUEUE_PUSH(&pBatch->wq, h); + pBatch->wLen += 1; + pBatch->batchSize = pMsg->msg.contLen; + pBatch->pList = *ppBatchList; + + QUEUE_PUSH(&((*ppBatchList)->wq), &pBatch->listq); + (*ppBatchList)->len += 1; + + continue; + } + + queue* hdr = QUEUE_TAIL(&((*ppBatchList)->wq)); + SCliBatch* pBatch = QUEUE_DATA(hdr, SCliBatch, listq); + if ((pBatch->batchSize + pMsg->msg.contLen) < (*ppBatchList)->batchLenLimit) { + QUEUE_PUSH(&pBatch->wq, h); + pBatch->batchSize += pMsg->msg.contLen; + pBatch->wLen += 1; + } else { + SCliBatch* pBatch = taosMemoryCalloc(1, sizeof(SCliBatch)); + QUEUE_INIT(&pBatch->wq); + QUEUE_INIT(&pBatch->listq); + + QUEUE_PUSH(&pBatch->wq, h); + pBatch->wLen += 1; + pBatch->batchSize += pMsg->msg.contLen; + pBatch->pList = *ppBatchList; + + QUEUE_PUSH(&((*ppBatchList)->wq), &pBatch->listq); + (*ppBatchList)->len += 1; + } + } + continue; + } + (*cliAsyncHandle[pMsg->type])(pMsg, pThrd); + count++; + } + + void** pIter = taosHashIterate(pThrd->batchCache, NULL); + while (pIter != NULL) { + SCliBatchList* batchList = (SCliBatchList*)(*pIter); + SCliBatch* batch = cliGetHeadFromList(batchList); + if (batch != NULL) { + cliHandleBatchReq(batch, pThrd); + } + pIter = (void**)taosHashIterate(pThrd->batchCache, pIter); + } + + if (count >= 2) { + tTrace("cli process batch size:%d", count); + } +} + static void cliAsyncCb(uv_async_t* handle) { SAsyncItem* item = handle->data; SCliThrd* pThrd = item->pThrd; - SCliMsg* pMsg = NULL; + STrans* pTransInst = pThrd->pTransInst; + SCliMsg* pMsg = NULL; // batch process to avoid to lock/unlock frequently queue wq; taosThreadMutexLock(&item->mtx); QUEUE_MOVE(&item->qmsg, &wq); taosThreadMutexUnlock(&item->mtx); - int count = 0; - while (!QUEUE_IS_EMPTY(&wq)) { - queue* h = QUEUE_HEAD(&wq); - QUEUE_REMOVE(h); - - SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q); - (*cliAsyncHandle[pMsg->type])(pMsg, pThrd); - count++; + int8_t supportBatch = pTransInst->supportBatch; + if (supportBatch == 0) { + cliNoBatchDealReq(&wq, pThrd); + } else if (supportBatch == 1) { + cliBatchDealReq(&wq, pThrd); } - if (count >= 2) { - tTrace("cli process batch size:%d", count); - } - // if (!uv_is_active((uv_handle_t*)pThrd->prepare)) uv_prepare_start(pThrd->prepare, cliPrepareCb); if (pThrd->stopMsg != NULL) cliHandleQuit(pThrd->stopMsg, pThrd); } @@ -1380,7 +1773,11 @@ static SCliThrd* createThrdObj(void* trans) { taosMemoryFree(pThrd); return NULL; } - pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 8, pThrd, cliAsyncCb); + if (pTransInst->supportBatch) { + pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 4, pThrd, cliAsyncCb); + } else { + pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 8, pThrd, cliAsyncCb); + } if (pThrd->asyncPool == NULL) { tError("failed to init async pool"); uv_loop_close(pThrd->loop); @@ -1414,6 +1811,10 @@ static SCliThrd* createThrdObj(void* trans) { pThrd->destroyAhandleFp = pTransInst->destroyFp; pThrd->fqdn2ipCache = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); pThrd->failFastCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + pThrd->connLimitCache = taosHashInit(8, 
taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, + pTransInst->connLimitLock == 0 ? HASH_NO_LOCK : HASH_ENTRY_LOCK); + + pThrd->batchCache = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); pThrd->quit = false; return pThrd; @@ -1442,6 +1843,25 @@ static void destroyThrdObj(SCliThrd* pThrd) { taosMemoryFree(pThrd->loop); taosHashCleanup(pThrd->fqdn2ipCache); taosHashCleanup(pThrd->failFastCache); + taosHashCleanup(pThrd->connLimitCache); + + void** pIter = taosHashIterate(pThrd->batchCache, NULL); + while (pIter != NULL) { + SCliBatchList* pBatchList = (SCliBatchList*)(*pIter); + while (!QUEUE_IS_EMPTY(&pBatchList->wq)) { + queue* h = QUEUE_HEAD(&pBatchList->wq); + QUEUE_REMOVE(h); + + SCliBatch* pBatch = QUEUE_DATA(h, SCliBatch, listq); + cliDestroyBatch(pBatch); + } + taosMemoryFree(pBatchList->ip); + taosMemoryFree(pBatchList->dst); + taosMemoryFree(pBatchList); + + pIter = (void**)taosHashIterate(pThrd->batchCache, pIter); + } + taosHashCleanup(pThrd->batchCache); taosMemoryFree(pThrd); } @@ -1865,6 +2285,19 @@ int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STran transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); return TSDB_CODE_RPC_BROKEN_LINK; } + /*if (pTransInst->connLimitNum > 0 && REQUEST_NO_RESP(pReq)) { + char key[TSDB_FQDN_LEN + 64] = {0}; + char* ip = EPSET_GET_INUSE_IP((SEpSet*)pEpSet); + uint16_t port = EPSET_GET_INUSE_PORT((SEpSet*)pEpSet); + CONN_CONSTRUCT_HASH_KEY(key, ip, port); + + int32_t* val = taosHashGet(pThrd->connLimitCache, key, strlen(key)); + if (val != NULL && *val >= pTransInst->connLimitNum) { + transFreeMsg(pReq->pCont); + transReleaseExHandle(transGetInstMgt(), (int64_t)shandle); + return TSDB_CODE_RPC_MAX_SESSIONS; + } + }*/ TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64()); @@ -1989,4 +2422,3 @@ int64_t transAllocHandle() { return exh->refId; } -#endif diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index fa8929f7d9..04e094ae9a 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -12,8 +12,6 @@ * along with this program. If not, see . 
*/ -#ifdef USE_UV - #include "transComm.h" static TdThreadOnce transModuleInit = PTHREAD_ONCE_INIT; @@ -246,11 +244,11 @@ static bool uvHandleReq(SSvrConn* pConn) { } } else { if (cost >= EXCEPTION_LIMIT_US) { - tGWarn("%s conn %p %s received from %s, local info:%s, len:%d, resp:%d, code:%d, cost:%dus, recv exception", + tGWarn("%s conn %p %s received from %s, local info:%s, len:%d, noResp:%d, code:%d, cost:%dus, recv exception", transLabel(pTransInst), pConn, TMSG_INFO(transMsg.msgType), pConn->dst, pConn->src, msgLen, pHead->noResp, transMsg.code, (int)(cost)); } else { - tGDebug("%s conn %p %s received from %s, local info:%s, len:%d, resp:%d, code:%d, cost:%dus", + tGDebug("%s conn %p %s received from %s, local info:%s, len:%d, noResp:%d, code:%d, cost:%dus", transLabel(pTransInst), pConn, TMSG_INFO(transMsg.msgType), pConn->dst, pConn->src, msgLen, pHead->noResp, transMsg.code, (int)(cost)); } @@ -1347,5 +1345,3 @@ _return2: } int transGetConnInfo(void* thandle, STransHandleInfo* pConnInfo) { return -1; } - -#endif diff --git a/source/libs/transport/test/cliBench.c b/source/libs/transport/test/cliBench.c index 01e88b9988..aaee162cd7 100644 --- a/source/libs/transport/test/cliBench.c +++ b/source/libs/transport/test/cliBench.c @@ -32,22 +32,21 @@ typedef struct { void *pRpc; } SInfo; - void initLogEnv() { - const char *logDir = "/tmp/trans_cli"; - const char* defaultLogFileNamePrefix = "taoslog"; + const char *logDir = "/tmp/trans_cli"; + const char *defaultLogFileNamePrefix = "taoslog"; const int32_t maxLogFileNum = 10000; tsAsyncLog = 0; - //idxDebugFlag = 143; + // idxDebugFlag = 143; strcpy(tsLogDir, (char *)logDir); taosRemoveDir(tsLogDir); - taosMkDir(tsLogDir); - + taosMkDir(tsLogDir); + if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) { - printf("failed to open log file in directory:%s\n", tsLogDir); + printf("failed to open log file in directory:%s\n", tsLogDir); } } - + static void processResponse(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) { SInfo *pInfo = (SInfo *)pMsg->info.ahandle; tDebug("thread:%d, response is received, type:%d contLen:%d code:0x%x", pInfo->index, pMsg->msgType, pMsg->contLen, @@ -72,11 +71,12 @@ static void *sendRequest(void *param) { rpcMsg.pCont = rpcMallocCont(pInfo->msgSize); rpcMsg.contLen = pInfo->msgSize; rpcMsg.info.ahandle = pInfo; + rpcMsg.info.noResp = 1; rpcMsg.msgType = 1; tDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num); rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg, NULL); if (pInfo->num % 20000 == 0) tInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num); - tsem_wait(&pInfo->rspSem); + // tsem_wait(&pInfo->rspSem); } tDebug("thread:%d, it is over", pInfo->index); @@ -112,7 +112,12 @@ int main(int argc, char *argv[]) { rpcInit.sessions = 100; rpcInit.idleTime = tsShellActivityTimer * 1000; rpcInit.user = "michael"; + rpcInit.connType = TAOS_CONN_CLIENT; + rpcInit.connLimitNum = 10; + rpcInit.connLimitLock = 1; + rpcInit.batchSize = 16 * 1024; + rpcInit.supportBatch = 1; rpcDebugFlag = 135; for (int i = 1; i < argc; ++i) { @@ -148,7 +153,6 @@ int main(int argc, char *argv[]) { exit(0); } } - initLogEnv(); diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 07109883db..ee56479a31 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -749,28 +749,30 @@ int walMetaDeserialize(SWal* pWal, const char* bytes) { // deserialize SArray* pArray = pWal->fileInfoSet; taosArrayEnsureCap(pArray, sz); - SWalFileInfo* 
pData = pArray->pData; + for (int i = 0; i < sz; i++) { - cJSON* pInfoJson = cJSON_GetArrayItem(pFiles, i); + pInfoJson = cJSON_GetArrayItem(pFiles, i); if (!pInfoJson) goto _err; - SWalFileInfo* pInfo = &pData[i]; + + SWalFileInfo info = {0}; + pField = cJSON_GetObjectItem(pInfoJson, "firstVer"); if (!pField) goto _err; - pInfo->firstVer = atoll(cJSON_GetStringValue(pField)); + info.firstVer = atoll(cJSON_GetStringValue(pField)); pField = cJSON_GetObjectItem(pInfoJson, "lastVer"); if (!pField) goto _err; - pInfo->lastVer = atoll(cJSON_GetStringValue(pField)); + info.lastVer = atoll(cJSON_GetStringValue(pField)); pField = cJSON_GetObjectItem(pInfoJson, "createTs"); if (!pField) goto _err; - pInfo->createTs = atoll(cJSON_GetStringValue(pField)); + info.createTs = atoll(cJSON_GetStringValue(pField)); pField = cJSON_GetObjectItem(pInfoJson, "closeTs"); if (!pField) goto _err; - pInfo->closeTs = atoll(cJSON_GetStringValue(pField)); + info.closeTs = atoll(cJSON_GetStringValue(pField)); pField = cJSON_GetObjectItem(pInfoJson, "fileSize"); if (!pField) goto _err; - pInfo->fileSize = atoll(cJSON_GetStringValue(pField)); + info.fileSize = atoll(cJSON_GetStringValue(pField)); + taosArrayPush(pArray, &info); } - taosArraySetSize(pArray, sz); pWal->fileInfoSet = pArray; pWal->writeCur = sz - 1; cJSON_Delete(pRoot); diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index 0562dc10ce..958e7dc23d 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -120,16 +120,16 @@ int32_t walRollback(SWal *pWal, int64_t ver) { // delete files in descending order int fileSetSize = taosArrayGetSize(pWal->fileInfoSet); - for (int i = fileSetSize - 1; i >= pWal->writeCur + 1; i--) { - walBuildLogName(pWal, ((SWalFileInfo *)taosArrayGet(pWal->fileInfoSet, i))->firstVer, fnameStr); + for (int i = pWal->writeCur + 1; i < fileSetSize; i++) { + SWalFileInfo *pInfo = taosArrayPop(pWal->fileInfoSet); + + walBuildLogName(pWal, pInfo->firstVer, fnameStr); wDebug("vgId:%d, wal remove file %s for rollback", pWal->cfg.vgId, fnameStr); taosRemoveFile(fnameStr); - walBuildIdxName(pWal, ((SWalFileInfo *)taosArrayGet(pWal->fileInfoSet, i))->firstVer, fnameStr); + walBuildIdxName(pWal, pInfo->firstVer, fnameStr); wDebug("vgId:%d, wal remove file %s for rollback", pWal->cfg.vgId, fnameStr); taosRemoveFile(fnameStr); } - // pop from fileInfoSet - taosArraySetSize(pWal->fileInfoSet, pWal->writeCur + 1); } walBuildIdxName(pWal, walGetCurFileFirstVer(pWal), fnameStr); @@ -202,11 +202,6 @@ int32_t walRollback(SWal *pWal, int64_t ver) { return -1; } pWal->vers.lastVer = ver - 1; -#if 0 - if (pWal->vers.lastVer < pWal->vers.firstVer) { - A(pWal->vers.lastVer == pWal->vers.firstVer - 1); - } -#endif ((SWalFileInfo *)taosArrayGetLast(pWal->fileInfoSet))->lastVer = ver - 1; ((SWalFileInfo *)taosArrayGetLast(pWal->fileInfoSet))->fileSize = entry.offset; taosCloseFile(&pIdxFile); diff --git a/source/os/src/osSocket.c b/source/os/src/osSocket.c index ddb8eca0eb..fac547ca99 100644 --- a/source/os/src/osSocket.c +++ b/source/os/src/osSocket.c @@ -1078,7 +1078,7 @@ int32_t taosCreateSocketWithTimeout(uint32_t timeout) { // return -1; //} #else // Linux like systems - uint32_t conn_timeout_ms = timeout * 1000; + uint32_t conn_timeout_ms = timeout; if (0 != setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, (char *)&conn_timeout_ms, sizeof(conn_timeout_ms))) { taosCloseSocketNoCheck1(fd); return -1; diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c index c083ce2f7f..0bae81f119 100644 --- 
a/source/util/src/tarray.c +++ b/source/util/src/tarray.c @@ -48,6 +48,26 @@ SArray* taosArrayInit(size_t size, size_t elemSize) { return pArray; } +SArray* taosArrayInit_s(size_t size, size_t elemSize, size_t initialSize) { + SArray* pArray = taosMemoryMalloc(sizeof(SArray)); + if (pArray == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return NULL; + } + + pArray->size = initialSize; + pArray->pData = taosMemoryCalloc(initialSize, elemSize); + if (pArray->pData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + taosMemoryFree(pArray); + return NULL; + } + + pArray->capacity = initialSize; + pArray->elemSize = elemSize; + return pArray; +} + static int32_t taosArrayResize(SArray* pArray) { assert(pArray->size >= pArray->capacity); @@ -225,7 +245,13 @@ void* taosArrayGetP(const SArray* pArray, size_t index) { return *p; } -void* taosArrayGetLast(const SArray* pArray) { return TARRAY_GET_ELEM(pArray, pArray->size - 1); } +void* taosArrayGetLast(const SArray* pArray) { + if (pArray->size == 0) { + return NULL; + } + + return TARRAY_GET_ELEM(pArray, pArray->size - 1); +} size_t taosArrayGetSize(const SArray* pArray) { if (pArray == NULL) { @@ -234,11 +260,6 @@ size_t taosArrayGetSize(const SArray* pArray) { return TARRAY_SIZE(pArray); } -void taosArraySetSize(SArray* pArray, size_t size) { - assert(size <= pArray->capacity); - pArray->size = size; -} - void* taosArrayInsert(SArray* pArray, size_t index, void* pData) { if (pArray == NULL || pData == NULL) { return NULL; diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c index 64d550e874..695a83abb1 100644 --- a/source/util/src/tcompression.c +++ b/source/util/src/tcompression.c @@ -228,6 +228,7 @@ int32_t tsCompressINTImp(const char *const input, const int32_t nelements, char } int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, char *const output, const char type) { + int32_t word_length = 0; switch (type) { case TSDB_DATA_TYPE_BIGINT: @@ -263,6 +264,177 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha int32_t _pos = 0; int64_t prev_value = 0; +#if __AVX2__ + while (1) { + if (_pos == nelements) break; + + uint64_t w = 0; + memcpy(&w, ip, LONG_BYTES); + + char selector = (char)(w & INT64MASK(4)); // selector = 4 + char bit = bit_per_integer[(int32_t)selector]; // bit = 3 + int32_t elems = selector_to_elems[(int32_t)selector]; + + // Optimize the performance, by remove the constantly switch operation. + int32_t v = 4; + uint64_t zigzag_value = 0; + uint64_t mask = INT64MASK(bit); + + switch (type) { + case TSDB_DATA_TYPE_BIGINT: { + int64_t* p = (int64_t*) output; + + int32_t gRemainder = (nelements - _pos); + int32_t num = (gRemainder > elems)? 
elems:gRemainder; + + int32_t batch = num >> 2; + int32_t remain = num & 0x03; + if (selector == 0 || selector == 1) { + if (tsAVX2Enable && tsSIMDBuiltins) { + for (int32_t i = 0; i < batch; ++i) { + __m256i prev = _mm256_set1_epi64x(prev_value); + _mm256_storeu_si256((__m256i *)&p[_pos], prev); + _pos += 4; + } + + for (int32_t i = 0; i < remain; ++i) { + p[_pos++] = prev_value; + } + } else { + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { + p[_pos++] = prev_value; + v += bit; + } + } + } else { + if (tsAVX2Enable && tsSIMDBuiltins) { + __m256i base = _mm256_set1_epi64x(w); + __m256i maskVal = _mm256_set1_epi64x(mask); + + __m256i shiftBits = _mm256_set_epi64x(bit * 3 + 4, bit * 2 + 4, bit + 4, 4); + __m256i inc = _mm256_set1_epi64x(bit << 2); + + for (int32_t i = 0; i < batch; ++i) { + __m256i after = _mm256_srlv_epi64(base, shiftBits); + __m256i zigzagVal = _mm256_and_si256(after, maskVal); + + // ZIGZAG_DECODE(T, v) (((v) >> 1) ^ -((T)((v)&1))) + __m256i signmask = _mm256_and_si256(_mm256_set1_epi64x(1), zigzagVal); + signmask = _mm256_sub_epi64(_mm256_setzero_si256(), signmask); + // get the four zigzag values here + __m256i delta = _mm256_xor_si256(_mm256_srli_epi64(zigzagVal, 1), signmask); + + // calculate the cumulative sum (prefix sum) for each number + // decode[0] = prev_value + final[0] + // decode[1] = decode[0] + final[1] -----> prev_value + final[0] + final[1] + // decode[2] = decode[1] + final[1] -----> prev_value + final[0] + final[1] + final[2] + // decode[3] = decode[2] + final[1] -----> prev_value + final[0] + final[1] + final[2] + final[3] + + // 1, 2, 3, 4 + //+ 0, 1, 2, 3 + // 1, 3, 5, 7 + // shift and add for the first round + __m128i prev = _mm_set1_epi64x(prev_value); + delta = _mm256_add_epi64(delta, _mm256_slli_si256(delta, 8)); + _mm256_storeu_si256((__m256i *)&p[_pos], delta); + + // 1, 3, 5, 7 + //+ 0, 0, 1, 3 + // 1, 3, 6, 10 + // shift and add operation for the second round + __m128i firstPart = _mm_loadu_si128((__m128i *)&p[_pos]); + __m128i secPart = _mm_add_epi64(_mm_loadu_si128((__m128i *)&p[_pos + 2]), firstPart); + firstPart = _mm_add_epi64(firstPart, prev); + secPart = _mm_add_epi64(secPart, prev); + + // save it in the memory + _mm_storeu_si128((__m128i *)&p[_pos], firstPart); + _mm_storeu_si128((__m128i *)&p[_pos + 2], secPart); + + shiftBits = _mm256_add_epi64(shiftBits, inc); + prev_value = p[_pos + 3]; + _pos += 4; + } + + // handle the remain value + for (int32_t i = 0; i < remain; i++) { + zigzag_value = ((w >> (v + (batch * bit))) & mask); + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); + + p[_pos++] = prev_value; + v += bit; + } + } else { + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { + zigzag_value = ((w >> v) & mask); + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); + + p[_pos++] = prev_value; + v += bit; + } + } + } + } break; + case TSDB_DATA_TYPE_INT: { + int32_t* p = (int32_t*) output; + + if (selector == 0 || selector == 1) { + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { + p[_pos++] = (int32_t)prev_value; + } + } else { + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { + zigzag_value = ((w >> v) & mask); + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); + + p[_pos++] = (int32_t)prev_value; + v += bit; + } + } + } break; + case TSDB_DATA_TYPE_SMALLINT: { + int16_t* p = (int16_t*) output; + + if (selector == 0 || selector == 1) { + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { + p[_pos++] = 
(int16_t)prev_value; + } + } else { + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { + zigzag_value = ((w >> v) & mask); + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); + + p[_pos++] = (int16_t)prev_value; + v += bit; + } + } + } break; + + case TSDB_DATA_TYPE_TINYINT: { + int8_t *p = (int8_t *)output; + + if (selector == 0 || selector == 1) { + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { + p[_pos++] = (int8_t)prev_value; + } + } else { + for (int32_t i = 0; i < elems && count < nelements; i++, count++) { + zigzag_value = ((w >> v) & mask); + prev_value += ZIGZAG_DECODE(int64_t, zigzag_value); + + p[_pos++] = (int8_t)prev_value; + v += bit; + } + } + } break; + } + + ip += LONG_BYTES; + } + + return nelements * word_length; +#else + while (1) { if (count == nelements) break; @@ -273,94 +445,47 @@ int32_t tsDecompressINTImp(const char *const input, const int32_t nelements, cha char bit = bit_per_integer[(int32_t)selector]; // bit = 3 int32_t elems = selector_to_elems[(int32_t)selector]; - // Optimize the performance, by remove the constantly switch operation. - int32_t v = 0; - uint64_t zigzag_value; + for (int32_t i = 0; i < elems; i++) { + uint64_t zigzag_value; - switch (type) { - case TSDB_DATA_TYPE_BIGINT: { - for (int32_t i = 0; i < elems; i++) { - if (selector == 0 || selector == 1) { - zigzag_value = 0; - } else { - zigzag_value = ((w >> (4 + v)) & INT64MASK(bit)); - } - - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - int64_t curr_value = diff + prev_value; - prev_value = curr_value; + if (selector == 0 || selector == 1) { + zigzag_value = 0; + } else { + zigzag_value = ((w >> (4 + bit * i)) & INT64MASK(bit)); + } + int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); + int64_t curr_value = diff + prev_value; + prev_value = curr_value; + switch (type) { + case TSDB_DATA_TYPE_BIGINT: *((int64_t *)output + _pos) = (int64_t)curr_value; _pos++; - - v += bit; - if ((++count) == nelements) break; - } - } break; - case TSDB_DATA_TYPE_INT: { - for (int32_t i = 0; i < elems; i++) { - if (selector == 0 || selector == 1) { - zigzag_value = 0; - } else { - zigzag_value = ((w >> (4 + v)) & INT64MASK(bit)); - } - - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - int64_t curr_value = diff + prev_value; - prev_value = curr_value; - + break; + case TSDB_DATA_TYPE_INT: *((int32_t *)output + _pos) = (int32_t)curr_value; _pos++; - - v += bit; - if ((++count) == nelements) break; - } - } break; - case TSDB_DATA_TYPE_SMALLINT: { - for (int32_t i = 0; i < elems; i++) { - if (selector == 0 || selector == 1) { - zigzag_value = 0; - } else { - zigzag_value = ((w >> (4 + v)) & INT64MASK(bit)); - } - - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - int64_t curr_value = diff + prev_value; - prev_value = curr_value; - + break; + case TSDB_DATA_TYPE_SMALLINT: *((int16_t *)output + _pos) = (int16_t)curr_value; _pos++; - - v += bit; - if ((++count) == nelements) break; - } - } break; - - case TSDB_DATA_TYPE_TINYINT: { - for (int32_t i = 0; i < elems; i++) { - if (selector == 0 || selector == 1) { - zigzag_value = 0; - } else { - zigzag_value = ((w >> (4 + v)) & INT64MASK(bit)); - } - - int64_t diff = ZIGZAG_DECODE(int64_t, zigzag_value); - int64_t curr_value = diff + prev_value; - prev_value = curr_value; - + break; + case TSDB_DATA_TYPE_TINYINT: *((int8_t *)output + _pos) = (int8_t)curr_value; _pos++; - - v += bit; - if ((++count) == nelements) break; - } - } break; + break; + default: + perror("Wrong integer types.\n"); + return -1; + } + 
count++; + if (count == nelements) break; } - ip += LONG_BYTES; } return nelements * word_length; +#endif } /* ----------------------------------------------Bool Compression diff --git a/source/util/src/terror.c b/source/util/src/terror.c index d5625c3810..fac0745c06 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -52,6 +52,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RPC_PORT_EADDRINUSE, "Port already in use") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_BROKEN_LINK, "Conn is broken") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_TIMEOUT, "Conn read timeout") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED, "some vnode/qnode/mnode(s) out of service") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_MAX_SESSIONS, "rpc open too many session") //common & util TAOS_DEFINE_ERROR(TSDB_CODE_TIME_UNSYNCED, "Client and server's time is not synchronized") @@ -96,6 +97,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TIMEOUT_ERROR, "Operation timeout") TAOS_DEFINE_ERROR(TSDB_CODE_APP_IS_STARTING, "Database is starting up") TAOS_DEFINE_ERROR(TSDB_CODE_APP_IS_STOPPING, "Database is closing down") +TAOS_DEFINE_ERROR(TSDB_CODE_IVLD_DATA_FMT, "Invalid data format") //client TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_OPERATION, "Invalid operation") diff --git a/source/util/src/thash.c b/source/util/src/thash.c index e9548613aa..926dc304a4 100644 --- a/source/util/src/thash.c +++ b/source/util/src/thash.c @@ -421,7 +421,11 @@ int32_t taosHashGetDup_m(SHashObj *pHashObj, const void *key, size_t keyLen, voi } void *taosHashGetImpl(SHashObj *pHashObj, const void *key, size_t keyLen, void **d, int32_t *size, bool addRef) { - if (pHashObj == NULL || taosHashTableEmpty(pHashObj) || keyLen == 0 || key == NULL) { + if (pHashObj == NULL || keyLen == 0 || key == NULL) { + return NULL; + } + + if ((atomic_load_64((int64_t *)&pHashObj->size) == 0)) { return NULL; } diff --git a/source/util/src/thashutil.c b/source/util/src/thashutil.c index 59f7d389c2..21b9359076 100644 --- a/source/util/src/thashutil.c +++ b/source/util/src/thashutil.c @@ -17,6 +17,7 @@ #include "tcompare.h" #include "thash.h" #include "types.h" +#include "xxhash.h" #define ROTL32(x, r) ((x) << (r) | (x) >> (32u - (r))) @@ -49,6 +50,11 @@ uint32_t taosDJB2Hash(const char *key, uint32_t len) { return hash; } +uint32_t xxHash(const char *key, uint32_t len) { + int32_t seed = 0xcc9e2d51; + return XXH32(key, len, seed); +} + uint32_t MurmurHash3_32(const char *key, uint32_t len) { const uint8_t *data = (const uint8_t *)key; const int32_t nblocks = len >> 2u; @@ -192,8 +198,6 @@ _hash_fn_t taosGetDefaultHashFunction(int32_t type) { fn = taosIntHash_64; break; case TSDB_DATA_TYPE_BINARY: - fn = MurmurHash3_32; - break; case TSDB_DATA_TYPE_NCHAR: fn = MurmurHash3_32; break; diff --git a/source/util/src/tjson.c b/source/util/src/tjson.c index 48638af8d5..27d14d05b1 100644 --- a/source/util/src/tjson.c +++ b/source/util/src/tjson.c @@ -325,11 +325,10 @@ int32_t tjsonToTArray(const SJson* pJson, const char* pName, FToObject func, SAr const cJSON* jArray = tjsonGetObjectItem(pJson, pName); int32_t size = tjsonGetArraySize(jArray); if (size > 0) { - *pArray = taosArrayInit(size, itemSize); + *pArray = taosArrayInit_s(size, itemSize, size); if (NULL == *pArray) { return TSDB_CODE_OUT_OF_MEMORY; } - taosArraySetSize(*pArray, size); for (int32_t i = 0; i < size; ++i) { int32_t code = func(tjsonGetArrayItem(jArray, i), taosArrayGet(*pArray, i)); if (TSDB_CODE_SUCCESS != code) { diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c index 3cecfdff9c..6bcf4ad39b 100644 --- 
a/source/util/src/tpagedbuf.c +++ b/source/util/src/tpagedbuf.c @@ -2,7 +2,7 @@ #include "tpagedbuf.h" #include "taoserror.h" #include "tcompression.h" -#include "thash.h" +#include "tsimplehash.h" #include "tlog.h" #define GET_PAYLOAD_DATA(_p) ((char*)(_p)->pData + POINTER_BYTES) @@ -38,7 +38,7 @@ struct SDiskbasedBuf { int32_t inMemPages; // numOfPages that are allocated in memory SList* freePgList; // free page list SArray* pIdList; // page id list - SHashObj* all; + SSHashObj*all; SList* lruList; void* emptyDummyIdList; // dummy id list void* assistBuf; // assistant buffer for compress/decompress data @@ -374,12 +374,7 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem goto _error; } - pPBuf->assistBuf = taosMemoryMalloc(pPBuf->pageSize + 2); // EXTRA BYTES - if (pPBuf->assistBuf == NULL) { - goto _error; - } - - pPBuf->all = taosHashInit(10, fn, true, false); + pPBuf->all = tSimpleHashInit(64, fn); if (pPBuf->all == NULL) { goto _error; } @@ -441,7 +436,7 @@ void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) { } // add to hash map - taosHashPut(pBuf->all, pageId, sizeof(int32_t), &pi, POINTER_BYTES); + tSimpleHashPut(pBuf->all, pageId, sizeof(int32_t), &pi, POINTER_BYTES); pBuf->totalBufSize += pBuf->pageSize; } @@ -466,7 +461,7 @@ void* getBufPage(SDiskbasedBuf* pBuf, int32_t id) { pBuf->statis.getPages += 1; - SPageInfo** pi = taosHashGet(pBuf->all, &id, sizeof(int32_t)); + SPageInfo** pi = tSimpleHashGet(pBuf->all, &id, sizeof(int32_t)); if (pi == NULL || *pi == NULL) { uError("failed to locate the buffer page:%d, %s", id, pBuf->id); terrno = TSDB_CODE_INVALID_PARA; @@ -615,7 +610,7 @@ void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) { taosArrayDestroy(pBuf->emptyDummyIdList); taosArrayDestroy(pBuf->pFree); - taosHashCleanup(pBuf->all); + tSimpleHashCleanup(pBuf->all); taosMemoryFreeClear(pBuf->id); taosMemoryFreeClear(pBuf->assistBuf); @@ -641,7 +636,12 @@ void setBufPageDirty(void* pPage, bool dirty) { ppi->dirty = dirty; } -void setBufPageCompressOnDisk(SDiskbasedBuf* pBuf, bool comp) { pBuf->comp = comp; } +void setBufPageCompressOnDisk(SDiskbasedBuf* pBuf, bool comp) { + pBuf->comp = comp; + if (comp && (pBuf->assistBuf == NULL)) { + pBuf->assistBuf = taosMemoryMalloc(pBuf->pageSize + 2); // EXTRA BYTES + } +} void dBufSetBufPageRecycled(SDiskbasedBuf* pBuf, void* pPage) { SPageInfo* ppi = getPageInfoFromPayload(pPage); @@ -704,7 +704,7 @@ void clearDiskbasedBuf(SDiskbasedBuf* pBuf) { taosArrayClear(pBuf->emptyDummyIdList); taosArrayClear(pBuf->pFree); - taosHashClear(pBuf->all); + tSimpleHashClear(pBuf->all); pBuf->numOfPages = 0; // all pages are in buffer in the first place pBuf->totalBufSize = 0; diff --git a/source/libs/executor/src/tsimplehash.c b/source/util/src/tsimplehash.c similarity index 79% rename from source/libs/executor/src/tsimplehash.c rename to source/util/src/tsimplehash.c index fd6215e3a1..70acffed5d 100644 --- a/source/libs/executor/src/tsimplehash.c +++ b/source/util/src/tsimplehash.c @@ -18,12 +18,13 @@ #include "tlog.h" #include "tdef.h" +#define DEFAULT_BUF_PAGE_SIZE 1024 #define SHASH_DEFAULT_LOAD_FACTOR 0.75 #define HASH_MAX_CAPACITY (1024 * 1024 * 16L) #define SHASH_NEED_RESIZE(_h) ((_h)->size >= (_h)->capacity * SHASH_DEFAULT_LOAD_FACTOR) -#define GET_SHASH_NODE_KEY(_n, _dl) ((char *)(_n) + sizeof(SHNode) + (_dl)) -#define GET_SHASH_NODE_DATA(_n) ((char *)(_n) + sizeof(SHNode)) +#define GET_SHASH_NODE_DATA(_n) (((SHNode*)_n)->data) +#define GET_SHASH_NODE_KEY(_n, _dl) ((char*)GET_SHASH_NODE_DATA(_n) + (_dl)) 
 #define HASH_INDEX(v, c) ((v) & ((c)-1))
@@ -38,6 +39,8 @@ struct SSHashObj {
   int64_t    size;      // number of elements in hash table
   _hash_fn_t hashFp;    // hash function
   _equal_fn_t equalFp;  // equal function
+  SArray*    pHashNodeBuf;  // hash node allocation buffer, 1k size of each page by default
+  int32_t    offset;        // allocation offset in current page
 };
 
 static FORCE_INLINE int32_t taosHashCapacity(int32_t length) {
@@ -57,24 +60,28 @@ SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn) {
     capacity = 4;
   }
 
-  SSHashObj *pHashObj = (SSHashObj *)taosMemoryCalloc(1, sizeof(SSHashObj));
+  SSHashObj *pHashObj = (SSHashObj *)taosMemoryMalloc(sizeof(SSHashObj));
   if (!pHashObj) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
     return NULL;
   }
 
   // the max slots is not defined by user
-  pHashObj->capacity = taosHashCapacity((int32_t)capacity);
-
-  pHashObj->equalFp = memcmp;
   pHashObj->hashFp = fn;
+  pHashObj->capacity = taosHashCapacity((int32_t)capacity);
+  pHashObj->equalFp = memcmp;
+  pHashObj->pHashNodeBuf = taosArrayInit(10, sizeof(void*));
+  pHashObj->offset = 0;
+  pHashObj->size = 0;
+
   pHashObj->hashList = (SHNode **)taosMemoryCalloc(pHashObj->capacity, sizeof(void *));
   if (!pHashObj->hashList) {
     taosMemoryFree(pHashObj);
     terrno = TSDB_CODE_OUT_OF_MEMORY;
     return NULL;
   }
+
   return pHashObj;
 }
 
@@ -82,19 +89,53 @@ int32_t tSimpleHashGetSize(const SSHashObj *pHashObj) {
   if (!pHashObj) {
     return 0;
   }
-  return (int32_t)atomic_load_64((int64_t *)&pHashObj->size);
+  return (int32_t) pHashObj->size;
 }
 
-static SHNode *doCreateHashNode(const void *key, size_t keyLen, const void *data, size_t dataLen, uint32_t hashVal) {
-  SHNode *pNewNode = taosMemoryMalloc(sizeof(SHNode) + keyLen + dataLen);
+static void* doInternalAlloc(SSHashObj* pHashObj, int32_t size) {
+#if 0
+  void** p = taosArrayGetLast(pHashObj->pHashNodeBuf);
+  if (p == NULL || (pHashObj->offset + size) > DEFAULT_BUF_PAGE_SIZE) {
+    // let's allocate one new page
+    int32_t allocSize = TMAX(size, DEFAULT_BUF_PAGE_SIZE);
+    void* pNewPage = taosMemoryMalloc(allocSize);
+    if (pNewPage == NULL) {
+      return NULL;
+    }
+
+    // if the allocated buffer page is greater than DEFAULT_BUF_PAGE_SIZE,
+    // pHashObj->offset will always be greater than DEFAULT_BUF_PAGE_SIZE, which means that
+    // the current buffer page is full and a new buffer page needs to be allocated.
+ pHashObj->offset = size; + taosArrayPush(pHashObj->pHashNodeBuf, &pNewPage); + return pNewPage; + } else { + void* pPos = (char*)(*p) + pHashObj->offset; + pHashObj->offset += size; + return pPos; + } +#else + return taosMemoryMalloc(size); +#endif +} + +static SHNode *doCreateHashNode(SSHashObj *pHashObj, const void *key, size_t keyLen, const void *data, size_t dataLen, + uint32_t hashVal) { + SHNode *pNewNode = doInternalAlloc(pHashObj, sizeof(SHNode) + keyLen + dataLen); if (!pNewNode) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } + pNewNode->keyLen = keyLen; pNewNode->dataLen = dataLen; pNewNode->next = NULL; - if (data) memcpy(GET_SHASH_NODE_DATA(pNewNode), data, dataLen); + pNewNode->hashVal = hashVal; + + if (data) { + memcpy(GET_SHASH_NODE_DATA(pNewNode), data, dataLen); + } + memcpy(GET_SHASH_NODE_KEY(pNewNode, dataLen), key, keyLen); return pNewNode; } @@ -111,7 +152,7 @@ static void tSimpleHashTableResize(SSHashObj *pHashObj) { return; } - int64_t st = taosGetTimestampUs(); +// int64_t st = taosGetTimestampUs(); void *pNewEntryList = taosMemoryRealloc(pHashObj->hashList, POINTER_BYTES * newCapacity); if (!pNewEntryList) { uWarn("hash resize failed due to out of memory, capacity remain:%zu", pHashObj->capacity); @@ -134,10 +175,7 @@ static void tSimpleHashTableResize(SSHashObj *pHashObj) { SHNode *pPrev = NULL; while (pNode != NULL) { - void *key = GET_SHASH_NODE_KEY(pNode, pNode->dataLen); - uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pNode->keyLen); - - int32_t newIdx = HASH_INDEX(hashVal, pHashObj->capacity); + int32_t newIdx = HASH_INDEX(pNode->hashVal, pHashObj->capacity); pNext = pNode->next; if (newIdx != idx) { if (!pPrev) { @@ -156,8 +194,7 @@ static void tSimpleHashTableResize(SSHashObj *pHashObj) { } } - int64_t et = taosGetTimestampUs(); - +// int64_t et = taosGetTimestampUs(); // uDebug("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", // (int32_t)pHashObj->capacity, // ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0); @@ -179,13 +216,13 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, cons SHNode *pNode = pHashObj->hashList[slot]; if (!pNode) { - SHNode *pNewNode = doCreateHashNode(key, keyLen, data, dataLen, hashVal); + SHNode *pNewNode = doCreateHashNode(pHashObj, key, keyLen, data, dataLen, hashVal); if (!pNewNode) { return -1; } pHashObj->hashList[slot] = pNewNode; - atomic_add_fetch_64(&pHashObj->size, 1); + pHashObj->size += 1; return 0; } @@ -197,13 +234,13 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, cons } if (!pNode) { - SHNode *pNewNode = doCreateHashNode(key, keyLen, data, dataLen, hashVal); + SHNode *pNewNode = doCreateHashNode(pHashObj, key, keyLen, data, dataLen, hashVal); if (!pNewNode) { return -1; } pNewNode->next = pHashObj->hashList[slot]; pHashObj->hashList[slot] = pNewNode; - atomic_add_fetch_64(&pHashObj->size, 1); + pHashObj->size += 1; } else if (data) { // update data memcpy(GET_SHASH_NODE_DATA(pNode), data, dataLen); } @@ -270,7 +307,7 @@ int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key, size_t keyLen) { pPrev->next = pNode->next; } FREE_HASH_NODE(pNode); - atomic_sub_fetch_64(&pHashObj->size, 1); + pHashObj->size -= 1; code = TSDB_CODE_SUCCESS; break; } @@ -305,7 +342,7 @@ int32_t tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t ke } FREE_HASH_NODE(pNode); - atomic_sub_fetch_64(&pHashObj->size, 1); + pHashObj->size -= 1; break; } pPrev = pNode; @@ -315,6 +352,10 @@ int32_t 
tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t ke return TSDB_CODE_SUCCESS; } +static void destroyItems(void* pItem) { + taosMemoryFree(*(void**)pItem); +} + void tSimpleHashClear(SSHashObj *pHashObj) { if (!pHashObj || taosHashTableEmpty(pHashObj)) { return; @@ -332,9 +373,13 @@ void tSimpleHashClear(SSHashObj *pHashObj) { FREE_HASH_NODE(pNode); pNode = pNext; } + pHashObj->hashList[i] = NULL; } - atomic_store_64(&pHashObj->size, 0); + + taosArrayClearEx(pHashObj->pHashNodeBuf, destroyItems); + pHashObj->offset = 0; + pHashObj->size = 0; } void tSimpleHashCleanup(SSHashObj *pHashObj) { @@ -343,6 +388,7 @@ void tSimpleHashCleanup(SSHashObj *pHashObj) { } tSimpleHashClear(pHashObj); + taosArrayDestroy(pHashObj->pHashNodeBuf); taosMemoryFreeClear(pHashObj->hashList); taosMemoryFree(pHashObj); } diff --git a/source/util/src/tutil.c b/source/util/src/tutil.c index e94f94a00d..55d7d4f6e7 100644 --- a/source/util/src/tutil.c +++ b/source/util/src/tutil.c @@ -159,10 +159,6 @@ char *strtolower(char *dst, const char *src) { int32_t esc = 0; char quote = 0, *p = dst, c; - if (ASSERTS(dst != NULL, "dst is NULL")) { - return NULL; - } - for (c = *src++; c; c = *src++) { if (esc) { esc = 0; @@ -188,10 +184,6 @@ char *strntolower(char *dst, const char *src, int32_t n) { int32_t esc = 0; char quote = 0, *p = dst, c; - if (ASSERTS(dst != NULL, "dst is NULL")) { - return NULL; - } - if (n == 0) { *p = 0; return dst; @@ -219,11 +211,6 @@ char *strntolower(char *dst, const char *src, int32_t n) { char *strntolower_s(char *dst, const char *src, int32_t n) { char *p = dst, c; - - if (ASSERTS(dst != NULL, "dst is NULL")) { - return NULL; - } - if (n == 0) { return NULL; } @@ -333,6 +320,50 @@ char *strbetween(char *string, char *begin, char *end) { return result; } +int32_t tintToHex(uint64_t val, char hex[]) { + const char hexstr[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; + + int32_t j = 0, k = 0; + if (val == 0) { + hex[j++] = hexstr[0]; + return j; + } + + // ignore the initial 0 + while((val & (((uint64_t)0xfL) << ((15 - k) * 4))) == 0) { + k += 1; + } + + for (j = 0; k < 16; ++k, ++j) { + hex[j] = hexstr[(val & (((uint64_t)0xfL) << ((15 - k) * 4))) >> (15 - k) * 4]; + } + + return j; +} + +int32_t titoa(uint64_t val, size_t radix, char str[]) { + if (radix < 2 || radix > 16) { + return 0; + } + + const char* s = "0123456789abcdef"; + char buf[65] = {0}; + + int32_t i = 0; + uint64_t v = val; + while(v > 0) { + buf[i++] = s[v % radix]; + v /= radix; + } + + // reverse order + for(int32_t j = 0; j < i; ++j) { + str[j] = buf[i - j - 1]; + } + + return i; +} + int32_t taosByteArrayToHexStr(char bytes[], int32_t len, char hexstr[]) { int32_t i; char hexval[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; diff --git a/source/util/src/xxhash.c b/source/util/src/xxhash.c new file mode 100644 index 0000000000..ff28749e31 --- /dev/null +++ b/source/util/src/xxhash.c @@ -0,0 +1,1030 @@ +/* +* xxHash - Fast Hash algorithm +* Copyright (C) 2012-2016, Yann Collet +* +* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. 
+*  * Redistributions in binary form must reproduce the above
+*  copyright notice, this list of conditions and the following disclaimer
+*  in the documentation and/or other materials provided with the
+*  distribution.
+*
+*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+*  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+*  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+*  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+*  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+*  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+*  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+*  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+*  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*  You can contact the author at :
+*  - xxHash homepage: http://www.xxhash.com
+*  - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+
+/* *************************************
+*  Tuning parameters
+***************************************/
+/*!XXH_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The below switch allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on compiler but violate C standard.
+ *            It can generate buggy code on targets which do not support unaligned memory accesses.
+ *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
+ * See http://stackoverflow.com/a/32095106/646947 for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
+                        || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
+                        || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+#    define XXH_FORCE_MEMORY_ACCESS 2
+#  elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
+  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
+                    || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
+                    || defined(__ARM_ARCH_7S__) ))
+#    define XXH_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
+
+/*!XXH_ACCEPT_NULL_INPUT_POINTER :
+ * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault.
+ * When this macro is enabled, xxHash actively checks input for null pointer.
+ * If it is, result for null input pointers is the same as a null-length input.
+ */
+#ifndef XXH_ACCEPT_NULL_INPUT_POINTER   /* can be defined externally */
+#  define XXH_ACCEPT_NULL_INPUT_POINTER 0
+#endif
+
+/*!XXH_FORCE_NATIVE_FORMAT :
+ * By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
+ * Results are therefore identical for little-endian and big-endian CPU.
+ * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
+ * Should endian-independence be of no importance for your application, you may set the #define below to 1,
+ * to improve speed for Big-endian CPU.
+ * This option has no impact on Little_Endian CPU.
+ */
+#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
+#  define XXH_FORCE_NATIVE_FORMAT 0
+#endif
+
+/*!XXH_FORCE_ALIGN_CHECK :
+ * This is a minor performance trick, only useful with lots of very small keys.
+ * It means : check for aligned/unaligned input.
+ * The check costs one initial branch per hash;
+ * set it to 0 when the input is guaranteed to be aligned,
+ * or when alignment doesn't matter for performance.
+ */
+#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+#    define XXH_FORCE_ALIGN_CHECK 0
+#  else
+#    define XXH_FORCE_ALIGN_CHECK 1
+#  endif
+#endif
+
+
+/* *************************************
+*  Includes & Memory related functions
+***************************************/
+/*! Modify the local functions below should you wish to use some other memory routines
+*   for malloc(), free() */
+#include <stdlib.h>
+static void* XXH_malloc(size_t s) { return malloc(s); }
+static void  XXH_free  (void* p)  { free(p); }
+/*! and for memcpy() */
+#include <string.h>
+static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
+
+#include <assert.h>   /* assert */
+
+#define XXH_STATIC_LINKING_ONLY
+#include "xxhash.h"
+
+
+/* *************************************
+*  Compiler Specific Options
+***************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
+#  define FORCE_INLINE static __forceinline
+#else
+#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#    ifdef __GNUC__
+#      define FORCE_INLINE static inline __attribute__((always_inline))
+#    else
+#      define FORCE_INLINE static inline
+#    endif
+#  else
+#    define FORCE_INLINE static
+#  endif /* __STDC_VERSION__ */
+#endif
+
+
+/* *************************************
+*  Basic Types
+***************************************/
+#ifndef MEM_MODULE
+# if !defined (__VMS) \
+   && (defined (__cplusplus) \
+   || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+#   include <stdint.h>
+    typedef uint8_t  BYTE;
+    typedef uint16_t U16;
+    typedef uint32_t U32;
+# else
+    typedef unsigned char      BYTE;
+    typedef unsigned short     U16;
+    typedef unsigned int       U32;
+# endif
+#endif
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
+static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U32 u32; } __attribute__((packed)) unalign;
+static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947 + */ +static U32 XXH_read32(const void* memPtr) +{ + U32 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + + +/* **************************************** +* Compiler-specific Functions and Macros +******************************************/ +#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + +/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ +#if defined(_MSC_VER) +# define XXH_rotl32(x,r) _rotl(x,r) +# define XXH_rotl64(x,r) _rotl64(x,r) +#else +# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) +# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) +#endif + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap32 _byteswap_ulong +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap32 __builtin_bswap32 +#else +static U32 XXH_swap32 (U32 x) +{ + return ((x << 24) & 0xff000000 ) | + ((x << 8) & 0x00ff0000 ) | + ((x >> 8) & 0x0000ff00 ) | + ((x >> 24) & 0x000000ff ); +} +#endif + + +/* ************************************* +* Architecture Macros +***************************************/ +typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; + +/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ +#ifndef XXH_CPU_LITTLE_ENDIAN +static int XXH_isLittleEndian(void) +{ + const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + return one.c[0]; +} +# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() +#endif + + +/* *************************** +* Memory reads +*****************************/ +typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; + +FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +{ + if (align==XXH_unaligned) + return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); + else + return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); +} + +FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) +{ + return XXH_readLE32_align(ptr, endian, XXH_unaligned); +} + +static U32 XXH_readBE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); +} + + +/* ************************************* +* Macros +***************************************/ +#define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; } /* use after variable declarations */ +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } + + +/* ******************************************************************* +* 32-bit hash functions +*********************************************************************/ +static const U32 PRIME32_1 = 2654435761U; +static const U32 PRIME32_2 = 2246822519U; +static const U32 PRIME32_3 = 3266489917U; +static const U32 PRIME32_4 = 668265263U; +static const U32 PRIME32_5 = 374761393U; + +static U32 XXH32_round(U32 seed, U32 input) +{ + seed += input * PRIME32_2; + seed = XXH_rotl32(seed, 13); + seed *= PRIME32_1; + return seed; +} + +/* mix all bits */ +static U32 XXH32_avalanche(U32 h32) +{ + h32 ^= h32 >> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + return(h32); +} + +#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) + +static U32 +XXH32_finalize(U32 h32, const void* ptr, size_t len, + XXH_endianess endian, XXH_alignment align) + +{ + const BYTE* p = (const BYTE*)ptr; + +#define PROCESS1 \ + h32 += (*p++) * PRIME32_5; \ + h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; + +#define PROCESS4 \ + h32 += XXH_get32bits(p) * PRIME32_3; \ + p+=4; \ + h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; + + switch(len&15) /* or switch(bEnd - p) */ + { + case 12: PROCESS4; + /* fallthrough */ + case 8: PROCESS4; + /* fallthrough */ + case 4: PROCESS4; + return XXH32_avalanche(h32); + + case 13: PROCESS4; + /* fallthrough */ + case 9: PROCESS4; + /* fallthrough */ + case 5: PROCESS4; + PROCESS1; + return XXH32_avalanche(h32); + + case 14: PROCESS4; + /* fallthrough */ + case 10: PROCESS4; + /* fallthrough */ + case 6: PROCESS4; + PROCESS1; + PROCESS1; + return XXH32_avalanche(h32); + + case 15: PROCESS4; + /* fallthrough */ + case 11: PROCESS4; + /* fallthrough */ + case 7: PROCESS4; + /* fallthrough */ + case 3: PROCESS1; + /* fallthrough */ + case 2: PROCESS1; + /* fallthrough */ + case 1: PROCESS1; + /* fallthrough */ + case 0: return XXH32_avalanche(h32); + } + assert(0); + return h32; /* reaching this point is deemed impossible */ +} + + +FORCE_INLINE U32 +XXH32_endian_align(const void* input, size_t len, U32 seed, + XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* bEnd = p + len; + U32 h32; + +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)16; + } +#endif + + if (len>=16) { + const BYTE* const limit = bEnd - 15; + U32 v1 = seed + PRIME32_1 + PRIME32_2; + U32 v2 = seed + PRIME32_2; + U32 v3 = seed + 0; + U32 v4 = seed - PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; + v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; + v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; + v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; + } while (p < limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + PRIME32_5; + } + + h32 += (U32)len; + + return XXH32_finalize(h32, p, len&15, endian, align); +} + + +XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH32_state_t state; + XXH32_reset(&state, 
seed); + XXH32_update(&state, input, len); + return XXH32_digest(&state); +#else + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + + + +/*====== Hash streaming ======*/ + +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) +{ + return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) +{ + XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)); + state.v1 = seed + PRIME32_1 + PRIME32_2; + state.v2 = seed + PRIME32_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME32_1; + /* do not write into reserved, planned to be removed in a future version */ + memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); + return XXH_OK; +} + + +FORCE_INLINE XXH_errorcode +XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) +{ + if (input==NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + return XXH_OK; +#else + return XXH_ERROR; +#endif + + { const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + + state->total_len_32 += (unsigned)len; + state->large_len |= (len>=16) | (state->total_len_32>=16); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); + state->memsize += (unsigned)len; + return XXH_OK; + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const U32* p32 = state->mem32; + state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; + state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; + state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; + state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); + } + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= bEnd-16) { + const BYTE* const limit = bEnd - 16; + U32 v1 = state->v1; + U32 v2 = state->v2; + U32 v3 = state->v3; + U32 v4 = state->v4; + + do { + v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; + v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; + v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; + v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +XXH_PUBLIC_API XXH_errorcode XXH32_update 
(XXH32_state_t* state_in, const void* input, size_t len) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH32_update_endian(state_in, input, len, XXH_bigEndian); +} + + +FORCE_INLINE U32 +XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) +{ + U32 h32; + + if (state->large_len) { + h32 = XXH_rotl32(state->v1, 1) + + XXH_rotl32(state->v2, 7) + + XXH_rotl32(state->v3, 12) + + XXH_rotl32(state->v4, 18); + } else { + h32 = state->v3 /* == seed */ + PRIME32_5; + } + + h32 += state->total_len_32; + + return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned); +} + + +XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_digest_endian(state_in, XXH_littleEndian); + else + return XXH32_digest_endian(state_in, XXH_bigEndian); +} + + +/*====== Canonical representation ======*/ + +/*! Default XXH result types are basic unsigned 32 and 64 bits. +* The canonical representation follows human-readable write convention, aka big-endian (large digits first). +* These functions allow transformation of hash result into and from its canonical format. +* This way, hash values can be written into a file or buffer, remaining comparable across different systems. +*/ + +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) +{ + return XXH_readBE32(src); +} + + +#ifndef XXH_NO_LONG_LONG + +/* ******************************************************************* +* 64-bit hash functions +*********************************************************************/ + +/*====== Memory access ======*/ + +#ifndef MEM_MODULE +# define MEM_MODULE +# if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + typedef uint64_t U64; +# else + /* if compiler doesn't support unsigned long long, replace by another 64-bit type */ + typedef unsigned long long U64; +# endif +#endif + + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64; +static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; } + +#else + +/* portable and safe solution. Generally efficient. 
+ * see : http://stackoverflow.com/a/32095106/646947 + */ + +static U64 XXH_read64(const void* memPtr) +{ + U64 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap64 _byteswap_uint64 +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap64 __builtin_bswap64 +#else +static U64 XXH_swap64 (U64 x) +{ + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); +} +#endif + +FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +{ + if (align==XXH_unaligned) + return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); + else + return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); +} + +FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) +{ + return XXH_readLE64_align(ptr, endian, XXH_unaligned); +} + +static U64 XXH_readBE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +} + + +/*====== xxh64 ======*/ + +static const U64 PRIME64_1 = 11400714785074694791ULL; +static const U64 PRIME64_2 = 14029467366897019727ULL; +static const U64 PRIME64_3 = 1609587929392839161ULL; +static const U64 PRIME64_4 = 9650029242287828579ULL; +static const U64 PRIME64_5 = 2870177450012600261ULL; + +static U64 XXH64_round(U64 acc, U64 input) +{ + acc += input * PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= PRIME64_1; + return acc; +} + +static U64 XXH64_mergeRound(U64 acc, U64 val) +{ + val = XXH64_round(0, val); + acc ^= val; + acc = acc * PRIME64_1 + PRIME64_4; + return acc; +} + +static U64 XXH64_avalanche(U64 h64) +{ + h64 ^= h64 >> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + return h64; +} + + +#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) + +static U64 +XXH64_finalize(U64 h64, const void* ptr, size_t len, + XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)ptr; + +#define PROCESS1_64 \ + h64 ^= (*p++) * PRIME64_5; \ + h64 = XXH_rotl64(h64, 11) * PRIME64_1; + +#define PROCESS4_64 \ + h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \ + p+=4; \ + h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + +#define PROCESS8_64 { \ + U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \ + p+=8; \ + h64 ^= k1; \ + h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \ +} + + switch(len&31) { + case 24: PROCESS8_64; + /* fallthrough */ + case 16: PROCESS8_64; + /* fallthrough */ + case 8: PROCESS8_64; + return XXH64_avalanche(h64); + + case 28: PROCESS8_64; + /* fallthrough */ + case 20: PROCESS8_64; + /* fallthrough */ + case 12: PROCESS8_64; + /* fallthrough */ + case 4: PROCESS4_64; + return XXH64_avalanche(h64); + + case 25: PROCESS8_64; + /* fallthrough */ + case 17: PROCESS8_64; + /* fallthrough */ + case 9: PROCESS8_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 29: PROCESS8_64; + /* fallthrough */ + case 21: PROCESS8_64; + /* fallthrough */ + case 13: PROCESS8_64; + /* fallthrough */ + case 5: PROCESS4_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 26: PROCESS8_64; + /* fallthrough */ + case 18: PROCESS8_64; + /* fallthrough */ + case 10: PROCESS8_64; + PROCESS1_64; + PROCESS1_64; + return 
XXH64_avalanche(h64); + + case 30: PROCESS8_64; + /* fallthrough */ + case 22: PROCESS8_64; + /* fallthrough */ + case 14: PROCESS8_64; + /* fallthrough */ + case 6: PROCESS4_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 27: PROCESS8_64; + /* fallthrough */ + case 19: PROCESS8_64; + /* fallthrough */ + case 11: PROCESS8_64; + PROCESS1_64; + PROCESS1_64; + PROCESS1_64; + return XXH64_avalanche(h64); + + case 31: PROCESS8_64; + /* fallthrough */ + case 23: PROCESS8_64; + /* fallthrough */ + case 15: PROCESS8_64; + /* fallthrough */ + case 7: PROCESS4_64; + /* fallthrough */ + case 3: PROCESS1_64; + /* fallthrough */ + case 2: PROCESS1_64; + /* fallthrough */ + case 1: PROCESS1_64; + /* fallthrough */ + case 0: return XXH64_avalanche(h64); + } + + /* impossible to reach */ + assert(0); + return 0; /* unreachable, but some compilers complain without it */ +} + +FORCE_INLINE U64 +XXH64_endian_align(const void* input, size_t len, U64 seed, + XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* bEnd = p + len; + U64 h64; + +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)32; + } +#endif + + if (len>=32) { + const BYTE* const limit = bEnd - 32; + U64 v1 = seed + PRIME64_1 + PRIME64_2; + U64 v2 = seed + PRIME64_2; + U64 v3 = seed + 0; + U64 v4 = seed - PRIME64_1; + + do { + v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; + v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; + v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; + v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; + } while (p<=limit); + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + + } else { + h64 = seed + PRIME64_5; + } + + h64 += (U64) len; + + return XXH64_finalize(h64, p, len, endian, align); +} + + +XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH64_state_t state; + XXH64_reset(&state, seed); + XXH64_update(&state, input, len); + return XXH64_digest(&state); +#else + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + +/*====== Hash Streaming ======*/ + +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) +{ + return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) +{ + 
XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)); + state.v1 = seed + PRIME64_1 + PRIME64_2; + state.v2 = seed + PRIME64_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME64_1; + /* do not write into reserved, planned to be removed in a future version */ + memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); + return XXH_OK; +} + +FORCE_INLINE XXH_errorcode +XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) +{ + if (input==NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + return XXH_OK; +#else + return XXH_ERROR; +#endif + + { const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); + state->memsize += (U32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); + state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); + state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); + state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); + p += 32-state->memsize; + state->memsize = 0; + } + + if (p+32 <= bEnd) { + const BYTE* const limit = bEnd - 32; + U64 v1 = state->v1; + U64 v2 = state->v2; + U64 v3 = state->v3; + U64 v4 = state->v4; + + do { + v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; + v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; + v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; + v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH64_update_endian(state_in, input, len, XXH_bigEndian); +} + +FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) +{ + U64 h64; + + if (state->total_len >= 32) { + U64 const v1 = state->v1; + U64 const v2 = state->v2; + U64 const v3 = state->v3; + U64 const v4 = state->v4; + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + } else { + h64 = state->v3 /*seed*/ + PRIME64_5; + } + + h64 += (U64) state->total_len; + + return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, XXH_aligned); +} + +XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_digest_endian(state_in, XXH_littleEndian); + else + return XXH64_digest_endian(state_in, XXH_bigEndian); +} + + +/*====== Canonical 
representation ======*/ + +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); +} + +#endif /* XXH_NO_LONG_LONG */ diff --git a/source/util/test/utilTests.cpp b/source/util/test/utilTests.cpp index c56ef348cc..a355125410 100644 --- a/source/util/test/utilTests.cpp +++ b/source/util/test/utilTests.cpp @@ -294,4 +294,32 @@ TEST(utilTest, tstrncspn) { const char* reject5 = "911"; v = tstrncspn(p2, strlen(p2), reject5, 0); ASSERT_EQ(v, 14); +} + +TEST(utilTest, intToHextStr) { + char buf[64] = {0}; + + int64_t v = 0; + tintToHex(0, buf); + ASSERT_STREQ(buf, "0"); + + v = 100000000; + tintToHex(v, buf); + + char destBuf[128]; + sprintf(destBuf, "%" PRIx64, v); + ASSERT_STREQ(buf, destBuf); + + taosSeedRand(taosGetTimestampSec()); + + for(int32_t i = 0; i < 100000; ++i) { + memset(buf, 0, tListLen(buf)); + memset(destBuf, 0, tListLen(destBuf)); + + v = taosRand(); + tintToHex(v, buf); + + sprintf(destBuf, "%" PRIx64, v); + ASSERT_STREQ(buf, destBuf); + } } \ No newline at end of file diff --git a/tests/develop-test/2-query/table_count_scan.py b/tests/develop-test/2-query/table_count_scan.py index 3ca7e08cd0..758d28948d 100644 --- a/tests/develop-test/2-query/table_count_scan.py +++ b/tests/develop-test/2-query/table_count_scan.py @@ -14,7 +14,7 @@ class TDTestCase: def init(self, conn, logSql, replicaVer=1): tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), False) + tdSql.init(conn.cursor(), True) self._conn = conn def restartTaosd(self, index=1, dbname="db"): @@ -75,7 +75,7 @@ class TDTestCase: tdSql.checkData(2, 1, 'performance_schema') tdSql.checkData(2, 2, None) - tdSql.query('select count(1),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;') + tdSql.query('select count(1) v,db_name, stable_name from information_schema.ins_tables group by db_name, stable_name order by v desc;') tdSql.checkRows(3) tdSql.checkData(0, 0, 24) tdSql.checkData(0, 1, 'information_schema') @@ -87,12 +87,12 @@ class TDTestCase: tdSql.checkData(2, 1, 'tbl_count') tdSql.checkData(2, 2, 'stb1') - tdSql.query('select count(1),db_name from information_schema.ins_tables group by db_name') + tdSql.query('select count(1) v,db_name from information_schema.ins_tables group by db_name order by v asc') tdSql.checkRows(3) - tdSql.checkData(0, 0, 5) - tdSql.checkData(0, 1, 'performance_schema') - tdSql.checkData(1, 0, 3) - tdSql.checkData(1, 1, 'tbl_count') + tdSql.checkData(1, 0, 5) + tdSql.checkData(1, 1, 'performance_schema') + tdSql.checkData(0, 0, 3) + tdSql.checkData(0, 1, 'tbl_count') tdSql.checkData(2, 0, 24) tdSql.checkData(2, 1, 'information_schema') @@ -177,42 +177,44 @@ class TDTestCase: tdSql.execute('insert into tba1 values (\'2021-11-11 09:00:29\',true, 0,0,0,0,0,0,"000","0000",0,0,0,0);') - tdSql.query('select count(*),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;') + tdSql.query('select count(*) v,db_name, stable_name from information_schema.ins_tables group by db_name, stable_name order by v;') tdSql.checkRows(4) tdSql.checkData(0, 0, 1) tdSql.checkData(0, 1, 'tbl_count') tdSql.checkData(0, 2, 'stba') - tdSql.checkData(1, 0, 24) - tdSql.checkData(1, 1, 'information_schema') - tdSql.checkData(1, 2, 
None) - tdSql.checkData(2, 0, 3) - tdSql.checkData(2, 1, 'tbl_count') - tdSql.checkData(2, 2, 'stb1') - tdSql.checkData(3, 0, 5) - tdSql.checkData(3, 1, 'performance_schema') + + tdSql.checkData(1, 0, 3) + tdSql.checkData(1, 1, 'tbl_count') + tdSql.checkData(1, 2, 'stb1') + tdSql.checkData(2, 0, 5) + tdSql.checkData(2, 1, 'performance_schema') + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 0, 24) + tdSql.checkData(3, 1, 'information_schema') tdSql.checkData(3, 2, None) - tdSql.query('select count(1),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;') + tdSql.query('select count(1) v,db_name, stable_name from information_schema.ins_tables group by db_name, stable_name order by v;') tdSql.checkRows(4) - tdSql.checkData(0, 0, 24) - tdSql.checkData(0, 1, 'information_schema') - tdSql.checkData(0, 2, None) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 'tbl_count') + tdSql.checkData(0, 2, 'stba') + tdSql.checkData(1, 0, 3) + tdSql.checkData(1, 1, 'tbl_count') + tdSql.checkData(1, 2, 'stb1') + tdSql.checkData(2, 0, 5) + tdSql.checkData(2, 1, 'performance_schema') + tdSql.checkData(2, 2, None) + tdSql.checkData(3, 0, 24) + tdSql.checkData(3, 1, 'information_schema') + tdSql.checkData(3, 2, None) + + tdSql.query('select count(1) v,db_name from information_schema.ins_tables group by db_name order by v') + tdSql.checkRows(3) + + tdSql.checkData(0, 0, 4) + tdSql.checkData(0, 1, 'tbl_count') tdSql.checkData(1, 0, 5) tdSql.checkData(1, 1, 'performance_schema') - tdSql.checkData(1, 2, None) - tdSql.checkData(2, 0, 1) - tdSql.checkData(2, 1, 'tbl_count') - tdSql.checkData(2, 2, 'stba') - tdSql.checkData(3, 0, 3) - tdSql.checkData(3, 1, 'tbl_count') - tdSql.checkData(3, 2, 'stb1') - - tdSql.query('select count(1),db_name from information_schema.ins_tables group by db_name') - tdSql.checkRows(3) - tdSql.checkData(0, 0, 5) - tdSql.checkData(0, 1, 'performance_schema') - tdSql.checkData(1, 0, 4) - tdSql.checkData(1, 1, 'tbl_count') tdSql.checkData(2, 0, 24) tdSql.checkData(2, 1, 'information_schema') diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestInspect.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestInspect.py index a6c2062d6c..1ccbb1f7d6 100644 --- a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestInspect.py +++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestInspect.py @@ -11,24 +11,20 @@ # -*- coding: utf-8 -*- -import sys import os from util.log import * from util.cases import * from util.sql import * from util.dnodes import * -import subprocess class TDTestCase: def caseDescription(self): - ''' + """ case1: [TD-14544] taosdump data inspect - ''' - return + """ def init(self, conn, logSql, replicaVar=1): - self.replicaVar = int(replicaVar) tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) self.tmpdir = "tmp" @@ -36,44 +32,56 @@ class TDTestCase: def getPath(self, tool="taosdump"): selfPath = os.path.dirname(os.path.realpath(__file__)) - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] + if "community" in selfPath: + projPath = selfPath[: selfPath.find("community")] + elif "src" in selfPath: + projPath = selfPath[: selfPath.find("src")] + elif "/tools/" in selfPath: + projPath = selfPath[: selfPath.find("/tools/")] + elif "/tests/" in selfPath: + projPath = selfPath[: selfPath.find("/tests/")] else: - projPath = selfPath[:selfPath.find("tests")] + tdLog.info("cannot found %s in path: %s, use system's" % (tool, selfPath)) + projPath = "/usr/local/taos/bin" 
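The refactored `getPath()` above derives the project root from well-known path markers ("community", "src", "/tools/", "/tests/") and otherwise falls back to the system install directory; the directory walk that continues below then locates the `taosdump` binary while skipping packaging trees. A self-contained sketch of that lookup pattern, assuming a hypothetical `find_tool()` helper that is not part of the test suite:

```python
import os

def find_tool(start_dir, tool="taosdump", fallback="/usr/local/taos/bin"):
    """Illustrative lookup: derive a project root from start_dir, then walk it
    for `tool`, skipping any directory whose real path contains 'packaging'."""
    proj_path = fallback
    for marker in ("community", "src", "/tools/", "/tests/"):
        if marker in start_dir:
            proj_path = start_dir[: start_dir.find(marker)]
            break

    for root, _dirs, files in os.walk(proj_path):
        if tool in files and "packaging" not in os.path.realpath(root):
            return os.path.join(root, tool)
    return ""

# Example: look relative to this file, as the test helper does.
if __name__ == "__main__":
    print(find_tool(os.path.dirname(os.path.realpath(__file__))))
```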
paths = [] - for root, dirs, files in os.walk(projPath): - if ((tool) in files): + for root, dummy, files in os.walk(projPath): + if (tool) in files: rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): + if "packaging" not in rootRealPath: paths.append(os.path.join(root, tool)) break - if (len(paths) == 0): + if len(paths) == 0: return "" return paths[0] def run(self): - tdSql.prepare(replica=f"{self.replicaVar}") + tdSql.prepare() tdSql.execute("drop database if exists db") - tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + tdSql.execute("create database db keep 3649 ") tdSql.execute("use db") tdSql.execute( - "create table st(ts timestamp, c1 INT, c2 BOOL, c3 TINYINT, c4 SMALLINT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 TIMESTAMP, c9 BINARY(10), c10 NCHAR(10), c11 TINYINT UNSIGNED, c12 SMALLINT UNSIGNED, c13 INT UNSIGNED, c14 BIGINT UNSIGNED) tags(n1 INT, w2 BOOL, t3 TINYINT, t4 SMALLINT, t5 BIGINT, t6 FLOAT, t7 DOUBLE, t8 TIMESTAMP, t9 BINARY(10), t10 NCHAR(10), t11 TINYINT UNSIGNED, t12 SMALLINT UNSIGNED, t13 INT UNSIGNED, t14 BIGINT UNSIGNED)") + "create table st(ts timestamp, c1 INT, c2 BOOL, c3 TINYINT, c4 SMALLINT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 TIMESTAMP, c9 BINARY(10), c10 NCHAR(10), c11 TINYINT UNSIGNED, c12 SMALLINT UNSIGNED, c13 INT UNSIGNED, c14 BIGINT UNSIGNED) tags(n1 INT, w2 BOOL, t3 TINYINT, t4 SMALLINT, t5 BIGINT, t6 FLOAT, t7 DOUBLE, t8 TIMESTAMP, t9 BINARY(10), t10 NCHAR(10), t11 TINYINT UNSIGNED, t12 SMALLINT UNSIGNED, t13 INT UNSIGNED, t14 BIGINT UNSIGNED)" + ) tdSql.execute( - "create table t1 using st tags(1, true, 1, 1, 1, 1.0, 1.0, 1, '1', '一', 1, 1, 1, 1)") + "create table t1 using st tags(1, true, 1, 1, 1, 1.0, 1.0, 1, '1', '一', 1, 1, 1, 1)" + ) tdSql.execute( - "insert into t1 values(1640000000000, 1, true, 1, 1, 1, 1.0, 1.0, 1, '1', '一', 1, 1, 1, 1)") + "insert into t1 values(1640000000000, 1, true, 1, 1, 1, 1.0, 1.0, 1, '1', '一', 1, 1, 1, 1)" + ) tdSql.execute( - "create table t2 using st tags(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)") + "create table t2 using st tags(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)" + ) tdSql.execute( - "insert into t2 values(1640000000000, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)") + "insert into t2 values(1640000000000, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)" + ) -# sys.exit(1) + # sys.exit(1) - binPath = self.getPath("taosdump") - if (binPath == ""): + binPath = self.getPath() + if binPath == "": tdLog.exit("taosdump not found!") else: tdLog.info("taosdump found in %s" % binPath) @@ -85,35 +93,73 @@ class TDTestCase: os.system("rm -rf %s" % self.tmpdir) os.makedirs(self.tmpdir) - os.system( - "%s --databases db -o %s -T 1" % - (binPath, self.tmpdir)) + os.system("%s --databases db -o %s -T 1" % (binPath, self.tmpdir)) -# sys.exit(1) + # sys.exit(1) - taosdumpInspectCmd = "%s -I %s/*.avro* -s | grep 'Schema:'|wc -l" % ( - binPath, self.tmpdir) - schemaTimes = subprocess.check_output( - taosdumpInspectCmd, shell=True).decode("utf-8") + taosdumpInspectCmd = "%s -I %s/taosdump.*/*.avro* -s | grep 'Schema:'|wc -l" % ( + binPath, + self.tmpdir, + ) + schemaTimes = subprocess.check_output(taosdumpInspectCmd, shell=True).decode( + "utf-8" + ) print("schema found times: %d" % int(schemaTimes)) - if (int(schemaTimes) != 3): + if int(schemaTimes) != 1: caller = inspect.getframeinfo(inspect.stack()[0][0]) 
tdLog.exit( - "%s(%d) failed: expected schema found times 3, actual %d" % - (caller.filename, caller.lineno, int(schemaTimes))) + "%s(%d) failed: expected schema found times 1, actual %d" + % (caller.filename, caller.lineno, int(schemaTimes)) + ) - taosdumpInspectCmd = "%s -I %s/*.avro* | grep '=== Records:'|wc -l" % ( - binPath, self.tmpdir) - recordsTimes = subprocess.check_output( - taosdumpInspectCmd, shell=True).decode("utf-8") + taosdumpInspectCmd = ( + "%s -I %s/taosdump*/data*/*.avro* -s | grep 'Schema:'|wc -l" + % (binPath, self.tmpdir) + ) + schemaTimes = subprocess.check_output(taosdumpInspectCmd, shell=True).decode( + "utf-8" + ) + print("schema found times: %d" % int(schemaTimes)) + + if int(schemaTimes) != 2: + caller = inspect.getframeinfo(inspect.stack()[0][0]) + tdLog.exit( + "%s(%d) failed: expected schema found times 2, actual %d" + % (caller.filename, caller.lineno, int(schemaTimes)) + ) + + taosdumpInspectCmd = ( + "%s -I %s/taosdump*/*.avro* | grep '=== Records:'|wc -l" + % (binPath, self.tmpdir) + ) + recordsTimes = subprocess.check_output(taosdumpInspectCmd, shell=True).decode( + "utf-8" + ) print("records found times: %d" % int(recordsTimes)) - if (int(recordsTimes) != 3): + if int(recordsTimes) != 1: caller = inspect.getframeinfo(inspect.stack()[0][0]) tdLog.exit( - "%s(%d) failed: expected records found times 3, actual %d" % - (caller.filename, caller.lineno, int(recordsTimes))) + "%s(%d) failed: expected records found times 1, actual %d" + % (caller.filename, caller.lineno, int(recordsTimes)) + ) + + taosdumpInspectCmd = ( + "%s -I %s/taosdump*/data*/*.avro* | grep '=== Records:'|wc -l" + % (binPath, self.tmpdir) + ) + recordsTimes = subprocess.check_output(taosdumpInspectCmd, shell=True).decode( + "utf-8" + ) + print("records found times: %d" % int(recordsTimes)) + + if int(recordsTimes) != 2: + caller = inspect.getframeinfo(inspect.stack()[0][0]) + tdLog.exit( + "%s(%d) failed: expected records found times 2, actual %d" + % (caller.filename, caller.lineno, int(recordsTimes)) + ) def stop(self): tdSql.close() diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 38c0b3e1ee..63760d6ae4 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -305,7 +305,7 @@ ,,y,script,./test.sh -f tsim/vnode/replica3_repeat.sim ,,y,script,./test.sh -f tsim/vnode/replica3_vgroup.sim ,,y,script,./test.sh -f tsim/vnode/replica3_many.sim -,,y,script,./test.sh -f tsim/vnode/replica3_import.sim +#,,y,script,./test.sh -f tsim/vnode/replica3_import.sim ,,y,script,./test.sh -f tsim/vnode/stable_balance_replica1.sim ,,y,script,./test.sh -f tsim/vnode/stable_dnode2_stop.sim ,,y,script,./test.sh -f tsim/vnode/stable_dnode2.sim @@ -429,6 +429,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_manage.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py ,,n,system-test,python3 ./test.py -f 0-others/compatibility.py +,,n,system-test,python3 ./test.py -f 0-others/tag_index_basic.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_database.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/opentsdb_telnet_line_taosc_insert.py @@ -553,6 +554,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -R +,,y,system-test,./pytest.sh python3 
./test.py -f 2-query/max_min_last_interval.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/min.py @@ -842,6 +844,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -Q 2 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tsbsQuery.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py -Q 2 @@ -938,6 +941,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tsbsQuery.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py -Q 3 @@ -1035,6 +1039,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_last_interval.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tsbsQuery.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py -Q 4 diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index ff854449bb..d0086c733e 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -55,7 +55,7 @@ fi date docker run \ -v $REP_MOUNT_PARAM \ - --rm --ulimit core=-1 taos_test:v1.0 sh -c "cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=true;make -j || exit 1" + --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_TAOSX=true;make -j || exit 1" if [[ -d ${WORKDIR}/debugNoSan ]] ;then echo "delete ${WORKDIR}/debugNoSan" @@ -70,7 +70,7 @@ mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugNoSan date docker run \ -v $REP_MOUNT_PARAM \ - --rm --ulimit core=-1 taos_test:v1.0 sh -c "cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=true;make -j || exit 1 " + --rm --ulimit core=-1 taos_test:v1.0 sh -c "pip uninstall taospy -y;pip3 install taospy==2.7.2;cd $REP_DIR;rm -rf debug;mkdir -p debug;cd debug;cmake .. 
-DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_TEST=true -DWEBSOCKET=true -DBUILD_SANITIZER=1 -DTOOLS_SANITIZE=true -DTOOLS_BUILD_TYPE=Debug -DBUILD_TAOSX=true;make -j || exit 1 " mv ${REP_REAL_PATH}/debug ${WORKDIR}/debugSan diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 3c39b05e69..ec588659e9 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -18,7 +18,8 @@ from __future__ import annotations from typing import Any, Set, Tuple from typing import Dict from typing import List -from typing import Optional # Type hinting, ref: https://stackoverflow.com/questions/19202633/python-3-type-hinting-for-none +from typing import \ + Optional # Type hinting, ref: https://stackoverflow.com/questions/19202633/python-3-type-hinting-for-none import textwrap import time @@ -39,7 +40,6 @@ import gc import taos from taos.tmq import * - from .shared.types import TdColumns, TdTags # from crash_gen import ServiceManager, TdeInstance, TdeSubProcess @@ -65,10 +65,11 @@ if sys.version_info[0] < 3: # Command-line/Environment Configurations, will set a bit later # ConfigNameSpace = argparse.Namespace # gConfig: argparse.Namespace -gSvcMgr: Optional[ServiceManager] # TODO: refactor this hack, use dep injection +gSvcMgr: Optional[ServiceManager] # TODO: refactor this hack, use dep injection # logger: logging.Logger gContainer: Container + # def runThread(wt: WorkerThread): # wt.run() @@ -77,7 +78,7 @@ class WorkerThread: def __init__(self, pool: ThreadPool, tid, tc: ThreadCoordinator): """ Note: this runs in the main thread context - """ + """ # self._curStep = -1 self._pool = pool self._tid = tid @@ -91,15 +92,15 @@ class WorkerThread: if (Config.getConfig().per_thread_db_connection): # type: ignore # print("connector_type = {}".format(gConfig.connector_type)) tInst = gContainer.defTdeInstance - if Config.getConfig().connector_type == 'native': - self._dbConn = DbConn.createNative(tInst.getDbTarget()) + if Config.getConfig().connector_type == 'native': + self._dbConn = DbConn.createNative(tInst.getDbTarget()) elif Config.getConfig().connector_type == 'rest': - self._dbConn = DbConn.createRest(tInst.getDbTarget()) + self._dbConn = DbConn.createRest(tInst.getDbTarget()) elif Config.getConfig().connector_type == 'mixed': - if Dice.throw(2) == 0: # 1/2 chance - self._dbConn = DbConn.createNative(tInst.getDbTarget()) + if Dice.throw(2) == 0: # 1/2 chance + self._dbConn = DbConn.createNative(tInst.getDbTarget()) else: - self._dbConn = DbConn.createRest(tInst.getDbTarget()) + self._dbConn = DbConn.createRest(tInst.getDbTarget()) else: raise RuntimeError("Unexpected connector type: {}".format(Config.getConfig().connector_type)) @@ -138,7 +139,7 @@ class WorkerThread: # clean up if (Config.getConfig().per_thread_db_connection): # type: ignore - if self._dbConn.isOpen: #sometimes it is not open + if self._dbConn.isOpen: # sometimes it is not open self._dbConn.close() else: Logging.warning("Cleaning up worker thread, dbConn already closed") @@ -150,20 +151,19 @@ class WorkerThread: tc = self._tc # Thread Coordinator, the overall master try: tc.crossStepBarrier() # shared barrier first, INCLUDING the last one - except threading.BrokenBarrierError as err: # main thread timed out + except threading.BrokenBarrierError as err: # main thread timed out print("_bto", end="") Logging.debug("[TRD] Worker thread exiting due to main thread barrier time-out") break Logging.debug("[TRD] Worker thread [{}] exited 
barrier...".format(self._tid)) - self.crossStepGate() # then per-thread gate, after being tapped + self.crossStepGate() # then per-thread gate, after being tapped Logging.debug("[TRD] Worker thread [{}] exited step gate...".format(self._tid)) if not self._tc.isRunning(): print("_wts", end="") Logging.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...") break - # Before we fetch the task and run it, let's ensure we properly "use" the database (not needed any more) try: if (Config.getConfig().per_thread_db_connection): # most likely TRUE @@ -172,7 +172,8 @@ class WorkerThread: # self.useDb() # might encounter exceptions. TODO: catch except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) - if errno in [0x383, 0x386, 0x00B, 0x014] : # invalid database, dropping, Unable to establish connection, Database not ready + if errno in [0x383, 0x386, 0x00B, + 0x014]: # invalid database, dropping, Unable to establish connection, Database not ready # ignore dummy = 0 else: @@ -180,12 +181,12 @@ class WorkerThread: raise # Fetch a task from the Thread Coordinator - Logging.debug( "[TRD] Worker thread [{}] about to fetch task".format(self._tid)) + Logging.debug("[TRD] Worker thread [{}] about to fetch task".format(self._tid)) task = tc.fetchTask() # Execute such a task Logging.debug("[TRD] Worker thread [{}] about to execute task: {}".format( - self._tid, task.__class__.__name__)) + self._tid, task.__class__.__name__)) task.execute(self) tc.saveExecutedTask(task) Logging.debug("[TRD] Worker thread [{}] finished executing task".format(self._tid)) @@ -228,7 +229,7 @@ class WorkerThread: self._stepGate.set() # wake up! time.sleep(0) # let the released thread run a bit else: - print("_tad", end="") # Thread already dead + print("_tad", end="") # Thread already dead def execSql(self, sql): # TODO: expose DbConn directly return self.getDbConn().execute(sql) @@ -239,7 +240,7 @@ class WorkerThread: def getQueryResult(self): return self.getDbConn().getQueryResult() - def getDbConn(self) -> DbConn : + def getDbConn(self) -> DbConn: if (Config.getConfig().per_thread_db_connection): return self._dbConn else: @@ -251,6 +252,7 @@ class WorkerThread: # else: # return self._tc.getDbState().getDbConn().query(sql) + # The coordinator of all worker threads, mostly running in main thread @@ -262,7 +264,7 @@ class ThreadCoordinator: self._pool = pool # self._wd = wd self._te = None # prepare for every new step - self._dbManager = dbManager # type: Optional[DbManager] # may be freed + self._dbManager = dbManager # type: Optional[DbManager] # may be freed self._executedTasks: List[Task] = [] # in a given step self._lock = threading.RLock() # sync access for a few things @@ -284,7 +286,7 @@ class ThreadCoordinator: return self._dbManager def crossStepBarrier(self, timeout=None): - self._stepBarrier.wait(timeout) + self._stepBarrier.wait(timeout) def requestToStop(self): self._runStatus = Status.STATUS_STOPPING @@ -292,7 +294,7 @@ class ThreadCoordinator: def _runShouldEnd(self, transitionFailed, hasAbortedTask, workerTimeout): maxSteps = Config.getConfig().max_steps # type: ignore - if self._curStep >= (maxSteps - 1): # maxStep==10, last curStep should be 9 + if self._curStep >= (maxSteps - 1): # maxStep==10, last curStep should be 9 return True if self._runStatus != Status.STATUS_RUNNING: return True @@ -304,7 +306,7 @@ class ThreadCoordinator: return True return False - def _hasAbortedTask(self): # from execution of previous step + def _hasAbortedTask(self): # from 
execution of previous step for task in self._executedTasks: if task.isAborted(): # print("Task aborted: {}".format(task)) @@ -319,17 +321,17 @@ class ThreadCoordinator: "--\r\n\n--> Step {} starts with main thread waking up".format(self._curStep)) # A new TE for the new step - self._te = None # set to empty first, to signal worker thread to stop + self._te = None # set to empty first, to signal worker thread to stop if not transitionFailed: # only if not failed self._te = TaskExecutor(self._curStep) Logging.debug("[TRD] Main thread waking up at step {}, tapping worker threads".format( - self._curStep)) # Now not all threads had time to go to sleep + self._curStep)) # Now not all threads had time to go to sleep # Worker threads will wake up at this point, and each execute it's own task - self.tapAllThreads() # release all worker thread from their "gates" + self.tapAllThreads() # release all worker thread from their "gates" def _syncAtBarrier(self): - # Now main thread (that's us) is ready to enter a step + # Now main thread (that's us) is ready to enter a step # let other threads go past the pool barrier, but wait at the # thread gate Logging.debug("[TRD] Main thread about to cross the barrier") @@ -341,7 +343,7 @@ class ThreadCoordinator: transitionFailed = False try: for x in self._dbs: - db = x # type: Database + db = x # type: Database sm = db.getStateMachine() Logging.debug("[STT] starting transitions for DB: {}".format(db.getName())) # at end of step, transiton the DB state @@ -357,8 +359,8 @@ class ThreadCoordinator: # for t in self._pool.threadList: # Logging.debug("[DB] use db for all worker threads") # t.useDb() - # t.execSql("use db") # main thread executing "use - # db" on behalf of every worker thread + # t.execSql("use db") # main thread executing "use + # db" on behalf of every worker thread except taos.error.ProgrammingError as err: if (err.msg == 'network unavailable'): # broken DB connection @@ -369,12 +371,13 @@ class ThreadCoordinator: self._execStats.registerFailure("Broken DB Connection") # continue # don't do that, need to tap all threads at # end, and maybe signal them to stop - if isinstance(err, CrashGenError): # our own transition failure + if isinstance(err, CrashGenError): # our own transition failure Logging.info("State transition error") # TODO: saw an error here once, let's print out stack info for err? - traceback.print_stack() # Stack frame to here. + traceback.print_stack() # Stack frame to here. 
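Most of the crash_gen changes above are whitespace and comment reflows, but they sit on top of a two-stage step synchronization: every worker plus the main thread first meets at a shared `threading.Barrier`, then each worker parks at its own per-thread gate until the main thread finishes its house-keeping and "taps" it awake. A minimal, self-contained sketch of that pattern using only the standard library (the names and step count are illustrative, not the crash_gen API):

```python
import threading

NUM_WORKERS = 3
STEPS = 3
barrier = threading.Barrier(NUM_WORKERS + 1)    # all workers + the main thread

class Worker(threading.Thread):
    def __init__(self, wid):
        super().__init__(daemon=True)
        self.wid = wid
        self.gate = threading.Event()           # per-thread gate, "tapped" by main

    def run(self):
        for step in range(STEPS):
            barrier.wait()                      # 1) everyone reaches the step boundary
            self.gate.wait()                    # 2) wait to be released into the step
            self.gate.clear()
            print(f"worker {self.wid} runs one task in step {step}")

workers = [Worker(i) for i in range(NUM_WORKERS)]
for w in workers:
    w.start()

for step in range(STEPS):
    barrier.wait()                              # main thread joins the boundary ...
    # ... house-keeping / state transition happens here, while workers are quiet ...
    for w in workers:
        w.gate.set()                            # ... then tap every worker awake

for w in workers:
    w.join()
```

In the real coordinator the same gate also carries the shutdown signal: the task executor is cleared (`self._te = None`) before the final tap, which is why each worker re-checks `isRunning()` immediately after crossing its step gate.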
Logging.info("Caused by:") - traceback.print_exception(*sys.exc_info()) # Ref: https://www.geeksforgeeks.org/how-to-print-exception-stack-trace-in-python/ + traceback.print_exception( + *sys.exc_info()) # Ref: https://www.geeksforgeeks.org/how-to-print-exception-stack-trace-in-python/ transitionFailed = True self._te = None # Not running any more self._execStats.registerFailure("State transition error: {}".format(err)) @@ -392,14 +395,14 @@ class ThreadCoordinator: # Coordinate all threads step by step self._curStep = -1 # not started yet - + self._execStats.startExec() # start the stop watch transitionFailed = False hasAbortedTask = False workerTimeout = False while not self._runShouldEnd(transitionFailed, hasAbortedTask, workerTimeout): - if not Config.getConfig().debug: # print this only if we are not in debug mode - Progress.emit(Progress.STEP_BOUNDARY) + if not Config.getConfig().debug: # print this only if we are not in debug mode + Progress.emit(Progress.STEP_BOUNDARY) # print(".", end="", flush=True) # if (self._curStep % 2) == 0: # print memory usage once every 10 steps # memUsage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss @@ -408,15 +411,14 @@ class ThreadCoordinator: # h = hpy() # print("\n") # print(h.heap()) - - + try: - self._syncAtBarrier() # For now just cross the barrier + self._syncAtBarrier() # For now just cross the barrier Progress.emit(Progress.END_THREAD_STEP) - if self._stepStartTime : + if self._stepStartTime: stepExecTime = time.time() - self._stepStartTime Progress.emitStr('{:.3f}s/{}'.format(stepExecTime, DbConnNative.totalRequests)) - DbConnNative.resetTotalRequests() # reset to zero + DbConnNative.resetTotalRequests() # reset to zero except threading.BrokenBarrierError as err: self._execStats.registerFailure("Aborted due to worker thread timeout") Logging.error("\n") @@ -439,15 +441,15 @@ class ThreadCoordinator: # At this point, all threads should be pass the overall "barrier" and before the per-thread "gate" # We use this period to do house keeping work, when all worker # threads are QUIET. 
- hasAbortedTask = self._hasAbortedTask() # from previous step - if hasAbortedTask: + hasAbortedTask = self._hasAbortedTask() # from previous step + if hasAbortedTask: Logging.info("Aborted task encountered, exiting test program") self._execStats.registerFailure("Aborted Task Encountered") - break # do transition only if tasks are error free + break # do transition only if tasks are error free # Ending previous step try: - transitionFailed = self._doTransition() # To start, we end step -1 first + transitionFailed = self._doTransition() # To start, we end step -1 first except taos.error.ProgrammingError as err: transitionFailed = True errno2 = Helper.convertErrno(err.errno) # correct error scheme @@ -459,32 +461,32 @@ class ThreadCoordinator: # Then we move on to the next step Progress.emit(Progress.BEGIN_THREAD_STEP) self._stepStartTime = time.time() - self._releaseAllWorkerThreads(transitionFailed) + self._releaseAllWorkerThreads(transitionFailed) - if hasAbortedTask or transitionFailed : # abnormal ending, workers waiting at "gate" + if hasAbortedTask or transitionFailed: # abnormal ending, workers waiting at "gate" Logging.debug("Abnormal ending of main thraed") elif workerTimeout: Logging.debug("Abnormal ending of main thread, due to worker timeout") - else: # regular ending, workers waiting at "barrier" + else: # regular ending, workers waiting at "barrier" Logging.debug("Regular ending, main thread waiting for all worker threads to stop...") self._syncAtBarrier() self._te = None # No more executor, time to end Logging.debug("Main thread tapping all threads one last time...") self.tapAllThreads() # Let the threads run one last time - #TODO: looks like we are not capturing the failures for the last step yet (i.e. calling registerFailure if neccessary) + # TODO: looks like we are not capturing the failures for the last step yet (i.e. calling registerFailure if neccessary) Logging.debug("\r\n\n--> Main thread ready to finish up...") Logging.debug("Main thread joining all threads") self._pool.joinAll() # Get all threads to finish - Logging.info(". . . All worker threads finished") # No CR/LF before + Logging.info(". . . 
All worker threads finished") # No CR/LF before self._execStats.endExec() - def cleanup(self): # free resources + def cleanup(self): # free resources self._pool.cleanup() self._pool = None - self._te = None + self._te = None self._dbManager = None self._executedTasks = [] self._lock = None @@ -492,7 +494,6 @@ class ThreadCoordinator: self._execStats = None self._runStatus = None - def printStats(self): self._execStats.printStats() @@ -523,21 +524,21 @@ class ThreadCoordinator: def _initDbs(self): ''' Initialize multiple databases, invoked at __ini__() time ''' - self._dbs = [] # type: List[Database] + self._dbs = [] # type: List[Database] dbc = self.getDbManager().getDbConn() if Config.getConfig().max_dbs == 0: self._dbs.append(Database(0, dbc)) - else: - baseDbNumber = int(datetime.datetime.now().timestamp( # Don't use Dice/random, as they are deterministic - )*333) % 888 if Config.getConfig().dynamic_db_table_names else 0 + else: + baseDbNumber = int(datetime.datetime.now().timestamp( # Don't use Dice/random, as they are deterministic + ) * 333) % 888 if Config.getConfig().dynamic_db_table_names else 0 for i in range(Config.getConfig().max_dbs): self._dbs.append(Database(baseDbNumber + i, dbc)) def pickDatabase(self): idxDb = 0 - if Config.getConfig().max_dbs != 0 : - idxDb = Dice.throw(Config.getConfig().max_dbs) # 0 to N-1 - db = self._dbs[idxDb] # type: Database + if Config.getConfig().max_dbs != 0: + idxDb = Dice.throw(Config.getConfig().max_dbs) # 0 to N-1 + db = self._dbs[idxDb] # type: Database return db def fetchTask(self) -> Task: @@ -549,12 +550,12 @@ class ThreadCoordinator: # pick a task type for current state db = self.pickDatabase() - if Dice.throw(2)==1: - taskType = db.getStateMachine().pickTaskType() # dynamic name of class + if Dice.throw(2) == 1: + taskType = db.getStateMachine().pickTaskType() # dynamic name of class else: - taskType = db.getStateMachine().balance_pickTaskType() # and an method can get balance task types + taskType = db.getStateMachine().balance_pickTaskType() # and an method can get balance task types pass - + return taskType(self._execStats, db) # create a task from it def resetExecutedTasks(self): @@ -564,6 +565,7 @@ class ThreadCoordinator: with self._lock: self._executedTasks.append(task) + class ThreadPool: def __init__(self, numThreads, maxSteps): self.numThreads = numThreads @@ -585,7 +587,8 @@ class ThreadPool: workerThread._thread.join() def cleanup(self): - self.threadList = [] # maybe clean up each? + self.threadList = [] # maybe clean up each? 
+ # A queue of continguous POSITIVE integers, used by DbManager to generate continuous numbers # for new table names @@ -680,11 +683,11 @@ class AnyState: CAN_CREATE_DB = 1 # For below, if we can "drop the DB", but strictly speaking # only "under normal circumstances", as we may override it with the -b option - CAN_DROP_DB = 2 + CAN_DROP_DB = 2 CAN_CREATE_FIXED_SUPER_TABLE = 3 CAN_CREATE_STREAM = 3 # super table must exists CAN_CREATE_TOPIC = 3 # super table must exists - CAN_CREATE_CONSUMERS = 3 + CAN_CREATE_CONSUMERS = 3 CAN_DROP_FIXED_SUPER_TABLE = 4 CAN_DROP_TOPIC = 4 CAN_DROP_STREAM = 4 @@ -729,7 +732,7 @@ class AnyState: def canDropDb(self): # If user requests to run up to a number of DBs, # we'd then not do drop_db operations any more - if Config.getConfig().max_dbs > 0 or Config.getConfig().use_shadow_db : + if Config.getConfig().max_dbs > 0 or Config.getConfig().use_shadow_db: return False return self._info[self.CAN_DROP_DB] @@ -737,19 +740,19 @@ class AnyState: return self._info[self.CAN_CREATE_FIXED_SUPER_TABLE] def canDropFixedSuperTable(self): - if Config.getConfig().use_shadow_db: # duplicate writes to shaddow DB, in which case let's disable dropping s-table + if Config.getConfig().use_shadow_db: # duplicate writes to shaddow DB, in which case let's disable dropping s-table return False return self._info[self.CAN_DROP_FIXED_SUPER_TABLE] def canCreateTopic(self): return self._info[self.CAN_CREATE_TOPIC] - + def canDropTopic(self): return self._info[self.CAN_DROP_TOPIC] def canCreateConsumers(self): return self._info[self.CAN_CREATE_CONSUMERS] - + def canCreateStreams(self): return self._info[self.CAN_CREATE_STREAM] @@ -777,7 +780,7 @@ class AnyState: raise CrashGenError( "Unexpected more than 1 success at state: {}, with task: {}, in task set: {}".format( self.__class__.__name__, - cls.__name__, # verified just now that isinstance(task, cls) + cls.__name__, # verified just now that isinstance(task, cls) [c.__class__.__name__ for c in tasks] )) @@ -792,16 +795,17 @@ class AnyState: sCnt += 1 if (exists and sCnt <= 0): raise CrashGenError("Unexpected zero success at state: {}, with task: {}, in task set: {}".format( - self.__class__.__name__, - cls.__name__, # verified just now that isinstance(task, cls) - [c.__class__.__name__ for c in tasks] - )) + self.__class__.__name__, + cls.__name__, # verified just now that isinstance(task, cls) + [c.__class__.__name__ for c in tasks] + )) def assertNoTask(self, tasks, cls): for task in tasks: if isinstance(task, cls): raise CrashGenError( - "This task: {}, is not expected to be present, given the success/failure of others".format(cls.__name__)) + "This task: {}, is not expected to be present, given the success/failure of others".format( + cls.__name__)) def assertNoSuccess(self, tasks, cls): for task in tasks: @@ -848,7 +852,7 @@ class StateEmpty(AnyState): def verifyTasksToState(self, tasks, newState): if (self.hasSuccess(tasks, TaskCreateDb) - ): # at EMPTY, if there's succes in creating DB + ): # at EMPTY, if there's succes in creating DB if (not self.hasTask(tasks, TaskDropDb)): # and no drop_db tasks # we must have at most one. 
TODO: compare numbers self.assertAtMostOneSuccess(tasks, TaskCreateDb) @@ -885,19 +889,19 @@ class StateSuperTableOnly(AnyState): def verifyTasksToState(self, tasks, newState): if (self.hasSuccess(tasks, TaskDropSuperTable) - ): # we are able to drop the table - #self.assertAtMostOneSuccess(tasks, TaskDropSuperTable) + ): # we are able to drop the table + # self.assertAtMostOneSuccess(tasks, TaskDropSuperTable) # we must have had recreted it self.hasSuccess(tasks, TaskCreateSuperTable) # self._state = self.STATE_DB_ONLY # elif ( self.hasSuccess(tasks, AddFixedDataTask) ): # no success dropping the table, but added data # self.assertNoTask(tasks, DropFixedTableTask) # not true in massively parrallel cases - # self._state = self.STATE_HAS_DATA + # self._state = self.STATE_HAS_DATA # elif ( self.hasSuccess(tasks, ReadFixedDataTask) ): # no success in prev cases, but was able to read data - # self.assertNoTask(tasks, DropFixedTableTask) - # self.assertNoTask(tasks, AddFixedDataTask) - # self._state = self.STATE_TABLE_ONLY # no change + # self.assertNoTask(tasks, DropFixedTableTask) + # self.assertNoTask(tasks, AddFixedDataTask) + # self._state = self.STATE_TABLE_ONLY # no change # else: # did not drop table, did not insert data, did not read successfully, that is impossible # raise RuntimeError("Unexpected no-success scenarios") # TODO: need to revamp!! @@ -919,41 +923,41 @@ class StateHasData(AnyState): self.assertAtMostOneSuccess(tasks, TaskDropDb) # TODO: dicy elif (newState.equals(AnyState.STATE_DB_ONLY)): # in DB only if (not self.hasTask(tasks, TaskCreateDb) - ): # without a create_db task + ): # without a create_db task # we must have drop_db task self.assertNoTask(tasks, TaskDropDb) self.hasSuccess(tasks, TaskDropSuperTable) # self.assertAtMostOneSuccess(tasks, DropFixedSuperTableTask) # TODO: dicy # elif ( newState.equals(AnyState.STATE_TABLE_ONLY) ): # data deleted - # self.assertNoTask(tasks, TaskDropDb) - # self.assertNoTask(tasks, TaskDropSuperTable) - # self.assertNoTask(tasks, TaskAddData) - # self.hasSuccess(tasks, DeleteDataTasks) + # self.assertNoTask(tasks, TaskDropDb) + # self.assertNoTask(tasks, TaskDropSuperTable) + # self.assertNoTask(tasks, TaskAddData) + # self.hasSuccess(tasks, DeleteDataTasks) else: # should be STATE_HAS_DATA if (not self.hasTask(tasks, TaskCreateDb) - ): # only if we didn't create one + ): # only if we didn't create one # we shouldn't have dropped it self.assertNoTask(tasks, TaskDropDb) - if not( self.hasTask(tasks, TaskCreateSuperTable) - ): # if we didn't create the table + if not (self.hasTask(tasks, TaskCreateSuperTable) + ): # if we didn't create the table # we should not have a task that drops it self.assertNoTask(tasks, TaskDropSuperTable) # self.assertIfExistThenSuccess(tasks, ReadFixedDataTask) class StateMechine: - def __init__(self, db: Database): + def __init__(self, db: Database): self._db = db # transitition target probabilities, indexed with value of STATE_EMPTY, STATE_DB_ONLY, etc. self._stateWeights = [1, 2, 10, 40] - def init(self, dbc: DbConn): # late initailization, don't save the dbConn + def init(self, dbc: DbConn): # late initailization, don't save the dbConn try: self._curState = self._findCurrentState(dbc) # starting state - except taos.error.ProgrammingError as err: + except taos.error.ProgrammingError as err: Logging.error("Failed to initialized state machine, cannot find current state: {}".format(err)) traceback.print_stack() - raise # re-throw + raise # re-throw # TODO: seems no lnoger used, remove? 
def getCurrentState(self): @@ -999,28 +1003,27 @@ class StateMechine: def _findCurrentState(self, dbc: DbConn): ts = time.time() # we use this to debug how fast/slow it is to do the various queries to find the current DB state - dbName =self._db.getName() - if not dbc.existsDatabase(dbName): # dbc.hasDatabases(): # no database?! - Logging.debug( "[STT] empty database found, between {} and {}".format(ts, time.time())) + dbName = self._db.getName() + if not dbc.existsDatabase(dbName): # dbc.hasDatabases(): # no database?! + Logging.debug("[STT] empty database found, between {} and {}".format(ts, time.time())) return StateEmpty() # did not do this when openning connection, and this is NOT the worker # thread, which does this on their own dbc.use(dbName) - + if not dbc.hasTables(): # no tables - + Logging.debug("[STT] DB_ONLY found, between {} and {}".format(ts, time.time())) return StateDbOnly() # For sure we have tables, which means we must have the super table. # TODO: are we sure? - + sTable = self._db.getFixedSuperTable() - if sTable.hasRegTables(dbc): # no regular tables # print("debug=====*\n"*100) Logging.debug("[STT] SUPER_TABLE_ONLY found, between {} and {}".format(ts, time.time())) - + return StateSuperTableOnly() else: # has actual tables Logging.debug("[STT] HAS_DATA found, between {} and {}".format(ts, time.time())) @@ -1029,7 +1032,7 @@ class StateMechine: # We transition the system to a new state by examining the current state itself def transition(self, tasks, dbc: DbConn): global gSvcMgr - + if (len(tasks) == 0): # before 1st step, or otherwise empty Logging.debug("[STT] Starting State: {}".format(self._curState)) return # do nothing @@ -1038,39 +1041,39 @@ class StateMechine: dbc.execute("select * from information_schema.ins_dnodes") # Generic Checks, first based on the start state - if not Config.getConfig().ignore_errors: # verify state, only if we are asked not to ignore certain errors. + if not Config.getConfig().ignore_errors: # verify state, only if we are asked not to ignore certain errors. 
if self._curState.canCreateDb(): self._curState.assertIfExistThenSuccess(tasks, TaskCreateDb) # self.assertAtMostOneSuccess(tasks, CreateDbTask) # not really, in # case of multiple creation and drops if self._curState.canDropDb(): - if gSvcMgr == None: # only if we are running as client-only + if gSvcMgr == None: # only if we are running as client-only self._curState.assertIfExistThenSuccess(tasks, TaskDropDb) # self.assertAtMostOneSuccess(tasks, DropDbTask) # not really in # case of drop-create-drop # if self._state.canCreateFixedTable(): - # self.assertIfExistThenSuccess(tasks, CreateFixedTableTask) # Not true, DB may be dropped - # self.assertAtMostOneSuccess(tasks, CreateFixedTableTask) # not - # really, in case of create-drop-create + # self.assertIfExistThenSuccess(tasks, CreateFixedTableTask) # Not true, DB may be dropped + # self.assertAtMostOneSuccess(tasks, CreateFixedTableTask) # not + # really, in case of create-drop-create # if self._state.canDropFixedTable(): - # self.assertIfExistThenSuccess(tasks, DropFixedTableTask) # Not True, the whole DB may be dropped - # self.assertAtMostOneSuccess(tasks, DropFixedTableTask) # not - # really in case of drop-create-drop + # self.assertIfExistThenSuccess(tasks, DropFixedTableTask) # Not True, the whole DB may be dropped + # self.assertAtMostOneSuccess(tasks, DropFixedTableTask) # not + # really in case of drop-create-drop # if self._state.canAddData(): # self.assertIfExistThenSuccess(tasks, AddFixedDataTask) # not true # actually # if self._state.canReadData(): - # Nothing for sure + # Nothing for sure newState = self._findCurrentState(dbc) Logging.debug("[STT] New DB state determined: {}".format(newState)) # can old state move to new state through the tasks? - if not Config.getConfig().ignore_errors: # verify state, only if we are asked not to ignore certain errors. + if not Config.getConfig().ignore_errors: # verify state, only if we are asked not to ignore certain errors. 
self._curState.verifyTasksToState(tasks, newState) self._curState = newState @@ -1096,22 +1099,24 @@ class StateMechine: weightsTypes = BasicTypes.copy() # this matrixs can balance the Frequency of TaskTypes - balance_TaskType_matrixs = {'TaskDropDb': 5 , 'TaskDropTopics': 20 , 'TaskDropStreams':10 , 'TaskDropStreamTables':10 , - 'TaskReadData':50 , 'TaskDropSuperTable':5 , 'TaskAlterTags':3 , 'TaskAddData':10, - 'TaskDeleteData':10 , 'TaskCreateDb':10 , 'TaskCreateStream': 3, 'TaskCreateTopic' :3, - 'TaskCreateConsumers':10, 'TaskCreateSuperTable': 10 } # TaskType : balance_matrixs of task - - for task , weights in balance_TaskType_matrixs.items(): - + balance_TaskType_matrixs = {'TaskDropDb': 5, 'TaskDropTopics': 20, 'TaskDropStreams': 10, + 'TaskDropStreamTables': 10, + 'TaskReadData': 50, 'TaskDropSuperTable': 5, 'TaskAlterTags': 3, 'TaskAddData': 10, + 'TaskDeleteData': 10, 'TaskCreateDb': 10, 'TaskCreateStream': 3, + 'TaskCreateTopic': 3, + 'TaskCreateConsumers': 10, + 'TaskCreateSuperTable': 10} # TaskType : balance_matrixs of task + + for task, weights in balance_TaskType_matrixs.items(): + for basicType in BasicTypes: if basicType.__name__ == task: for _ in range(weights): weightsTypes.append(basicType) - task = random.sample(weightsTypes,1) + task = random.sample(weightsTypes, 1) return task[0] - # ref: # https://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/ def _weighted_choice_sub(self, weights) -> int: @@ -1123,6 +1128,7 @@ class StateMechine: return i raise CrashGenError("Unexpected no choice") + class Database: ''' We use this to represent an actual TDengine database inside a service instance, possibly in a cluster environment. @@ -1131,16 +1137,16 @@ class Database: TODO: consider moving, but keep in mind it contains "StateMachine" ''' - _clsLock = threading.Lock() # class wide lock + _clsLock = threading.Lock() # class wide lock _lastInt = 101 # next one is initial integer - _lastTick = None # Optional[datetime] - _lastLaggingTick = None # Optional[datetime] # lagging tick, for out-of-sequence (oos) data insertions + _lastTick = None # Optional[datetime] + _lastLaggingTick = None # Optional[datetime] # lagging tick, for out-of-sequence (oos) data insertions - def __init__(self, dbNum: int, dbc: DbConn): # TODO: remove dbc - self._dbNum = dbNum # we assign a number to databases, for our testing purpose + def __init__(self, dbNum: int, dbc: DbConn): # TODO: remove dbc + self._dbNum = dbNum # we assign a number to databases, for our testing purpose self._stateMachine = StateMechine(self) self._stateMachine.init(dbc) - + self._lock = threading.RLock() def getStateMachine(self) -> StateMechine: @@ -1152,7 +1158,7 @@ class Database: def getName(self): return "db_{}".format(self._dbNum) - def filterTasks(self, inTasks: List[Task]): # Pick out those belonging to us + def filterTasks(self, inTasks: List[Task]): # Pick out those belonging to us outTasks = [] for task in inTasks: if task.getDb().isSame(self): @@ -1184,38 +1190,42 @@ class Database: # start time will be auto generated , start at 10 years ago local time local_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-16] local_epoch_time = [int(i) for i in local_time.split("-")] - #local_epoch_time will be such as : [2022, 7, 18] + # local_epoch_time will be such as : [2022, 7, 18] - t1 = datetime.datetime(local_epoch_time[0]-5, local_epoch_time[1], local_epoch_time[2]) + t1 = datetime.datetime(local_epoch_time[0] - 5, local_epoch_time[1], local_epoch_time[2]) t2 = 
datetime.datetime.now() # maybe a very large number, takes 69 years to exceed Python int range elSec = int(t2.timestamp() - t1.timestamp()) elSec2 = (elSec % (8 * 12 * 30 * 24 * 60 * 60 / 500)) * \ - 500 # a number representing seconds within 10 years + 500 # a number representing seconds within 10 years # print("elSec = {}".format(elSec)) - t3 = datetime.datetime(local_epoch_time[0]-10, local_epoch_time[1], local_epoch_time[2]) # default "keep" is 10 years + t3 = datetime.datetime(local_epoch_time[0] - 10, local_epoch_time[1], + local_epoch_time[2]) # default "keep" is 10 years t4 = datetime.datetime.fromtimestamp( t3.timestamp() + elSec2) # see explanation above Logging.debug("Setting up TICKS to start from: {}".format(t4)) return t4 @classmethod - def getNextTick(cls): + def getNextTick(cls): ''' Fetch a timestamp tick, with some random factor, may not be unique. - ''' + ''' with cls._clsLock: # prevent duplicate tick - if cls._lastLaggingTick is None or cls._lastTick is None : # not initialized + if cls._lastLaggingTick is None or cls._lastTick is None: # not initialized # 10k at 1/20 chance, should be enough to avoid overlaps tick = cls.setupLastTick() cls._lastTick = tick - cls._lastLaggingTick = tick + datetime.timedelta(0, -60*2) # lagging behind 2 minutes, should catch up fast + cls._lastLaggingTick = tick + datetime.timedelta(0, + -60 * 2) # lagging behind 2 minutes, should catch up fast # if : # should be quite a bit into the future - if Config.isSet('mix_oos_data') and Dice.throw(20) == 0: # if asked to do so, and 1 in 20 chance, return lagging tick - cls._lastLaggingTick += datetime.timedelta(0, 1) # pick the next sequence from the lagging tick sequence - return cls._lastLaggingTick + if Config.isSet('mix_oos_data') and Dice.throw( + 20) == 0: # if asked to do so, and 1 in 20 chance, return lagging tick + cls._lastLaggingTick += datetime.timedelta(0, + 1) # pick the next sequence from the lagging tick sequence + return cls._lastLaggingTick else: # regular # add one second to it cls._lastTick += datetime.timedelta(0, 1) @@ -1332,9 +1342,7 @@ class Task(): # Logging.debug("Creating new task {}...".format(self._taskNum)) self._execStats = execStats - self._db = db # A task is always associated/for a specific DB - - + self._db = db # A task is always associated/for a specific DB def isSuccess(self): return self._err is None @@ -1367,82 +1375,78 @@ class Task(): def _isServiceStable(self): if not gSvcMgr: return True # we don't run service, so let's assume it's stable - return gSvcMgr.isStable() # otherwise let's examine the service + return gSvcMgr.isStable() # otherwise let's examine the service def _isErrAcceptable(self, errno, msg): if errno in [ - # TDengine 2.x Error Codes: - 0x05, # TSDB_CODE_RPC_NOT_READY - 0x0B, # Unable to establish connection, more details in TD-1648 - # 0x200, # invalid SQL, TODO: re-examine with TD-934 - 0x20F, # query terminated, possibly due to vnoding being dropped, see TD-1776 - 0x213, # "Disconnected from service", result of "kill connection ???" - 0x217, # "db not selected", client side defined error code - # 0x218, # "Table does not exist" client side defined error code - 0x360, # Table already exists - 0x362, - # 0x369, # tag already exists - 0x36A, 0x36B, 0x36D, - 0x381, - 0x380, # "db not selected" - 0x383, - 0x386, # DB is being dropped?! - 0x503, - 0x510, # vnode not in ready state - 0x14, # db not ready, errno changed - 0x600, # Invalid table ID, why? 
- 0x218, # Table does not exist + # TDengine 2.x Error Codes: + 0x05, # TSDB_CODE_RPC_NOT_READY + 0x0B, # Unable to establish connection, more details in TD-1648 + # 0x200, # invalid SQL, TODO: re-examine with TD-934 + 0x20F, # query terminated, possibly due to vnoding being dropped, see TD-1776 + 0x213, # "Disconnected from service", result of "kill connection ???" + 0x217, # "db not selected", client side defined error code + # 0x218, # "Table does not exist" client side defined error code + 0x360, # Table already exists + 0x362, + # 0x369, # tag already exists + 0x36A, 0x36B, 0x36D, + 0x381, + 0x380, # "db not selected" + 0x383, + 0x386, # DB is being dropped?! + 0x503, + 0x510, # vnode not in ready state + 0x14, # db not ready, errno changed + 0x600, # Invalid table ID, why? + 0x218, # Table does not exist - # TDengine 3.0 Error Codes: - 0x0333, # Object is creating # TODO: this really is NOT an acceptable error - 0x0369, # Tag already exists - 0x0388, # Database not exist - 0x03A0, # STable already exists - 0x03A1, # STable [does] not exist - 0x03AA, # Tag already exists - 0x0603, # Table already exists - 0x2603, # Table does not exist, replaced by 2662 below - 0x260d, # Tags number not matched - 0x2662, # Table does not exist #TODO: what about 2603 above? - 0x2600, # database not specified, SQL: show stables , database droped , and show tables - 0x032C, # Object is creating - 0x032D, # Object is dropping - 0x03D3, # Conflict transaction not completed - 0x0707, # Query not ready , it always occur at replica 3 - 0x707, # Query not ready - 0x396, # Database in creating status - 0x386, # Database in droping status - 0x03E1, # failed on tmq_subscribe ,topic not exist - 0x03ed , # Topic must be dropped first, SQL: drop database db_0 - 0x0203 , # Invalid value - 0x03f0 , # Stream already exist , topic already exists + # TDengine 3.0 Error Codes: + 0x0333, # Object is creating # TODO: this really is NOT an acceptable error + 0x0369, # Tag already exists + 0x0388, # Database not exist + 0x03A0, # STable already exists + 0x03A1, # STable [does] not exist + 0x03AA, # Tag already exists + 0x0603, # Table already exists + 0x2603, # Table does not exist, replaced by 2662 below + 0x260d, # Tags number not matched + 0x2662, # Table does not exist #TODO: what about 2603 above? + 0x2600, # database not specified, SQL: show stables , database droped , and show tables + 0x032C, # Object is creating + 0x032D, # Object is dropping + 0x03D3, # Conflict transaction not completed + 0x0707, # Query not ready , it always occur at replica 3 + 0x707, # Query not ready + 0x396, # Database in creating status + 0x386, # Database in droping status + 0x03E1, # failed on tmq_subscribe ,topic not exist + 0x03ed, # Topic must be dropped first, SQL: drop database db_0 + 0x0203, # Invalid value + 0x03f0, # Stream already exist , topic already exists - - - - 1000 # REST catch-all error - ]: - return True # These are the ALWAYS-ACCEPTABLE ones + 1000 # REST catch-all error + ]: + return True # These are the ALWAYS-ACCEPTABLE ones # This case handled below already. 
# elif (errno in [ 0x0B ]) and Settings.getConfig().auto_start_service: # return True # We may get "network unavilable" when restarting service - elif Config.getConfig().ignore_errors: # something is specified on command line + elif Config.getConfig().ignore_errors: # something is specified on command line moreErrnos = [int(v, 0) for v in Config.getConfig().ignore_errors.split(',')] if errno in moreErrnos: return True - elif errno == 0x200 : # invalid SQL, we need to div in a bit more + elif errno == 0x200: # invalid SQL, we need to div in a bit more if msg.find("invalid column name") != -1: - return True - elif msg.find("tags number not matched") != -1: # mismatched tags after modification return True - elif msg.find("duplicated column names") != -1: # also alter table tag issues + elif msg.find("tags number not matched") != -1: # mismatched tags after modification return True - elif not self._isServiceStable(): # We are managing service, and ... + elif msg.find("duplicated column names") != -1: # also alter table tag issues + return True + elif not self._isServiceStable(): # We are managing service, and ... Logging.info("Ignoring error when service starting/stopping: errno = {}, msg = {}".format(errno, msg)) return True - - return False # Not an acceptable error + return False # Not an acceptable error def execute(self, wt: WorkerThread): wt.verifyThreadSelf() @@ -1453,7 +1457,7 @@ class Task(): self.logDebug( "[-] executing task {}...".format(self.__class__.__name__)) - self._err = None # TODO: type hint mess up? + self._err = None # TODO: type hint mess up? self._execStats.beginTaskType(self.__class__.__name__) # mark beginning errno2 = None @@ -1465,19 +1469,19 @@ class Task(): errno2 = Helper.convertErrno(err.errno) if (Config.getConfig().continue_on_exception): # user choose to continue self.logDebug("[=] Continue after TAOS exception: errno=0x{:X}, msg: {}, SQL: {}".format( - errno2, err, wt.getDbConn().getLastSql())) + errno2, err, wt.getDbConn().getLastSql())) self._err = err elif self._isErrAcceptable(errno2, err.__str__()): self.logDebug("[=] Acceptable Taos library exception: errno=0x{:X}, msg: {}, SQL: {}".format( - errno2, err, wt.getDbConn().getLastSql())) + errno2, err, wt.getDbConn().getLastSql())) # print("_", end="", flush=True) Progress.emit(Progress.ACCEPTABLE_ERROR) self._err = err - else: # not an acceptable error + else: # not an acceptable error shortTid = threading.get_ident() % 10000 errMsg = "[=] Unexpected Taos library exception ({}): errno=0x{:X}, thread={}, msg: {}, SQL: {}".format( self.__class__.__name__, - errno2, + errno2, shortTid, err, wt.getDbConn().getLastSql()) self.logDebug(errMsg) @@ -1485,7 +1489,8 @@ class Task(): # raise # so that we see full stack traceback.print_exc() print( - "\n\n----------------------------\nProgram ABORTED Due to Unexpected TAOS Error: \n\n{}\n".format(errMsg) + + "\n\n----------------------------\nProgram ABORTED Due to Unexpected TAOS Error: \n\n{}\n".format( + errMsg) + "----------------------------\n") # sys.exit(-1) self._err = err @@ -1502,10 +1507,10 @@ class Task(): traceback.print_exc() # except BaseException: # TODO: what is this again??!! 
# raise RuntimeError("Punt") - # self.logDebug( - # "[=] Unexpected exception, SQL: {}".format( - # wt.getDbConn().getLastSql())) - # raise + # self.logDebug( + # "[=] Unexpected exception, SQL: {}".format( + # wt.getDbConn().getLastSql())) + # raise self._execStats.endTaskType(self.__class__.__name__, self.isSuccess()) self.logDebug("[X] task execution completed, {}, status: {}".format( @@ -1524,12 +1529,12 @@ class Task(): def getQueryResult(self, wt: WorkerThread): # execute an SQL on the worker thread return wt.getQueryResult() - def lockTable(self, ftName): # full table name + def lockTable(self, ftName): # full table name # print(" <<" + ftName + '_', end="", flush=True) - with Task._lock: # SHORT lock! so we only protect lock creation - if not ftName in Task._tableLocks: # Create new lock and add to list, if needed + with Task._lock: # SHORT lock! so we only protect lock creation + if not ftName in Task._tableLocks: # Create new lock and add to list, if needed Task._tableLocks[ftName] = threading.Lock() - + # No lock protection, anybody can do this any time lock = Task._tableLocks[ftName] # Logging.info("Acquiring lock: {}, {}".format(ftName, lock)) @@ -1538,7 +1543,7 @@ class Task(): def unlockTable(self, ftName): # print('_' + ftName + ">> ", end="", flush=True) - with Task._lock: + with Task._lock: if not ftName in self._tableLocks: raise RuntimeError("Corrupt state, no such lock") lock = Task._tableLocks[ftName] @@ -1588,11 +1593,11 @@ class ExecutionStats: t[0] += 1 # index 0 has the "total" execution times if isSuccess: t[1] += 1 # index 1 has the "success" execution times - if eno != None: + if eno != None: if klassName not in self._errors: self._errors[klassName] = {} errors = self._errors[klassName] - errors[eno] = errors[eno]+1 if eno in errors else 1 + errors[eno] = errors[eno] + 1 if eno in errors else 1 def beginTaskType(self, klassName): with self._lock: @@ -1615,7 +1620,7 @@ class ExecutionStats: Logging.info( "----------------------------------------------------------------------") Logging.info( - "| Crash_Gen test {}, with the following stats:". format( + "| Crash_Gen test {}, with the following stats:".format( "FAILED (reason: {})".format( self._failureReason) if self._failed else "SUCCEEDED")) Logging.info("| Task Execution Times (success/total):") @@ -1628,7 +1633,7 @@ class ExecutionStats: # print("errors = {}".format(errors)) errStrs = ["0x{:X}:{}".format(eno, n) for (eno, n) in errors.items()] # print("error strings = {}".format(errStrs)) - errStr = ", ".join(errStrs) + errStr = ", ".join(errStrs) Logging.info("| {0:<24}: {1}/{2} (Errors: {3})".format(k, n[1], n[0], errStr)) Logging.info( @@ -1647,8 +1652,8 @@ class ExecutionStats: Logging.info("| Top numbers written: {}".format(TaskExecutor.getBoundedList())) Logging.info("| Active DB Native Connections (now): {}".format(DbConnNative.totalConnections)) Logging.info("| Longest native query time: {:.3f} seconds, started: {}". - format(MyTDSql.longestQueryTime, - time.strftime("%x %X", time.localtime(MyTDSql.lqStartTime))) ) + format(MyTDSql.longestQueryTime, + time.strftime("%x %X", time.localtime(MyTDSql.lqStartTime)))) Logging.info("| Longest native query: {}".format(MyTDSql.longestQuery)) Logging.info( "----------------------------------------------------------------------") @@ -1662,12 +1667,12 @@ class StateTransitionTask(Task): _baseTableNumber = None - _endState = None # TODO: no longter used? + _endState = None # TODO: no longter used? 
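`lockTable()`/`unlockTable()` above keep one `threading.Lock` per full table name in a class-wide dictionary; only the creation of a missing entry happens under the short class-level lock, and the potentially long wait on the per-table lock happens outside it. A minimal sketch of that per-resource lock registry (class and method names are illustrative, not the Task API):

```python
import threading

class LockRegistry:
    _registry_lock = threading.Lock()   # protects creation of new entries only
    _locks = {}                         # resource name -> threading.Lock

    @classmethod
    def acquire(cls, name):
        with cls._registry_lock:        # short critical section: create if missing
            if name not in cls._locks:
                cls._locks[name] = threading.Lock()
        cls._locks[name].acquire()      # long wait happens outside the registry lock

    @classmethod
    def release(cls, name):
        cls._locks[name].release()

# Usage: serialize writers of the same table without blocking other tables.
LockRegistry.acquire("db_0.reg_table_7")
try:
    pass  # ... write to the table ...
finally:
    LockRegistry.release("db_0.reg_table_7")
```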
@classmethod def getInfo(cls): # each sub class should supply their own information raise RuntimeError("Overriding method expected") - + @classmethod def getEndState(cls): # TODO: optimize by calling it fewer times raise RuntimeError("Overriding method expected") @@ -1687,7 +1692,7 @@ class StateTransitionTask(Task): @classmethod def getRegTableName(cls, i): - if ( StateTransitionTask._baseTableNumber is None): # Set it one time + if (StateTransitionTask._baseTableNumber is None): # Set it one time StateTransitionTask._baseTableNumber = Dice.throw( 999) if Config.getConfig().dynamic_db_table_names else 0 return "reg_table_{}".format(StateTransitionTask._baseTableNumber + i) @@ -1711,16 +1716,21 @@ class TaskCreateDb(StateTransitionTask): repStr = "" if Config.getConfig().num_replicas != 1: # numReplica = Dice.throw(Settings.getConfig().max_replicas) + 1 # 1,2 ... N - numReplica = Config.getConfig().num_replicas # fixed, always + numReplica = Config.getConfig().num_replicas # fixed, always repStr = "replica {}".format(numReplica) - updatePostfix = "" if Config.getConfig().verify_data else "" # allow update only when "verify data" is active , 3.0 version default is update 1 - vg_nums = random.randint(1,8) - cache_model = Dice.choice(['none' , 'last_row' , 'last_value' , 'both']) - buffer = random.randint(3,128) + updatePostfix = "" if Config.getConfig().verify_data else "" # allow update only when "verify data" is active , 3.0 version default is update 1 + vg_nums = random.randint(1, 8) + cache_model = Dice.choice(['none', 'last_row', 'last_value', 'both']) + buffer = random.randint(3, 128) dbName = self._db.getName() - self.execWtSql(wt, "create database {} {} {} vgroups {} cachemodel '{}' buffer {} ".format(dbName, repStr, updatePostfix, vg_nums, cache_model,buffer ) ) + self.execWtSql(wt, "create database {} {} {} vgroups {} cachemodel '{}' buffer {} ".format(dbName, repStr, + updatePostfix, + vg_nums, + cache_model, + buffer)) if dbName == "db_0" and Config.getConfig().use_shadow_db: - self.execWtSql(wt, "create database {} {} {} ".format("db_s", repStr, updatePostfix ) ) + self.execWtSql(wt, "create database {} {} {} ".format("db_s", repStr, updatePostfix)) + class TaskDropDb(StateTransitionTask): @classmethod @@ -1732,19 +1742,20 @@ class TaskDropDb(StateTransitionTask): return state.canDropDb() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - + try: - self.queryWtSql(wt, "drop database {}".format(self._db.getName())) # drop database maybe failed ,because topic exists + self.queryWtSql(wt, "drop database {}".format( + self._db.getName())) # drop database maybe failed ,because topic exists except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) - if errno in [0x0203]: # drop maybe failed + if errno in [0x0203]: # drop maybe failed pass Logging.debug("[OPS] database dropped at {}".format(time.time())) class TaskCreateStream(StateTransitionTask): - + @classmethod def getEndState(cls): return StateHasData() @@ -1755,39 +1766,40 @@ class TaskCreateStream(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): dbname = self._db.getName() - - sub_stream_name = dbname+ '_sub_stream' + + sub_stream_name = dbname + '_sub_stream' sub_stream_tb_name = 'stream_tb_sub' - super_stream_name = dbname+ '_super_stream' + super_stream_name = dbname + '_super_stream' super_stream_tb_name = 'stream_tb_super' if not self._db.exists(wt.getDbConn()): Logging.debug("Skipping task, no DB yet") return - sTable = self._db.getFixedSuperTable() 
# type: TdSuperTable + sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place - stbname =sTable.getName() + stbname = sTable.getName() sub_tables = sTable.getRegTables(wt.getDbConn()) - aggExpr = Dice.choice([ - 'count(*)', 'avg(speed)', 'sum(speed)', 'stddev(speed)','min(speed)', 'max(speed)', 'first(speed)', 'last(speed)', - 'apercentile(speed, 10)', 'last_row(*)', 'twa(speed)']) - - stream_sql = '' # set default value + aggExpr = Dice.choice([ + 'count(*)', 'avg(speed)', 'sum(speed)', 'stddev(speed)', 'min(speed)', 'max(speed)', 'first(speed)', + 'last(speed)', + 'apercentile(speed, 10)', 'last_row(*)', 'twa(speed)']) + + stream_sql = '' # set default value if sub_tables: sub_tbname = sub_tables[0] # create stream with query above sub_table - stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.\ - format(sub_stream_name,dbname,sub_stream_tb_name ,aggExpr,dbname,sub_tbname) + stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '. \ + format(sub_stream_name, dbname, sub_stream_tb_name, aggExpr, dbname, sub_tbname) else: - stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.\ - format(super_stream_name,dbname,super_stream_tb_name,aggExpr, dbname,stbname) + stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '. \ + format(super_stream_name, dbname, super_stream_tb_name, aggExpr, dbname, stbname) self.execWtSql(wt, stream_sql) Logging.debug("[OPS] stream is creating at {}".format(time.time())) class TaskCreateTopic(StateTransitionTask): - + @classmethod def getEndState(cls): return StateHasData() @@ -1798,40 +1810,46 @@ class TaskCreateTopic(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): dbname = self._db.getName() - - sub_topic_name = dbname+ '_sub_topic' - super_topic_name = dbname+ '_super_topic' - stable_topic = dbname+ '_stable_topic' - db_topic = 'database_' + dbname+ '_topics' + + sub_topic_name = dbname + '_sub_topic' + super_topic_name = dbname + '_super_topic' + stable_topic = dbname + '_stable_topic' + db_topic = 'database_' + dbname + '_topics' if not self._db.exists(wt.getDbConn()): Logging.debug("Skipping task, no DB yet") return - sTable = self._db.getFixedSuperTable() # type: TdSuperTable + sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place # create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1; - stbname =sTable.getName() + stbname = sTable.getName() sub_tables = sTable.getRegTables(wt.getDbConn()) - scalarExpr = Dice.choice([ '*','speed','color','abs(speed)','acos(speed)','asin(speed)','atan(speed)','ceil(speed)','cos(speed)','cos(speed)', - 'floor(speed)','log(speed,2)','pow(speed,2)','round(speed)','sin(speed)','sqrt(speed)','char_length(color)','concat(color,color)', - 'concat_ws(" ", color,color," ")','length(color)', 'lower(color)', 'ltrim(color)','substr(color , 2)','upper(color)','cast(speed as double)', - 'cast(ts as bigint)']) + scalarExpr = Dice.choice( + ['*', 'speed', 'color', 'abs(speed)', 'acos(speed)', 'asin(speed)', 'atan(speed)', 'ceil(speed)', + 'cos(speed)', 'cos(speed)', + 'floor(speed)', 'log(speed,2)', 'pow(speed,2)', 'round(speed)', 'sin(speed)', 'sqrt(speed)', + 'char_length(color)', 
'concat(color,color)', + 'concat_ws(" ", color,color," ")', 'length(color)', 'lower(color)', 'ltrim(color)', 'substr(color , 2)', + 'upper(color)', 'cast(speed as double)', + 'cast(ts as bigint)']) topic_sql = '' # set default value - if Dice.throw(3)==0: # create topic : source data from sub query - if sub_tables: # if not empty + if Dice.throw(3) == 0: # create topic : source data from sub query + if sub_tables: # if not empty sub_tbname = sub_tables[0] # create topic : source data from sub query of sub stable - topic_sql = 'create topic {} as select {} FROM {}.{} ; '.format(sub_topic_name,scalarExpr,dbname,sub_tbname) - + topic_sql = 'create topic {} as select {} FROM {}.{} ; '.format(sub_topic_name, scalarExpr, dbname, + sub_tbname) + else: # create topic : source data from sub query of stable - topic_sql = 'create topic {} as select {} FROM {}.{} '.format(super_topic_name,scalarExpr, dbname,stbname) - elif Dice.throw(3)==1: # create topic : source data from super table - topic_sql = 'create topic {} AS STABLE {}.{} '.format(stable_topic,dbname,stbname) - - elif Dice.throw(3)==2: # create topic : source data from whole database - topic_sql = 'create topic {} AS DATABASE {} '.format(db_topic,dbname) + topic_sql = 'create topic {} as select {} FROM {}.{} '.format(super_topic_name, scalarExpr, dbname, + stbname) + elif Dice.throw(3) == 1: # create topic : source data from super table + topic_sql = 'create topic {} AS STABLE {}.{} '.format(stable_topic, dbname, stbname) + + elif Dice.throw(3) == 2: # create topic : source data from whole database + topic_sql = 'create topic {} AS DATABASE {} '.format(db_topic, dbname) else: pass @@ -1840,8 +1858,9 @@ class TaskCreateTopic(StateTransitionTask): self.execWtSql(wt, topic_sql) Logging.debug("[OPS] db topic is creating at {}".format(time.time())) + class TaskDropTopics(StateTransitionTask): - + @classmethod def getEndState(cls): return StateHasData() @@ -1852,21 +1871,21 @@ class TaskDropTopics(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): dbname = self._db.getName() - if not self._db.exists(wt.getDbConn()): Logging.debug("Skipping task, no DB yet") return - sTable = self._db.getFixedSuperTable() # type: TdSuperTable + sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place tblName = sTable.getName() if sTable.hasTopics(wt.getDbConn()): - sTable.dropTopics(wt.getDbConn(),dbname,None) # drop topics of database - sTable.dropTopics(wt.getDbConn(),dbname,tblName) # drop topics of stable + sTable.dropTopics(wt.getDbConn(), dbname, None) # drop topics of database + sTable.dropTopics(wt.getDbConn(), dbname, tblName) # drop topics of stable + class TaskDropStreams(StateTransitionTask): - + @classmethod def getEndState(cls): return StateHasData() @@ -1877,20 +1896,20 @@ class TaskDropStreams(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): # dbname = self._db.getName() - if not self._db.exists(wt.getDbConn()): Logging.debug("Skipping task, no DB yet") return - sTable = self._db.getFixedSuperTable() # type: TdSuperTable + sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place # tblName = sTable.getName() if sTable.hasStreams(wt.getDbConn()): sTable.dropStreams(wt.getDbConn()) # drop stream of database + class TaskDropStreamTables(StateTransitionTask): - + @classmethod def getEndState(cls): return StateHasData() @@ -1901,42 +1920,42 @@ class 
TaskDropStreamTables(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): # dbname = self._db.getName() - if not self._db.exists(wt.getDbConn()): Logging.debug("Skipping task, no DB yet") return - sTable = self._db.getFixedSuperTable() # type: TdSuperTable - wt.execSql("use db") # should always be in place + sTable = self._db.getFixedSuperTable() # type: TdSuperTable + wt.execSql("use db") # should always be in place # tblName = sTable.getName() if sTable.hasStreamTables(wt.getDbConn()): - sTable.dropStreamTables(wt.getDbConn()) # drop stream tables + sTable.dropStreamTables(wt.getDbConn()) # drop stream tables + class TaskCreateConsumers(StateTransitionTask): - + @classmethod def getEndState(cls): return StateHasData() @classmethod def canBeginFrom(cls, state: AnyState): - return state.canCreateConsumers() + return state.canCreateConsumers() def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - if Config.getConfig().connector_type == 'native': - - sTable = self._db.getFixedSuperTable() # type: TdSuperTable + if Config.getConfig().connector_type == 'native': + + sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place if sTable.hasTopics(wt.getDbConn()): - sTable.createConsumer(wt.getDbConn(),random.randint(1,10)) + sTable.createConsumer(wt.getDbConn(), random.randint(1, 10)) pass else: print(" restful not support tmq consumers") - return + return + - class TaskCreateSuperTable(StateTransitionTask): @classmethod def getEndState(cls): @@ -1951,9 +1970,9 @@ class TaskCreateSuperTable(StateTransitionTask): Logging.debug("Skipping task, no DB yet") return - sTable = self._db.getFixedSuperTable() # type: TdSuperTable + sTable = self._db.getFixedSuperTable() # type: TdSuperTable # wt.execSql("use db") # should always be in place - + sTable.create(wt.getDbConn(), {'ts': TdDataType.TIMESTAMP, 'speed': TdDataType.INT, 'color': TdDataType.BINARY16}, { 'b': TdDataType.BINARY200, 'f': TdDataType.FLOAT}, @@ -1974,11 +1993,10 @@ class TdSuperTable: def getName(self): return self._stName - - def drop(self, dbc, skipCheck = False): + def drop(self, dbc, skipCheck=False): dbName = self._dbName - if self.exists(dbc) : # if myself exists - fullTableName = dbName + '.' + self._stName + if self.exists(dbc): # if myself exists + fullTableName = dbName + '.' + self._stName dbc.execute("DROP TABLE {}".format(fullTableName)) else: if not skipCheck: @@ -1989,64 +2007,55 @@ class TdSuperTable: return dbc.existsSuperTable(self._stName) # TODO: odd semantic, create() method is usually static? - def create(self, dbc, cols: TdColumns, tags: TdTags, dropIfExists = False): + def create(self, dbc, cols: TdColumns, tags: TdTags, dropIfExists=False): '''Creating a super table''' dbName = self._dbName dbc.execute("USE " + dbName) - fullTableName = dbName + '.' + self._stName + fullTableName = dbName + '.' 
+ self._stName if dbc.existsSuperTable(self._stName): - if dropIfExists: - dbc.execute("DROP TABLE {}".format(fullTableName)) - - else: # error + if dropIfExists: + dbc.execute("DROP TABLE {}".format(fullTableName)) + + else: # error raise CrashGenError("Cannot create super table, already exists: {}".format(self._stName)) # Now let's create sql = "CREATE TABLE {} ({})".format( fullTableName, - ",".join(['%s %s'%(k,v.value) for (k,v) in cols.items()])) - if tags : + ",".join(['%s %s' % (k, v.value) for (k, v) in cols.items()])) + if tags: sql += " TAGS ({})".format( - ",".join(['%s %s'%(k,v.value) for (k,v) in tags.items()]) - ) + ",".join(['%s %s' % (k, v.value) for (k, v) in tags.items()]) + ) else: sql += " TAGS (dummy int) " - dbc.execute(sql) + dbc.execute(sql) + + def createConsumer(self, dbc, Consumer_nums): - def createConsumer(self, dbc,Consumer_nums): - def generateConsumer(current_topic_list): - conf = TaosTmqConf() - conf.set("group.id", "tg2") - conf.set("td.connect.user", "root") - conf.set("td.connect.pass", "taosdata") -# conf.set("enable.auto.commit", "true") -# def tmq_commit_cb_print(tmq, resp, offset, param=None): -# print(f"commit: {resp}, tmq: {tmq}, offset: {offset}, param: {param}") -# conf.set_auto_commit_cb(tmq_commit_cb_print, None) - consumer = conf.new_consumer() - topic_list = TaosTmqList() + consumer = Consumer({"group.id": "tg2", "td.connect.user": "root", "td.connect.pass": "taosdata"}) + topic_list = [] for topic in current_topic_list: topic_list.append(topic) - try: - consumer.subscribe(topic_list) - except TmqError as e : - pass + + consumer.subscribe(topic_list) # consumer with random work life time_start = time.time() while 1: - res = consumer.poll(1000) - if time.time() - time_start >random.randint(5,50) : + res = consumer.poll(1) + consumer.commit(res) + if time.time() - time_start > random.randint(5, 50): break try: consumer.unsubscribe() - except TmqError as e : + except TmqError as e: pass return - + # mulit Consumer current_topic_list = self.getTopicLists(dbc) for i in range(Consumer_nums): @@ -2067,84 +2076,86 @@ class TdSuperTable: def getRegTables(self, dbc: DbConn): dbName = self._dbName try: - dbc.query("select distinct TBNAME from {}.{}".format(dbName, self._stName)) # TODO: analyze result set later - except taos.error.ProgrammingError as err: - errno2 = Helper.convertErrno(err.errno) + dbc.query("select distinct TBNAME from {}.{}".format(dbName, + self._stName)) # TODO: analyze result set later + except taos.error.ProgrammingError as err: + errno2 = Helper.convertErrno(err.errno) Logging.debug("[=] Failed to get tables from super table: errno=0x{:X}, msg: {}".format(errno2, err)) raise qr = dbc.getQueryResult() - return [v[0] for v in qr] # list transformation, ref: https://stackoverflow.com/questions/643823/python-list-transformation + return [v[0] for v in + qr] # list transformation, ref: https://stackoverflow.com/questions/643823/python-list-transformation def hasRegTables(self, dbc: DbConn): - + if dbc.existsSuperTable(self._stName): return dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) > 0 else: return False - def hasStreamTables(self,dbc: DbConn): - + def hasStreamTables(self, dbc: DbConn): + return dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName)) > 0 - def hasStreams(self,dbc: DbConn): + def hasStreams(self, dbc: DbConn): return dbc.query("show streams") > 0 - def hasTopics(self,dbc: DbConn): - + def hasTopics(self, dbc: DbConn): + return dbc.query("show topics") > 0 - def dropTopics(self,dbc: 
DbConn , dbname=None,stb_name=None): + def dropTopics(self, dbc: DbConn, dbname=None, stb_name=None): dbc.query("show topics ") topics = dbc.getQueryResult() - if dbname !=None and stb_name == None : - + if dbname != None and stb_name == None: + for topic in topics: if dbname in topic[0] and topic[0].startswith("database"): try: dbc.execute('drop topic {}'.format(topic[0])) - Logging.debug("[OPS] topic {} is droping at {}".format(topic,time.time())) - except taos.error.ProgrammingError as err: + Logging.debug("[OPS] topic {} is droping at {}".format(topic, time.time())) + except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) - if errno in [0x03EB]: # Topic subscribed cannot be dropped - pass + if errno in [0x03EB]: # Topic subscribed cannot be dropped + pass # for subsript in subscriptions: - + else: pass pass return True - elif dbname !=None and stb_name!= None: + elif dbname != None and stb_name != None: for topic in topics: if topic[0].startswith(self._dbName) and topic[0].endswith('topic'): dbc.execute('drop topic {}'.format(topic[0])) - Logging.debug("[OPS] topic {} is droping at {}".format(topic,time.time())) + Logging.debug("[OPS] topic {} is droping at {}".format(topic, time.time())) return True else: return True pass - def dropStreams(self,dbc:DbConn): + def dropStreams(self, dbc: DbConn): dbc.query("show streams ") Streams = dbc.getQueryResult() for Stream in Streams: if Stream[0].startswith(self._dbName): dbc.execute('drop stream {}'.format(Stream[0])) - + return not dbc.query("show streams ") > 0 def dropStreamTables(self, dbc: DbConn): dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName)) - + StreamTables = dbc.getQueryResult() - for StreamTable in StreamTables: + for StreamTable in StreamTables: if self.dropStreams(dbc): - dbc.execute('drop table {}.{}'.format(self._dbName,StreamTable[0])) - + dbc.execute('drop table {}.{}'.format(self._dbName, StreamTable[0])) + return not dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName)) def ensureRegTable(self, task: Optional[Task], dbc: DbConn, regTableName: str): @@ -2155,16 +2166,16 @@ class TdSuperTable: ''' dbName = self._dbName sql = "select tbname from {}.{} where tbname in ('{}')".format(dbName, self._stName, regTableName) - if dbc.query(sql) >= 1 : # reg table exists already + if dbc.query(sql) >= 1: # reg table exists already return # acquire a lock first, so as to be able to *verify*. More details in TD-1471 - fullTableName = dbName + '.' + regTableName + fullTableName = dbName + '.' 
+ regTableName if task is not None: # Somethime thie operation is requested on behalf of a "task" # Logging.info("Locking table for creation: {}".format(fullTableName)) - task.lockTable(fullTableName) # in which case we'll lock this table to ensure serialized access + task.lockTable(fullTableName) # in which case we'll lock this table to ensure serialized access # Logging.info("Table locked for creation".format(fullTableName)) - Progress.emit(Progress.CREATE_TABLE_ATTEMPT) # ATTEMPT to create a new table + Progress.emit(Progress.CREATE_TABLE_ATTEMPT) # ATTEMPT to create a new table # print("(" + fullTableName[-3:] + ")", end="", flush=True) try: sql = "CREATE TABLE {} USING {}.{} tags ({})".format( @@ -2176,17 +2187,17 @@ class TdSuperTable: finally: if task is not None: # Logging.info("Unlocking table after creation: {}".format(fullTableName)) - task.unlockTable(fullTableName) # no matter what + task.unlockTable(fullTableName) # no matter what # Logging.info("Table unlocked after creation: {}".format(fullTableName)) - def _getTagStrForSql(self, dbc) : + def _getTagStrForSql(self, dbc): tags = self._getTags(dbc) tagStrs = [] - for tagName in tags: + for tagName in tags: tagType = tags[tagName] if tagType == 'BINARY': tagStrs.append("'Beijing-Shanghai-LosAngeles'") - elif tagType== 'VARCHAR': + elif tagType == 'VARCHAR': tagStrs.append("'London-Paris-Berlin'") elif tagType == 'FLOAT': tagStrs.append('9.9') @@ -2200,12 +2211,12 @@ class TdSuperTable: dbc.query("DESCRIBE {}.{}".format(self._dbName, self._stName)) stCols = dbc.getQueryResult() # print(stCols) - ret = {row[0]:row[1] for row in stCols if row[3]=='TAG'} # name:type + ret = {row[0]: row[1] for row in stCols if row[3] == 'TAG'} # name:type # print("Tags retrieved: {}".format(ret)) return ret def addTag(self, dbc, tagName, tagType): - if tagName in self._getTags(dbc): # already + if tagName in self._getTags(dbc): # already return # sTable.addTag("extraTag", "int") sql = "alter table {}.{} add tag {} {}".format( @@ -2213,33 +2224,33 @@ class TdSuperTable: dbc.execute(sql) def dropTag(self, dbc, tagName): - if not tagName in self._getTags(dbc): # don't have this tag + if not tagName in self._getTags(dbc): # don't have this tag return sql = "alter table {}.{} drop tag {}".format(self._dbName, self._stName, tagName) dbc.execute(sql) def changeTag(self, dbc, oldTag, newTag): tags = self._getTags(dbc) - if not oldTag in tags: # don't have this tag + if not oldTag in tags: # don't have this tag return - if newTag in tags: # already have this tag + if newTag in tags: # already have this tag return sql = "alter table {}.{} change tag {} {}".format(self._dbName, self._stName, oldTag, newTag) dbc.execute(sql) def generateQueries(self, dbc: DbConn) -> List[SqlQuery]: ''' Generate queries to test/exercise this super table ''' - ret = [] # type: List[SqlQuery] + ret = [] # type: List[SqlQuery] for rTbName in self.getRegTables(dbc): # regular tables - - filterExpr = Dice.choice([ # TODO: add various kind of WHERE conditions + + filterExpr = Dice.choice([ # TODO: add various kind of WHERE conditions None ]) # Run the query against the regular table first - doAggr = (Dice.throw(2) == 0) # 1 in 2 chance - if not doAggr: # don't do aggregate query, just simple one + doAggr = (Dice.throw(2) == 0) # 1 in 2 chance + if not doAggr: # don't do aggregate query, just simple one commonExpr = Dice.choice([ '*', 'abs(speed)', @@ -2256,7 +2267,7 @@ class TdSuperTable: 'sin(speed)', 'sqrt(speed)', 'char_length(color)', - 'concat(color,color)', + 
'concat(color,color)', 'concat_ws(" ", color,color," ")', 'length(color)', 'lower(color)', @@ -2276,26 +2287,26 @@ class TdSuperTable: 'distinct(color)' ] ) - ret.append(SqlQuery( # reg table + ret.append(SqlQuery( # reg table "select {} from {}.{}".format(commonExpr, self._dbName, rTbName))) - ret.append(SqlQuery( # super table + ret.append(SqlQuery( # super table "select {} from {}.{}".format(commonExpr, self._dbName, self.getName()))) - else: # Aggregate query - aggExpr = Dice.choice([ + else: # Aggregate query + aggExpr = Dice.choice([ 'count(*)', 'avg(speed)', # 'twa(speed)', # TODO: this one REQUIRES a where statement, not reasonable - 'sum(speed)', - 'stddev(speed)', + 'sum(speed)', + 'stddev(speed)', # SELECTOR functions - 'min(speed)', - 'max(speed)', - 'first(speed)', + 'min(speed)', + 'max(speed)', + 'first(speed)', 'last(speed)', - 'top(speed, 50)', # TODO: not supported? - 'bottom(speed, 50)', # TODO: not supported? - 'apercentile(speed, 10)', # TODO: TD-1316 - 'last_row(*)', # TODO: commented out per TD-3231, we should re-create + 'top(speed, 50)', # TODO: not supported? + 'bottom(speed, 50)', # TODO: not supported? + 'apercentile(speed, 10)', # TODO: TD-1316 + 'last_row(*)', # TODO: commented out per TD-3231, we should re-create # Transformation Functions # 'diff(speed)', # TODO: no supported?! 'spread(speed)', @@ -2313,21 +2324,21 @@ class TdSuperTable: 'sample(speed,5)', 'STATECOUNT(speed,"LT",1)', 'STATEDURATION(speed,"LT",1)', - 'twa(speed)' - - ]) # TODO: add more from 'top' + 'twa(speed)' + + ]) # TODO: add more from 'top' - # if aggExpr not in ['stddev(speed)']: # STDDEV not valid for super tables?! (Done in TD-1049) sql = "select {} from {}.{}".format(aggExpr, self._dbName, self.getName()) - if Dice.throw(3) == 0: # 1 in X chance - partion_expr = Dice.choice(['color','tbname']) + if Dice.throw(3) == 0: # 1 in X chance + partion_expr = Dice.choice(['color', 'tbname']) sql = sql + ' partition BY ' + partion_expr + ' order by ' + partion_expr Progress.emit(Progress.QUERY_GROUP_BY) # Logging.info("Executing GROUP-BY query: " + sql) ret.append(SqlQuery(sql)) - return ret + return ret + class TaskReadData(StateTransitionTask): @classmethod @@ -2345,60 +2356,61 @@ class TaskReadData(StateTransitionTask): def _reconnectIfNeeded(self, wt): # 1 in 20 chance, simulate a broken connection, only if service stable (not restarting) - if random.randrange(20)==0: # and self._canRestartService(): # TODO: break connection in all situations + if random.randrange(20) == 0: # and self._canRestartService(): # TODO: break connection in all situations # Logging.info("Attempting to reconnect to server") # TODO: change to DEBUG - Progress.emit(Progress.SERVICE_RECONNECT_START) + Progress.emit(Progress.SERVICE_RECONNECT_START) try: wt.getDbConn().close() wt.getDbConn().open() - except ConnectionError as err: # may fail + except ConnectionError as err: # may fail if not gSvcMgr: Logging.error("Failed to reconnect in client-only mode") - raise # Not OK if we are running in client-only mode - if gSvcMgr.isRunning(): # may have race conditon, but low prob, due to + raise # Not OK if we are running in client-only mode + if gSvcMgr.isRunning(): # may have race conditon, but low prob, due to Logging.error("Failed to reconnect when managed server is running") - raise # Not OK if we are running normally + raise # Not OK if we are running normally - Progress.emit(Progress.SERVICE_RECONNECT_FAILURE) + Progress.emit(Progress.SERVICE_RECONNECT_FAILURE) # Logging.info("Ignoring DB reconnect error") # 
print("_r", end="", flush=True) - Progress.emit(Progress.SERVICE_RECONNECT_SUCCESS) + Progress.emit(Progress.SERVICE_RECONNECT_SUCCESS) # The above might have taken a lot of time, service might be running # by now, causing error below to be incorrectly handled due to timing issue - return # TODO: fix server restart status race condtion - + return # TODO: fix server restart status race condtion def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): self._reconnectIfNeeded(wt) dbc = wt.getDbConn() sTable = self._db.getFixedSuperTable() - + for q in sTable.generateQueries(dbc): # regular tables try: sql = q.getSql() # if 'GROUP BY' in sql: # Logging.info("Executing GROUP-BY query: " + sql) dbc.execute(sql) - except taos.error.ProgrammingError as err: + except taos.error.ProgrammingError as err: errno2 = Helper.convertErrno(err.errno) Logging.debug("[=] Read Failure: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, dbc.getLastSql())) raise + class SqlQuery: @classmethod def buildRandom(cls, db: Database): '''Build a random query against a certain database''' - + dbName = db.getName() - def __init__(self, sql:str = None): + def __init__(self, sql: str = None): self._sql = sql def getSql(self): return self._sql - + + class TaskDropSuperTable(StateTransitionTask): @classmethod def getEndState(cls): @@ -2421,7 +2433,7 @@ class TaskDropSuperTable(StateTransitionTask): regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i) try: self.execWtSql(wt, "drop table {}.{}". - format(self._db.getName(), regTableName)) # nRows always 0, like MySQL + format(self._db.getName(), regTableName)) # nRows always 0, like MySQL except taos.error.ProgrammingError as err: # correcting for strange error number scheme errno2 = Helper.convertErrno(err.errno) @@ -2429,7 +2441,6 @@ class TaskDropSuperTable(StateTransitionTask): isSuccess = False Logging.debug("[DB] Acceptable error when dropping a table") continue # try to delete next regular table - if (not tickOutput): tickOutput = True # Print only one time @@ -2441,8 +2452,6 @@ class TaskDropSuperTable(StateTransitionTask): # Drop the super table itself tblName = self._db.getFixedSuperTableName() self.execWtSql(wt, "drop table {}.{}".format(self._db.getName(), tblName)) - - class TaskAlterTags(StateTransitionTask): @@ -2472,6 +2481,7 @@ class TaskAlterTags(StateTransitionTask): sTable.changeTag(dbc, "extraTag", "newTag") # sql = "alter table db.{} change tag extraTag newTag".format(tblName) + class TaskRestartService(StateTransitionTask): _isRunning = False _classLock = threading.Lock() @@ -2484,11 +2494,12 @@ class TaskRestartService(StateTransitionTask): def canBeginFrom(cls, state: AnyState): if Config.getConfig().auto_start_service: return state.canDropFixedSuperTable() # Basicallly when we have the super table - return False # don't run this otherwise + return False # don't run this otherwise CHANCE_TO_RESTART_SERVICE = 200 + def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): - if not Config.getConfig().auto_start_service: # only execute when we are in -a mode + if not Config.getConfig().auto_start_service: # only execute when we are in -a mode print("_a", end="", flush=True) return @@ -2498,20 +2509,22 @@ class TaskRestartService(StateTransitionTask): return self._isRunning = True - if Dice.throw(self.CHANCE_TO_RESTART_SERVICE) == 0: # 1 in N chance + if Dice.throw(self.CHANCE_TO_RESTART_SERVICE) == 0: # 1 in N chance dbc = wt.getDbConn() - dbc.execute("select * from information_schema.ins_databases") # simple delay, align 
timing with other workers + dbc.execute( + "select * from information_schema.ins_databases") # simple delay, align timing with other workers gSvcMgr.restart() self._isRunning = False + class TaskAddData(StateTransitionTask): # Track which table is being actively worked on activeTable: Set[int] = set() # We use these two files to record operations to DB, useful for power-off tests - fAddLogReady = None # type: Optional[io.TextIOWrapper] - fAddLogDone = None # type: Optional[io.TextIOWrapper] + fAddLogReady = None # type: Optional[io.TextIOWrapper] + fAddLogDone = None # type: Optional[io.TextIOWrapper] @classmethod def prepToRecordOps(cls): @@ -2532,12 +2545,12 @@ class TaskAddData(StateTransitionTask): def canBeginFrom(cls, state: AnyState): return state.canAddData() - def _lockTableIfNeeded(self, fullTableName, extraMsg = ''): + def _lockTableIfNeeded(self, fullTableName, extraMsg=''): if Config.getConfig().verify_data: # Logging.info("Locking table: {}".format(fullTableName)) - self.lockTable(fullTableName) + self.lockTable(fullTableName) # Logging.info("Table locked {}: {}".format(extraMsg, fullTableName)) - # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written + # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written else: # Logging.info("Skipping locking table") pass @@ -2545,15 +2558,15 @@ class TaskAddData(StateTransitionTask): def _unlockTableIfNeeded(self, fullTableName): if Config.getConfig().verify_data: # Logging.info("Unlocking table: {}".format(fullTableName)) - self.unlockTable(fullTableName) + self.unlockTable(fullTableName) # Logging.info("Table unlocked: {}".format(fullTableName)) else: pass # Logging.info("Skipping unlocking table") - def _addDataInBatch(self, db, dbc, regTableName, te: TaskExecutor): - numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS - + def _addDataInBatch(self, db, dbc, regTableName, te: TaskExecutor): + numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS + fullTableName = db.getName() + '.' + regTableName self._lockTableIfNeeded(fullTableName, 'batch') @@ -2571,10 +2584,8 @@ class TaskAddData(StateTransitionTask): # Logging.info("Data added in batch: {}".format(sql)) self._unlockTableIfNeeded(fullTableName) - - - def _addData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches - numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS + def _addData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches + numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS for j in range(numRecords): # number of records per table intToWrite = db.getNextInt() @@ -2587,13 +2598,14 @@ class TaskAddData(StateTransitionTask): self.fAddLogReady.write("Ready to write {} to {}\n".format(intToWrite, regTableName)) self.fAddLogReady.flush() os.fsync(self.fAddLogReady.fileno()) - + # TODO: too ugly trying to lock the table reliably, refactor... fullTableName = db.getName() + '.' + regTableName - self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock - + self._lockTableIfNeeded( + fullTableName) # so that we are verify read-back. 
TODO: deal with exceptions before unlock + try: - sql = "INSERT INTO {} VALUES ('{}', {}, '{}');".format( # removed: tags ('{}', {}) + sql = "INSERT INTO {} VALUES ('{}', {}, '{}');".format( # removed: tags ('{}', {}) fullTableName, # ds.getFixedSuperTableName(), # ds.getNextBinary(), ds.getNextFloat(), @@ -2604,55 +2616,56 @@ class TaskAddData(StateTransitionTask): intWrote = intToWrite # Quick hack, attach an update statement here. TODO: create an "update" task - if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shaddow DB - intToUpdate = db.getNextInt() # Updated, but should not succeed + if (not Config.getConfig().use_shadow_db) and Dice.throw( + 5) == 0: # 1 in N chance, plus not using shaddow DB + intToUpdate = db.getNextInt() # Updated, but should not succeed nextColor = db.getNextColor() - sql = "INSERt INTO {} VALUES ('{}', {}, '{}');".format( # "INSERt" means "update" here - fullTableName, - nextTick, intToUpdate, nextColor) + sql = "INSERt INTO {} VALUES ('{}', {}, '{}');".format( # "INSERt" means "update" here + fullTableName, + nextTick, intToUpdate, nextColor) # sql = "UPDATE {} set speed={}, color='{}' WHERE ts='{}'".format( # fullTableName, db.getNextInt(), db.getNextColor(), nextTick) dbc.execute(sql) - intWrote = intToUpdate # We updated, seems TDengine non-cluster accepts this. + intWrote = intToUpdate # We updated, seems TDengine non-cluster accepts this. - except: # Any exception at all - self._unlockTableIfNeeded(fullTableName) + except: # Any exception at all + self._unlockTableIfNeeded(fullTableName) raise # Now read it back and verify, we might encounter an error if table is dropped - if Config.getConfig().verify_data: # only if command line asks for it + if Config.getConfig().verify_data: # only if command line asks for it try: readBack = dbc.queryScalar("SELECT speed from {}.{} WHERE ts='{}'". 
- format(db.getName(), regTableName, nextTick)) - if readBack != intWrote : + format(db.getName(), regTableName, nextTick)) + if readBack != intWrote: raise taos.error.ProgrammingError( "Failed to read back same data, wrote: {}, read: {}" .format(intWrote, readBack), 0x999) except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) - if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result + if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result raise taos.error.ProgrammingError( "Failed to read back same data for tick: {}, wrote: {}, read: EMPTY" .format(nextTick, intWrote), errno) - elif errno == CrashGenError.INVALID_MULTIPLE_RESULT : # multiple results + elif errno == CrashGenError.INVALID_MULTIPLE_RESULT: # multiple results raise taos.error.ProgrammingError( "Failed to read back same data for tick: {}, wrote: {}, read: MULTIPLE RESULTS" .format(nextTick, intWrote), errno) - elif errno in [0x218, 0x362]: # table doesn't exist + elif errno in [0x218, 0x362]: # table doesn't exist # do nothing pass else: # Re-throw otherwise raise finally: - self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock + self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock # Done with read-back verification, unlock the table now else: - self._unlockTableIfNeeded(fullTableName) + self._unlockTableIfNeeded(fullTableName) - # Successfully wrote the data into the DB, let's record it somehow + # Successfully wrote the data into the DB, let's record it somehow te.recordDataMark(intWrote) if Config.getConfig().record_ops: @@ -2666,17 +2679,17 @@ class TaskAddData(StateTransitionTask): # ds = self._dbManager # Quite DANGEROUS here, may result in multi-thread client access db = self._db dbc = wt.getDbConn() - numTables = self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES + numTables = self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS - tblSeq = list(range(numTables )) - random.shuffle(tblSeq) # now we have random sequence + tblSeq = list(range(numTables)) + random.shuffle(tblSeq) # now we have random sequence for i in tblSeq: if (i in self.activeTable): # wow already active # print("x", end="", flush=True) # concurrent insertion Progress.emit(Progress.CONCURRENT_INSERTION) else: self.activeTable.add(i) # marking it active - + dbName = db.getName() sTable = db.getFixedSuperTable() regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i) @@ -2684,21 +2697,22 @@ class TaskAddData(StateTransitionTask): # self._lockTable(fullTableName) # "create table" below. 
Stop it if the table is "locked" sTable.ensureRegTable(self, wt.getDbConn(), regTableName) # Ensure the table exists # self._unlockTable(fullTableName) - - if Dice.throw(1) == 0: # 1 in 2 chance + + if Dice.throw(1) == 0: # 1 in 2 chance self._addData(db, dbc, regTableName, te) else: self._addDataInBatch(db, dbc, regTableName, te) self.activeTable.discard(i) # not raising an error, unlike remove + class TaskDeleteData(StateTransitionTask): # Track which table is being actively worked on activeTable: Set[int] = set() # We use these two files to record operations to DB, useful for power-off tests - fAddLogReady = None # type: Optional[io.TextIOWrapper] - fAddLogDone = None # type: Optional[io.TextIOWrapper] + fAddLogReady = None # type: Optional[io.TextIOWrapper] + fAddLogDone = None # type: Optional[io.TextIOWrapper] @classmethod def prepToRecordOps(cls): @@ -2719,12 +2733,12 @@ class TaskDeleteData(StateTransitionTask): def canBeginFrom(cls, state: AnyState): return state.canDeleteData() - def _lockTableIfNeeded(self, fullTableName, extraMsg = ''): + def _lockTableIfNeeded(self, fullTableName, extraMsg=''): if Config.getConfig().verify_data: # Logging.info("Locking table: {}".format(fullTableName)) - self.lockTable(fullTableName) + self.lockTable(fullTableName) # Logging.info("Table locked {}: {}".format(extraMsg, fullTableName)) - # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written + # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written else: # Logging.info("Skipping locking table") pass @@ -2732,15 +2746,15 @@ class TaskDeleteData(StateTransitionTask): def _unlockTableIfNeeded(self, fullTableName): if Config.getConfig().verify_data: # Logging.info("Unlocking table: {}".format(fullTableName)) - self.unlockTable(fullTableName) + self.unlockTable(fullTableName) # Logging.info("Table unlocked: {}".format(fullTableName)) else: pass # Logging.info("Skipping unlocking table") - def _deleteData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches - numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS - del_Records = int(numRecords/5) + def _deleteData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches + numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS + del_Records = int(numRecords / 5) if Dice.throw(2) == 0: for j in range(del_Records): # number of records per table intToWrite = db.getNextInt() @@ -2753,13 +2767,14 @@ class TaskDeleteData(StateTransitionTask): self.fAddLogReady.write("Ready to delete {} to {}\n".format(intToWrite, regTableName)) self.fAddLogReady.flush() os.fsync(self.fAddLogReady.fileno()) - + # TODO: too ugly trying to lock the table reliably, refactor... fullTableName = db.getName() + '.' + regTableName - self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock - + self._lockTableIfNeeded( + fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock + try: - sql = "delete from {} where ts = '{}' ;".format( # removed: tags ('{}', {}) + sql = "delete from {} where ts = '{}' ;".format( # removed: tags ('{}', {}) fullTableName, # ds.getFixedSuperTableName(), # ds.getNextBinary(), ds.getNextFloat(), @@ -2772,45 +2787,46 @@ class TaskDeleteData(StateTransitionTask): intWrote = intToWrite # Quick hack, attach an update statement here. 
TODO: create an "update" task - if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shaddow DB - intToUpdate = db.getNextInt() # Updated, but should not succeed + if (not Config.getConfig().use_shadow_db) and Dice.throw( + 5) == 0: # 1 in N chance, plus not using shaddow DB + intToUpdate = db.getNextInt() # Updated, but should not succeed # nextColor = db.getNextColor() - sql = "delete from {} where ts = '{}' ;".format( # "INSERt" means "update" here - fullTableName, - nextTick) + sql = "delete from {} where ts = '{}' ;".format( # "INSERt" means "update" here + fullTableName, + nextTick) # sql = "UPDATE {} set speed={}, color='{}' WHERE ts='{}'".format( # fullTableName, db.getNextInt(), db.getNextColor(), nextTick) dbc.execute(sql) - intWrote = intToUpdate # We updated, seems TDengine non-cluster accepts this. + intWrote = intToUpdate # We updated, seems TDengine non-cluster accepts this. - except: # Any exception at all - self._unlockTableIfNeeded(fullTableName) + except: # Any exception at all + self._unlockTableIfNeeded(fullTableName) raise # Now read it back and verify, we might encounter an error if table is dropped - if Config.getConfig().verify_data: # only if command line asks for it + if Config.getConfig().verify_data: # only if command line asks for it try: dbc.query("SELECT * from {}.{} WHERE ts='{}'". - format(db.getName(), regTableName, nextTick)) + format(db.getName(), regTableName, nextTick)) result = dbc.getQueryResult() - if len(result)==0: + if len(result) == 0: # means data has been delete - print("D1",end="") # DF means delete failed + print("D1", end="") # DF means delete failed else: - print("DF",end="") # DF means delete failed + print("DF", end="") # DF means delete failed except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) # if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result # print("D1",end="") # D1 means delete data success and only 1 record - if errno in [0x218, 0x362,0x2662]: # table doesn't exist + if errno in [0x218, 0x362, 0x2662]: # table doesn't exist # do nothing pass else: # Re-throw otherwise raise finally: - self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock + self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock # Done with read-back verification, unlock the table now # Successfully wrote the data into the DB, let's record it somehow te.recordDataMark(intWrote) @@ -2824,52 +2840,54 @@ class TaskDeleteData(StateTransitionTask): self.fAddLogReady.write("Ready to delete {} to {}\n".format(intToWrite, regTableName)) self.fAddLogReady.flush() os.fsync(self.fAddLogReady.fileno()) - + # TODO: too ugly trying to lock the table reliably, refactor... fullTableName = db.getName() + '.' + regTableName - self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock - + self._lockTableIfNeeded( + fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock + try: - sql = "delete from {} ;".format( # removed: tags ('{}', {}) + sql = "delete from {} ;".format( # removed: tags ('{}', {}) fullTableName) # Logging.info("Adding data: {}".format(sql)) dbc.execute(sql) # Logging.info("Data added: {}".format(sql)) - + # Quick hack, attach an update statement here. 
TODO: create an "update" task - if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shaddow DB - sql = "delete from {} ;".format( # "INSERt" means "update" here - fullTableName) + if (not Config.getConfig().use_shadow_db) and Dice.throw( + 5) == 0: # 1 in N chance, plus not using shaddow DB + sql = "delete from {} ;".format( # "INSERt" means "update" here + fullTableName) dbc.execute(sql) - except: # Any exception at all - self._unlockTableIfNeeded(fullTableName) + except: # Any exception at all + self._unlockTableIfNeeded(fullTableName) raise # Now read it back and verify, we might encounter an error if table is dropped - if Config.getConfig().verify_data: # only if command line asks for it + if Config.getConfig().verify_data: # only if command line asks for it try: dbc.query("SELECT * from {}.{} WHERE ts='{}'". - format(db.getName(), regTableName, nextTick)) + format(db.getName(), regTableName, nextTick)) result = dbc.getQueryResult() - if len(result)==0: + if len(result) == 0: # means data has been delete - print("DA",end="") + print("DA", end="") else: - print("DF",end="") # DF means delete failed + print("DF", end="") # DF means delete failed except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) # if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result # print("Da",end="") # Da means delete data success and for all datas - if errno in [0x218, 0x362,0x2662]: # table doesn't exist + if errno in [0x218, 0x362, 0x2662]: # table doesn't exist # do nothing pass else: # Re-throw otherwise raise finally: - self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock + self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock # Done with read-back verification, unlock the table now if Config.getConfig().record_ops: @@ -2883,17 +2901,17 @@ class TaskDeleteData(StateTransitionTask): # ds = self._dbManager # Quite DANGEROUS here, may result in multi-thread client access db = self._db dbc = wt.getDbConn() - numTables = self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES + numTables = self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS - tblSeq = list(range(numTables )) - random.shuffle(tblSeq) # now we have random sequence + tblSeq = list(range(numTables)) + random.shuffle(tblSeq) # now we have random sequence for i in tblSeq: if (i in self.activeTable): # wow already active # print("x", end="", flush=True) # concurrent insertion Progress.emit(Progress.CONCURRENT_INSERTION) else: self.activeTable.add(i) # marking it active - + dbName = db.getName() sTable = db.getFixedSuperTable() regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i) @@ -2901,54 +2919,57 @@ class TaskDeleteData(StateTransitionTask): # self._lockTable(fullTableName) # "create table" below. 
Stop it if the table is "locked" sTable.ensureRegTable(self, wt.getDbConn(), regTableName) # Ensure the table exists # self._unlockTable(fullTableName) - + self._deleteData(db, dbc, regTableName, te) - + self.activeTable.discard(i) # not raising an error, unlike remove -class ThreadStacks: # stack info for all threads +class ThreadStacks: # stack info for all threads def __init__(self): self._allStacks = {} - allFrames = sys._current_frames() # All current stack frames, keyed with "ident" + allFrames = sys._current_frames() # All current stack frames, keyed with "ident" for th in threading.enumerate(): # For each thread - stack = traceback.extract_stack(allFrames[th.ident]) #type: ignore # Get stack for a thread - shortTid = th.native_id % 10000 #type: ignore - self._allStacks[shortTid] = stack # Was using th.native_id + stack = traceback.extract_stack(allFrames[th.ident]) # type: ignore # Get stack for a thread + shortTid = th.native_id % 10000 # type: ignore + self._allStacks[shortTid] = stack # Was using th.native_id - def record_current_time(self,current_time): + def record_current_time(self, current_time): self.current_time = current_time - def print(self, filteredEndName = None, filterInternal = False): - for shortTid, stack in self._allStacks.items(): # for each thread, stack frames top to bottom + def print(self, filteredEndName=None, filterInternal=False): + for shortTid, stack in self._allStacks.items(): # for each thread, stack frames top to bottom lastFrame = stack[-1] - if filteredEndName: # we need to filter out stacks that match this name - if lastFrame.name == filteredEndName : # end did not match + if filteredEndName: # we need to filter out stacks that match this name + if lastFrame.name == filteredEndName: # end did not match continue if filterInternal: - if lastFrame.name in ['wait', 'invoke_excepthook', - '_wait', # The Barrier exception - 'svcOutputReader', # the svcMgr thread - '__init__']: # the thread that extracted the stack - continue # ignore + if lastFrame.name in ['wait', 'invoke_excepthook', + '_wait', # The Barrier exception + 'svcOutputReader', # the svcMgr thread + '__init__']: # the thread that extracted the stack + continue # ignore # Now print print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(shortTid)) - + lastSqlForThread = DbConn.fetchSqlForThread(shortTid) last_sql_commit_time = DbConn.get_save_sql_time(shortTid) # time_cost = DbConn.get_time_cost() - print("Last SQL statement attempted from thread {} ({:.4f} sec ago) is: {}".format(shortTid, self.current_time-last_sql_commit_time ,lastSqlForThread)) + print("Last SQL statement attempted from thread {} ({:.4f} sec ago) is: {}".format(shortTid, + self.current_time - last_sql_commit_time, + lastSqlForThread)) stackFrame = 0 - for frame in stack: # was using: reversed(stack) + for frame in stack: # was using: reversed(stack) # print(frame) print("[{sf}] File {filename}, line {lineno}, in {name}".format( sf=stackFrame, filename=frame.filename, lineno=frame.lineno, name=frame.name)) print(" {}".format(frame.line)) stackFrame += 1 print("-----> End of Thread Info ----->\n") - if self.current_time-last_sql_commit_time >100: # dead lock occured + if self.current_time - last_sql_commit_time > 100: # dead lock occured print("maybe dead locked of thread {} ".format(shortTid)) + class ClientManager: def __init__(self): Logging.info("Starting service manager") @@ -3041,36 +3062,35 @@ class ClientManager: # time.sleep(2.0) # dbManager = None # release? 
- def run(self, svcMgr): + def run(self, svcMgr): # self._printLastNumbers() # global gConfig # Prepare Tde Instance global gContainer - tInst = gContainer.defTdeInstance = TdeInstance() # "subdir to hold the instance" + tInst = gContainer.defTdeInstance = TdeInstance() # "subdir to hold the instance" cfg = Config.getConfig() dbManager = DbManager(cfg.connector_type, tInst.getDbTarget()) # Regular function thPool = ThreadPool(cfg.num_threads, cfg.max_steps) self.tc = ThreadCoordinator(thPool, dbManager) - + Logging.info("Starting client instance: {}".format(tInst)) self.tc.run() # print("exec stats: {}".format(self.tc.getExecStats())) # print("TC failed = {}".format(self.tc.isFailed())) - if svcMgr: # gConfig.auto_start_service: + if svcMgr: # gConfig.auto_start_service: svcMgr.stopTaosServices() svcMgr = None - # Release global variables # gConfig = None Config.clearConfig() gSvcMgr = None logger = None - + thPool = None - dbManager.cleanUp() # destructor wouldn't run in time + dbManager.cleanUp() # destructor wouldn't run in time dbManager = None # Print exec status, etc., AFTER showing messages from the server @@ -3082,7 +3102,7 @@ class ClientManager: # Release variables here self.tc = None - gc.collect() # force garbage collection + gc.collect() # force garbage collection # h = hpy() # print("\n----- Final Python Heap -----\n") # print(h.heap()) @@ -3093,37 +3113,38 @@ class ClientManager: # self.tc.getDbManager().cleanUp() # clean up first, so we can show ZERO db connections self.tc.printStats() + class MainExec: - def __init__(self): + def __init__(self): self._clientMgr = None - self._svcMgr = None # type: Optional[ServiceManager] + self._svcMgr = None # type: Optional[ServiceManager] signal.signal(signal.SIGTERM, self.sigIntHandler) - signal.signal(signal.SIGINT, self.sigIntHandler) + signal.signal(signal.SIGINT, self.sigIntHandler) signal.signal(signal.SIGUSR1, self.sigUsrHandler) # different handler! 
def sigUsrHandler(self, signalNumber, frame): if self._clientMgr: self._clientMgr.sigUsrHandler(signalNumber, frame) - elif self._svcMgr: # Only if no client mgr, we are running alone + elif self._svcMgr: # Only if no client mgr, we are running alone self._svcMgr.sigUsrHandler(signalNumber, frame) - + def sigIntHandler(self, signalNumber, frame): - if self._svcMgr: + if self._svcMgr: self._svcMgr.sigIntHandler(signalNumber, frame) - if self._clientMgr: + if self._clientMgr: self._clientMgr.sigIntHandler(signalNumber, frame) def runClient(self): global gSvcMgr if Config.getConfig().auto_start_service: - gSvcMgr = self._svcMgr = ServiceManager(1) # hack alert - gSvcMgr.startTaosServices() # we start, don't run - + gSvcMgr = self._svcMgr = ServiceManager(1) # hack alert + gSvcMgr.startTaosServices() # we start, don't run + self._clientMgr = ClientManager() ret = None - try: - ret = self._clientMgr.run(self._svcMgr) # stop TAOS service inside + try: + ret = self._clientMgr.run(self._svcMgr) # stop TAOS service inside except requests.exceptions.ConnectionError as err: Logging.warning("Failed to open REST connection to DB: {}".format(err)) # don't raise @@ -3131,10 +3152,11 @@ class MainExec: def runService(self): global gSvcMgr - gSvcMgr = self._svcMgr = ServiceManager(Config.getConfig().num_dnodes) # save it in a global variable TODO: hack alert + gSvcMgr = self._svcMgr = ServiceManager( + Config.getConfig().num_dnodes) # save it in a global variable TODO: hack alert - gSvcMgr.run() # run to some end state - gSvcMgr = self._svcMgr = None + gSvcMgr.run() # run to some end state + gSvcMgr = self._svcMgr = None def _buildCmdLineParser(self): parser = argparse.ArgumentParser( @@ -3145,7 +3167,7 @@ class MainExec: 1. You build TDengine in the top level ./build directory, as described in offical docs 2. 
You run the server there before this script: ./build/bin/taosd -c test/cfg - ''')) + ''')) parser.add_argument( '-a', @@ -3209,7 +3231,7 @@ class MainExec: '-n', '--dynamic-db-table-names', action='store_true', - help='Use non-fixed names for dbs/tables, for -b, useful for multi-instance executions (default: false)') + help='Use non-fixed names for dbs/tables, for -b, useful for multi-instance executions (default: false)') parser.add_argument( '-o', '--num-dnodes', @@ -3259,19 +3281,18 @@ class MainExec: return parser - - def init(self): # TODO: refactor + def init(self): # TODO: refactor global gContainer - gContainer = Container() # micky-mouse DI + gContainer = Container() # micky-mouse DI - global gSvcMgr # TODO: refactor away + global gSvcMgr # TODO: refactor away gSvcMgr = None parser = self._buildCmdLineParser() Config.init(parser) # Sanity check for arguments - if Config.getConfig().use_shadow_db and Config.getConfig().max_dbs>1 : + if Config.getConfig().use_shadow_db and Config.getConfig().max_dbs > 1: raise CrashGenError("Cannot combine use-shadow-db with max-dbs of more than 1") Logging.clsInit(Config.getConfig().debug) @@ -3282,10 +3303,10 @@ class MainExec: if Config.getConfig().run_tdengine: # run server try: self.runService() - return 0 # success + return 0 # success except ConnectionError as err: Logging.error("Failed to make DB connection, please check DB instance manually") - return -1 # failure + return -1 # failure else: return self.runClient() @@ -3294,7 +3315,7 @@ class Container(): _propertyList = {'defTdeInstance'} def __init__(self): - self._cargo = {} # No cargo at the beginning + self._cargo = {} # No cargo at the beginning def _verifyValidProperty(self, name): if not name in self._propertyList: @@ -3303,10 +3324,10 @@ class Container(): # Called for an attribute, when other mechanisms fail (compare to __getattribute__) def __getattr__(self, name): self._verifyValidProperty(name) - return self._cargo[name] # just a simple lookup + return self._cargo[name] # just a simple lookup def __setattr__(self, name, value): - if name == '_cargo' : # reserved vars + if name == '_cargo': # reserved vars super().__setattr__(name, value) return self._verifyValidProperty(name) diff --git a/tests/script/sh/checkAsan.sh b/tests/script/sh/checkAsan.sh index 31fe80829f..20d1359da8 100755 --- a/tests/script/sh/checkAsan.sh +++ b/tests/script/sh/checkAsan.sh @@ -51,7 +51,10 @@ python_error=`cat ${LOG_DIR}/*.info | grep -w "stack" | wc -l` # /root/TDengine/source/libs/scalar/src/sclvector.c:1075:66: runtime error: signed integer overflow: 9223372034707292160 + 1668838476672 cannot be represented in type 'long int' # /root/TDengine/source/common/src/tdataformat.c:1876:7: runtime error: signed integer overflow: 8252423483843671206 + 2406154664059062870 cannot be represented in type 'long int' -runtime_error=`cat ${LOG_DIR}/*.asan | grep "runtime error" | grep -v "trees.c:873" | grep -v "sclfunc.c.*outside the range of representable values of type"| grep -v "signed integer overflow" |grep -v "strerror.c"| grep -v "asan_malloc_linux.cc" |wc -l` +# /home/chr/TDengine/source/libs/scalar/src/filter.c:3149:14: runtime error: applying non-zero offset 18446744073709551615 to null pointer +# /home/TDinternal/community/source/libs/scalar/src/sclvector.c:1109:66: runtime error: signed integer overflow: 9223372034707292160 + 1676867897049 cannot be represented in type 'long int' + +runtime_error=`cat ${LOG_DIR}/*.asan | grep "runtime error" | grep -v "trees.c:873" | grep -v "sclfunc.c.*outside the 
range of representable values of type"| grep -v "signed integer overflow" |grep -v "strerror.c"| grep -v "asan_malloc_linux.cc" |grep -v "filter.c:3149:14" |wc -l` echo -e "\033[44;32;1m"asan error_num: $error_num"\033[0m" echo -e "\033[44;32;1m"asan memory_leak: $memory_leak"\033[0m" diff --git a/tests/script/tsim/alter/dnode.sim b/tests/script/tsim/alter/dnode.sim index 8cfa86a88a..f9b794924b 100644 --- a/tests/script/tsim/alter/dnode.sim +++ b/tests/script/tsim/alter/dnode.sim @@ -58,6 +58,8 @@ sql_error alter dnode 1 'monDebugFlag 131' sql_error alter dnode 1 'cqDebugFlag 131' sql_error alter dnode 1 'httpDebugFlag 131' sql_error alter dnode 1 'mqttDebugFlag 131' +sql_error alter dnode 1 'qDebugFlaga 131' +sql_error alter all dnodes 'qDebugFlaga 131' sql_error alter dnode 2 'wDebugFlag' '135' sql_error alter dnode 2 'tmrDebugFlag' '135' @@ -65,6 +67,8 @@ sql_error alter dnode 1 'monDebugFlag' '131' sql_error alter dnode 1 'cqDebugFlag' '131' sql_error alter dnode 1 'httpDebugFlag' '131' sql_error alter dnode 1 'mqttDebugFlag' '131' +sql_error alter dnode 1 'qDebugFlaga' '131' +sql_error alter all dnodes 'qDebugFlaga' '131' print ======== step3 sql_error alter $hostname1 debugFlag 135 diff --git a/tests/script/tsim/compute/block_dist.sim b/tests/script/tsim/compute/block_dist.sim index 4fdcf63e34..772959644e 100644 --- a/tests/script/tsim/compute/block_dist.sim +++ b/tests/script/tsim/compute/block_dist.sim @@ -81,7 +81,6 @@ $nt = $ntPrefix . $i #sql select _block_dist() from $nt print show table distributed $nt -sql_error show table distributed $nt #if $rows == 0 then # return -1 diff --git a/tests/script/tsim/parser/nestquery.sim b/tests/script/tsim/parser/nestquery.sim index 494c3de99f..2a363de43d 100644 --- a/tests/script/tsim/parser/nestquery.sim +++ b/tests/script/tsim/parser/nestquery.sim @@ -351,7 +351,7 @@ sql_error select last_row(*) from (select * from nest_tb0) having c1 > 0 print ===========>td-4805 sql_error select tbname, i from (select * from nest_tb0) group by i; -sql select count(*),c1 from (select * from nest_tb0) where c1 < 2 group by c1; +sql select count(*),c1 from (select * from nest_tb0) where c1 < 2 group by c1 order by c1; if $rows != 2 then return -1 endi diff --git a/tests/script/tsim/parser/regressiontest.sim b/tests/script/tsim/parser/regressiontest.sim index c08b1bbf27..fe388a611e 100644 --- a/tests/script/tsim/parser/regressiontest.sim +++ b/tests/script/tsim/parser/regressiontest.sim @@ -143,9 +143,11 @@ sql delete from t1 where ts<=1537146409500 sql flush database $db +print ======================================>TS-2639 +sql show table distributed t1; + print =====================================>TD-22007 sql select count(*) from t1 interval(10a) - sql drop table t1 sql create table st1 (ts timestamp, k int) tags(a int); @@ -165,7 +167,7 @@ if $data00 != 10 then return -1 endi -sql select last_row(*) from st1 group by a +sql select last_row(*) from st1 group by a order by a desc if $rows != 2 then return -1 endi diff --git a/tests/script/tsim/scalar/caseWhen.sim b/tests/script/tsim/scalar/caseWhen.sim index f6b9c3ff08..c10413f23c 100644 --- a/tests/script/tsim/scalar/caseWhen.sim +++ b/tests/script/tsim/scalar/caseWhen.sim @@ -519,7 +519,7 @@ if $rows != 0 then return -1 endi -sql select sum(f1),count(f1) from tba1 partition by case when f1 then f1 when 1 then 1 end; +sql select sum(f1) v,count(f1) from tba1 partition by case when f1 then f1 when 1 then 1 end order by v; if $rows != 2 then return -1 endi diff --git 
a/tests/script/tsim/stream/deleteSession.sim b/tests/script/tsim/stream/deleteSession.sim index 541609633b..c3c64a5977 100644 --- a/tests/script/tsim/stream/deleteSession.sim +++ b/tests/script/tsim/stream/deleteSession.sim @@ -524,6 +524,112 @@ if $data42 != 14 then goto loop20 endi +sql drop database if exists test4; +sql drop stream if exists streams4; +sql create database test4 vgroups 1; +sql use test4; +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +print create stream streams4 trigger at_once into streamt4 as select _wstart, count(*) c1 from st partition by tbname session(ts, 2s); +sql create stream streams4 trigger at_once into streamt4 as select _wstart, count(*) c1 from st partition by tbname session(ts, 2s); + +sql insert into t1 values(1648791210000,1,2,3); +sql insert into t1 values(1648791220000,2,2,3); +sql insert into t1 values(1648791221000,2,2,3); +sql insert into t1 values(1648791222000,2,2,3); +sql insert into t1 values(1648791223000,2,2,3); +sql insert into t1 values(1648791231000,2,2,3); + +sql insert into t2 values(1648791210000,1,2,3); +sql insert into t2 values(1648791220000,2,2,3); +sql insert into t2 values(1648791221000,2,2,3); +sql insert into t2 values(1648791231000,2,2,3); + +$loop_count = 0 + +loop21: +sleep 200 +sql select * from streamt4 order by c1 desc;; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 6 then + print =====rows=$rows + goto loop21 +endi + +if $data01 != 4 then + print =====data01=$data01 + goto loop21 +endi + +if $data11 != 2 then + print =====data11=$data11 + goto loop21 +endi + +if $data21 != 1 then + print =====data21=$data21 + goto loop21 +endi + +if $data31 != 1 then + print =====data31=$data31 + goto loop21 +endi + +if $data41 != 1 then + print =====data41=$data41 + goto loop21 +endi + +if $data51 != 1 then + print =====data51=$data51 + goto loop21 +endi + +print delete from st where ts >= 1648791220000 and ts <=1648791223000; +sql delete from st where ts >= 1648791220000 and ts <=1648791223000; + +loop22: +sleep 200 +sql select * from streamt4 order by c1 desc;; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 4 then + print =====rows=$rows + goto loop22 +endi + +if $data01 != 1 then + print =====data01=$data01 + goto loop22 +endi + +if $data11 != 1 then + print =====data11=$data11 + goto loop22 +endi + +if $data21 != 1 then + print =====data21=$data21 + goto loop22 +endi + +if $data31 != 1 then + print =====data31=$data31 + goto loop22 +endi + $loop_all = $loop_all + 1 print ============loop_all=$loop_all diff --git a/tests/script/tsim/stream/deleteState.sim b/tests/script/tsim/stream/deleteState.sim index ecd9f55340..45d9bc3e39 100644 --- a/tests/script/tsim/stream/deleteState.sim +++ b/tests/script/tsim/stream/deleteState.sim @@ -189,6 +189,112 @@ if $data12 != 4 then goto loop6 endi +sql drop database if exists test4; +sql drop stream if exists streams4; +sql create database test4 vgroups 1; +sql use test4; +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + +print create stream streams4 trigger at_once into streamt4 as select _wstart, count(*) c1 from st partition by tbname state_window(c); +sql create stream streams4 trigger at_once into streamt4 as select _wstart, count(*) c1 from st partition by 
tbname state_window(c); + +sql insert into t1 values(1648791210000,1,2,1); +sql insert into t1 values(1648791220000,2,2,2); +sql insert into t1 values(1648791221000,2,2,2); +sql insert into t1 values(1648791222000,2,2,2); +sql insert into t1 values(1648791223000,2,2,2); +sql insert into t1 values(1648791231000,2,2,3); + +sql insert into t2 values(1648791210000,1,2,1); +sql insert into t2 values(1648791220000,2,2,2); +sql insert into t2 values(1648791221000,2,2,2); +sql insert into t2 values(1648791231000,2,2,3); + +$loop_count = 0 + +loop21: +sleep 200 +sql select * from streamt4 order by c1 desc;; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 6 then + print =====rows=$rows + goto loop21 +endi + +if $data01 != 4 then + print =====data01=$data01 + goto loop21 +endi + +if $data11 != 2 then + print =====data11=$data11 + goto loop21 +endi + +if $data21 != 1 then + print =====data21=$data21 + goto loop21 +endi + +if $data31 != 1 then + print =====data31=$data31 + goto loop21 +endi + +if $data41 != 1 then + print =====data41=$data41 + goto loop21 +endi + +if $data51 != 1 then + print =====data51=$data51 + goto loop21 +endi + +print delete from st where ts >= 1648791220000 and ts <=1648791223000; +sql delete from st where ts >= 1648791220000 and ts <=1648791223000; + +loop22: +sleep 200 +sql select * from streamt4 order by c1 desc;; + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +if $rows != 4 then + print =====rows=$rows + goto loop22 +endi + +if $data01 != 1 then + print =====data01=$data01 + goto loop22 +endi + +if $data11 != 1 then + print =====data11=$data11 + goto loop22 +endi + +if $data21 != 1 then + print =====data21=$data21 + goto loop22 +endi + +if $data31 != 1 then + print =====data31=$data31 + goto loop22 +endi + $loop_all = $loop_all + 1 print ============loop_all=$loop_all diff --git a/tests/script/tsim/stream/fillIntervalRange.sim b/tests/script/tsim/stream/fillIntervalRange.sim new file mode 100644 index 0000000000..a0905141f2 --- /dev/null +++ b/tests/script/tsim/stream/fillIntervalRange.sim @@ -0,0 +1,225 @@ +$loop_all = 0 +looptest: + +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start + +sleep 500 +sql connect + +sql drop database if exists test; +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20));; +sql create stream streams1 trigger at_once into streamt as select _wstart ts, count(*) c1 from t1 interval(1s) fill(NULL); +sql insert into t1 values(1648791211000,1,2,3,1.0,'aaa'); +sleep 100 +sql insert into t1 values(1648795308000,1,2,3,1.0,'aaa'); + +$loop_count = 0 + +loop0: + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 500 +sql select * from streamt where c1 > 0; + +if $rows != 2 then + print =====rows=$rows + goto loop0 +endi + +sql select count(*) from streamt; + +if $data00 != 4098 then + print =====data00=$data00 + goto loop0 +endi + +sql insert into t1 values(1648800308000,1,1,1,1.0,'aaa'); + + +$loop_count = 0 + +loop1: +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 500 +sql select * from streamt where c1 > 0; + +if $rows != 3 then + print =====rows=$rows + goto loop1 +endi + +sql select count(*) from streamt; + +if $data00 != 9098 then + print =====rows=$rows + goto loop1 +endi + +sql insert into t1 values(1648786211000,1,1,1,1.0,'aaa'); + + +$loop_count = 0 + +loop2: +$loop_count 
= $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 500 +sql select * from streamt where c1 > 0; + +if $rows != 4 then + print =====rows=$rows + goto loop2 +endi + +sql select count(*) from streamt; + +if $data00 != 14098 then + print =====rows=$rows + goto loop2 +endi + +sql insert into t1 values(1648801308000,1,1,1,1.0,'aaa') (1648802308000,1,1,1,1.0,'aaa') (1648803308000,1,1,1,1.0,'aaa') (1648804308000,1,1,1,1.0,'aaa') (1648805308000,1,1,1,1.0,'aaa'); + + +$loop_count = 0 + +loop21: +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 500 +sql select * from streamt where c1 > 0; + +if $rows != 9 then + print =====rows=$rows + goto loop21 +endi + +sql select count(*) from streamt; + +if $data00 != 19098 then + print =====rows=$rows + goto loop21 +endi + +sql drop database if exists test; +sql create database test vgroups 1; +sql use test; + +sql create table t1(ts timestamp, a int, b int , c int, d double, s varchar(20)); +print create stream streams1 trigger at_once into streamt as select _wstart ts, max(a) c1 from t1 interval(1s) fill(linear); +sql create stream streams1 trigger at_once into streamt as select _wstart ts, max(a) c1 from t1 interval(1s) fill(linear); + +print create stream streams2 trigger at_once into streamt2 as select _wstart ts, max(a) c1 from t1 interval(1s) fill(prev); +sql create stream streams2 trigger at_once into streamt2 as select _wstart ts, max(a) c1 from t1 interval(1s) fill(prev); + +sql insert into t1 values(1648791211000,1,2,3,1.0,'aaa'); +sleep 100 +sql insert into t1 values(1648795308000,1,2,3,1.0,'aaa'); + +$loop_count = 0 + +loop3: + +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 500 + +print select count(*) from streamt; +sql select count(*) from streamt; + +if $data00 != 4098 then + print =====data00=$data00 + goto loop3 +endi + +print select count(*) from streamt2; +sql select count(*) from streamt2; + +if $data00 != 4098 then + print =====data00=$data00 + goto loop3 +endi + +sql insert into t1 values(1648800308000,1,1,1,1.0,'aaa'); + + +$loop_count = 0 + +loop4: +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 500 + +print select count(*) from streamt; +sql select count(*) from streamt; + +if $data00 != 9098 then + print =====rows=$rows + goto loop4 +endi + +print select count(*) from streamt2; +sql select count(*) from streamt2; + +if $data00 != 9098 then + print =====rows=$rows + goto loop4 +endi + +sql insert into t1 values(1648786211000,1,1,1,1.0,'aaa'); + + +$loop_count = 0 + +loop5: +$loop_count = $loop_count + 1 +if $loop_count == 10 then + return -1 +endi + +sleep 500 + +print select count(*) from streamt; +sql select count(*) from streamt; + +if $data00 != 14098 then + print =====rows=$rows + goto loop5 +endi + +print select count(*) from streamt2; +sql select count(*) from streamt2; + +if $data00 != 14098 then + print =====rows=$rows + goto loop5 +endi + +system sh/stop_dnodes.sh + +#goto looptest diff --git a/tests/system-test/0-others/tag_index_basic.py b/tests/system-test/0-others/tag_index_basic.py new file mode 100644 index 0000000000..195d8910e7 --- /dev/null +++ b/tests/system-test/0-others/tag_index_basic.py @@ -0,0 +1,214 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + + +from util.log import * +from util.cases import * +from util.sql import * +from util.common import * +from util.sqlset import * +import random + + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.setsql = TDSetSql() + self.column_dict = { + 'ts': 'timestamp', + 'col1': 'tinyint', + 'col2': 'smallint', + 'col3': 'int', + 'col4': 'bigint', + 'col5': 'tinyint unsigned', + 'col6': 'smallint unsigned', + 'col7': 'int unsigned', + 'col8': 'bigint unsigned', + 'col9': 'float', + 'col10': 'double', + 'col11': 'bool', + 'col12': 'varchar(20)', + 'col13': 'nchar(20)' + } + self.tag_dict = { + 't1': 'tinyint', + 't2': 'smallint', + 't3': 'int', + 't4': 'bigint', + 't5': 'tinyint unsigned', + 't6': 'smallint unsigned', + 't7': 'int unsigned', + 't8': 'bigint unsigned', + 't9': 'float', + 't10': 'double', + 't11': 'bool', + 't12': 'varchar(20)', + 't13': 'nchar(20)', + 't14': 'timestamp' + } + + + def set_stb_sql(self,stbname,column_dict,tag_dict): + column_sql = '' + tag_sql = '' + for k,v in column_dict.items(): + column_sql += f"{k} {v}, " + for k,v in tag_dict.items(): + tag_sql += f"{k} {v}, " + create_stb_sql = f'create stable {stbname} ({column_sql[:-2]}) tags ({tag_sql[:-2]})' + return create_stb_sql + + # create stable and child tables + def create_table(self, stbname, tbname, count): + tdSql.prepare() + tdSql.execute('use db') + + # create stable + create_table_sql = self.set_stb_sql(stbname, self.column_dict, self.tag_dict) + tdSql.execute(create_table_sql) + + # create child table + for i in range(count): + ti = i % 128 + tags = f'{ti},{ti},{i},{i},{ti},{ti},{i},{i},{i}.000{i},{i}.000{i},true,"var{i}","nch{i}",now' + sql = f'create table {tbname}{i} using {stbname} tags({tags})' + tdSql.execute(sql) + + tdLog.info(f" create {count} child tables ok.") + + + # create stable and child tables + def create_tagidx(self, stbname): + cnt = -1 + for key in self.tag_dict.keys(): + # first tag have default index, so skip + if cnt == -1: + cnt = 0 + continue; + sql = f'create index idx_{key} on {stbname} ({key})' + tdLog.info(f" sql={sql}") + tdSql.execute(sql) + cnt += 1 + tdLog.info(f' create {cnt} tag indexs ok.') + + # insert to child table d1 data + def insert_data(self, tbname): + # d1 insert 3 rows + for i in range(3): + sql = f'insert into {tbname}1(ts,col1) values(now,{i});' + tdSql.execute(sql) + # d20 insert 4 + for i in range(4): + sql = f'insert into {tbname}20(ts,col1) values(now,{i});' + tdSql.execute(sql) + + # check show indexs + def show_tagidx(self, stbname): + sql = f'select index_name,column_name from information_schema.ins_indexes where db_name="db"' + tdSql.query(sql) + rows = len(self.tag_dict.keys())-1 + tdSql.checkRows(rows) + + for i in range(rows): + col_name = tdSql.getData(i, 1) + idx_name = f'idx_{col_name}' + tdSql.checkData(i, 0, idx_name) + + tdLog.info(f' show {rows} tag indexs ok.') + + # query with tag idx + def query_tagidx(self, stbname): + sql = f'select * from meters where t1=1' + tdSql.query(sql) + tdSql.checkRows(3) + + sql = f'select * from meters where t2<10' + tdSql.query(sql) + tdSql.checkRows(3) + + sql = f'select * from meters where 
t2>10' + tdSql.query(sql) + tdSql.checkRows(4) + + sql = f'select * from meters where t3<30' + tdSql.query(sql) + tdSql.checkRows(7) + + sql = f'select * from meters where t12="11"' + tdSql.query(sql) + tdSql.checkRows(0) + + sql = f'select * from meters where (t4 < 10 or t5 = 20) and t6= 30' + tdSql.query(sql) + tdSql.checkRows(0) + + sql = f'select * from meters where (t7 < 20 and t8 = 20) or t4 = 20' + tdSql.query(sql) + tdSql.checkRows(4) + + # drop child table + def drop_tables(self, tbname, count): + # table d1 and d20 have verify data , so can not drop + start = random.randint(21, count/2) + end = random.randint(count/2 + 1, count - 1) + for i in range(start, end): + sql = f'drop table {tbname}{i}' + tdSql.execute(sql) + cnt = end - start + 1 + tdLog.info(f' drop table from {start} to {end} count={cnt}') + + # drop tag index + def drop_tagidx(self, stbname): + # drop index + cnt = -1 + for key in self.tag_dict.keys(): + # first tag have default index, so skip + if cnt == -1: + cnt = 0 + continue; + sql = f'drop index idx_{key}' + tdSql.execute(sql) + cnt += 1 + + # check idx result is 0 + sql = f'select index_name,column_name from information_schema.ins_indexes where db_name="db"' + tdSql.query(sql) + tdSql.checkRows(0) + tdLog.info(f' drop {cnt} tag indexs ok.') + + # run + def run(self): + # var + stable = "meters" + tbname = "d" + count = 1000 + # do + self.create_table(stable, tbname, count) + self.create_tagidx(stable) + self.insert_data(tbname) + self.show_tagidx(stable) + self.query_tagidx(stable) + self.drop_tables(tbname, count) + self.drop_tagidx(stable) + # query after delete , expect no crash + self.query_tagidx(stable) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/0-others/user_manage.py b/tests/system-test/0-others/user_manage.py index 5148e26b39..6f90a2873a 100644 --- a/tests/system-test/0-others/user_manage.py +++ b/tests/system-test/0-others/user_manage.py @@ -12,12 +12,13 @@ # -*- coding: utf-8 -*- import taos -from util.log import * -from util.cases import * -from util.sql import * -from util.common import * -from util.sqlset import * from taos.tmq import * +from util.cases import * +from util.common import * +from util.log import * +from util.sql import * +from util.sqlset import * + class TDTestCase: def init(self, conn, logSql, replicaVar=1): @@ -26,10 +27,10 @@ class TDTestCase: tdSql.init(conn.cursor()) self.setsql = TDSetSql() self.stbname = 'stb' - self.binary_length = 20 # the length of binary for column_dict + self.binary_length = 20 # the length of binary for column_dict self.nchar_length = 20 # the length of nchar for column_dict self.column_dict = { - 'ts' : 'timestamp', + 'ts': 'timestamp', 'col1': 'tinyint', 'col2': 'smallint', 'col3': 'int', @@ -45,7 +46,7 @@ class TDTestCase: 'col13': f'nchar({self.nchar_length})' } self.tag_dict = { - 'ts_tag' : 'timestamp', + 'ts_tag': 'timestamp', 't1': 'tinyint', 't2': 'smallint', 't3': 'int', @@ -67,25 +68,28 @@ class TDTestCase: f'now,1,2,3,4,5,6,7,8,9.9,10.1,true,"abcd","涛思数据"' ] self.tbnum = 1 + def prepare_data(self): - tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict)) + tdSql.execute(self.setsql.set_create_stable_sql(self.stbname, self.column_dict, self.tag_dict)) for i in range(self.tbnum): tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} 
tags({self.tag_list[i]})') for j in self.values_list: tdSql.execute(f'insert into {self.stbname}_{i} values({j})') + def create_user(self): - for user_name in ['jiacy1_all','jiacy1_read','jiacy1_write','jiacy1_none','jiacy0_all','jiacy0_read','jiacy0_write','jiacy0_none']: + for user_name in ['jiacy1_all', 'jiacy1_read', 'jiacy1_write', 'jiacy1_none', 'jiacy0_all', 'jiacy0_read', + 'jiacy0_write', 'jiacy0_none']: if 'jiacy1' in user_name.lower(): tdSql.execute(f'create user {user_name} pass "123" sysinfo 1') elif 'jiacy0' in user_name.lower(): tdSql.execute(f'create user {user_name} pass "123" sysinfo 0') - for user_name in ['jiacy1_all','jiacy1_read','jiacy0_all','jiacy0_read']: + for user_name in ['jiacy1_all', 'jiacy1_read', 'jiacy0_all', 'jiacy0_read']: tdSql.execute(f'grant read on db to {user_name}') - for user_name in ['jiacy1_all','jiacy1_write','jiacy0_all','jiacy0_write']: + for user_name in ['jiacy1_all', 'jiacy1_write', 'jiacy0_all', 'jiacy0_write']: tdSql.execute(f'grant write on db to {user_name}') def user_privilege_check(self): - jiacy1_read_conn = taos.connect(user='jiacy1_read',password='123') + jiacy1_read_conn = taos.connect(user='jiacy1_read', password='123') sql = "create table ntb (ts timestamp,c0 int)" expectErrNotOccured = True try: @@ -94,32 +98,34 @@ class TDTestCase: expectErrNotOccured = False if expectErrNotOccured: caller = inspect.getframeinfo(inspect.stack()[1][0]) - tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occured" ) + tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occured") else: self.queryRows = 0 self.queryCols = 0 self.queryResult = None tdLog.info(f"sql:{sql}, expect error occured") pass + def drop_topic(self): - jiacy1_all_conn = taos.connect(user='jiacy1_all',password='123') - jiacy1_read_conn = taos.connect(user='jiacy1_read',password='123') - jiacy1_write_conn = taos.connect(user='jiacy1_write',password='123') - jiacy1_none_conn = taos.connect(user='jiacy1_none',password='123') - jiacy0_all_conn = taos.connect(user='jiacy0_all',password='123') - jiacy0_read_conn = taos.connect(user='jiacy0_read',password='123') - jiacy0_write_conn = taos.connect(user='jiacy0_write',password='123') - jiacy0_none_conn = taos.connect(user='jiacy0_none',password='123') + jiacy1_all_conn = taos.connect(user='jiacy1_all', password='123') + jiacy1_read_conn = taos.connect(user='jiacy1_read', password='123') + jiacy1_write_conn = taos.connect(user='jiacy1_write', password='123') + jiacy1_none_conn = taos.connect(user='jiacy1_none', password='123') + jiacy0_all_conn = taos.connect(user='jiacy0_all', password='123') + jiacy0_read_conn = taos.connect(user='jiacy0_read', password='123') + jiacy0_write_conn = taos.connect(user='jiacy0_write', password='123') + jiacy0_none_conn = taos.connect(user='jiacy0_none', password='123') tdSql.execute('create topic root_db as select * from db.stb') - for user in [jiacy1_all_conn,jiacy1_read_conn,jiacy0_all_conn,jiacy0_read_conn]: + for user in [jiacy1_all_conn, jiacy1_read_conn, jiacy0_all_conn, jiacy0_read_conn]: user.execute(f'create topic db_jiacy as select * from db.stb') user.execute('drop topic db_jiacy') - for user in [jiacy1_write_conn,jiacy1_none_conn,jiacy0_write_conn,jiacy0_none_conn,jiacy1_all_conn,jiacy1_read_conn,jiacy0_all_conn,jiacy0_read_conn]: + for user in [jiacy1_write_conn, jiacy1_none_conn, jiacy0_write_conn, jiacy0_none_conn, jiacy1_all_conn, + jiacy1_read_conn, jiacy0_all_conn, jiacy0_read_conn]: sql_list = [] - if user in 
[jiacy1_all_conn,jiacy1_read_conn,jiacy0_all_conn,jiacy0_read_conn]: + if user in [jiacy1_all_conn, jiacy1_read_conn, jiacy0_all_conn, jiacy0_read_conn]: sql_list = ['drop topic root_db'] - elif user in [jiacy1_write_conn,jiacy1_none_conn,jiacy0_write_conn,jiacy0_none_conn]: - sql_list = ['drop topic root_db','create topic db_jiacy as select * from db.stb'] + elif user in [jiacy1_write_conn, jiacy1_none_conn, jiacy0_write_conn, jiacy0_none_conn]: + sql_list = ['drop topic root_db', 'create topic db_jiacy as select * from db.stb'] for sql in sql_list: expectErrNotOccured = True try: @@ -128,33 +134,26 @@ class TDTestCase: expectErrNotOccured = False if expectErrNotOccured: caller = inspect.getframeinfo(inspect.stack()[1][0]) - tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occured" ) + tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occured") else: self.queryRows = 0 self.queryCols = 0 self.queryResult = None tdLog.info(f"sql:{sql}, expect error occured") + def tmq_commit_cb_print(tmq, resp, param=None): print(f"commit: {resp}, tmq: {tmq}, param: {param}") + def subscribe_topic(self): print("create topic") tdSql.execute('create topic db_topic as select * from db.stb') tdSql.execute('grant subscribe on db_topic to jiacy1_all') print("build consumer") - conf = TaosTmqConf() - conf.set("group.id", "tg2") - conf.set("td.connect.user", "jiacy1_all") - conf.set("td.connect.pass", "123") - conf.set("enable.auto.commit", "true") - conf.set_auto_commit_cb(self.tmq_commit_cb_print, None) - tmq = conf.new_consumer() + tmq = Consumer({"group.id": "tg2", "td.connect.user": "jiacy1_all", "td.connect.pass": "123", + "enable.auto.commit": "true"}) print("build topic list") - topic_list = TaosTmqList() - topic_list.append("db_topic") + tmq.subscribe(["db_topic"]) print("basic consume loop") - tmq.subscribe(topic_list) - sub_list = tmq.subscription() - print("subscribed topics: ", sub_list) c = 0 l = 0 for i in range(10): @@ -163,20 +162,23 @@ class TDTestCase: res = tmq.poll(10) print(f"loop {l}") l += 1 - if res: - c += 1 - topic = res.get_topic_name() - vg = res.get_vgroup_id() - db = res.get_db_name() - print(f"topic: {topic}\nvgroup id: {vg}\ndb: {db}") - for row in res: - print(row) - print("* committed") - tmq.commit(res) - else: + if not res: print(f"received empty message at loop {l} (committed {c})") - pass - + continue + if res.error(): + print(f"consumer error at loop {l} (committed {c}) {res.error()}") + continue + + c += 1 + topic = res.topic() + db = res.database() + print(f"topic: {topic}\ndb: {db}") + + for row in res: + print(row.fetchall()) + print("* committed") + tmq.commit(res) + def run(self): tdSql.prepare() self.create_user() @@ -184,9 +186,11 @@ class TDTestCase: self.drop_topic() self.user_privilege_check() self.subscribe_topic() + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) + tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 5077b70d72..d7344c631f 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -2025,15 +2025,16 @@ class TDTestCase: tdSql.error(f"select interp('abcd') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") tdSql.error(f"select interp('中文字符') from {dbname}.{tbname} 
range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + tdLog.printNoPrefix("==========step13:stable cases") - #tdSql.query(f"select interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)") + tdSql.error(f"select interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)") #tdSql.checkRows(13) #tdSql.query(f"select interp(c0) from {dbname}.{ctbname1} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)") #tdSql.checkRows(13) - #tdSql.query(f"select interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1s) fill(null)") + tdSql.error(f"select interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1s) fill(null)") #tdSql.checkRows(13) #tdSql.query(f"select _irowts,interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:04', '2020-02-02 00:00:16') every(1h) fill(prev)") diff --git a/tests/system-test/2-query/max_min_last_interval.py b/tests/system-test/2-query/max_min_last_interval.py new file mode 100644 index 0000000000..553060fd4c --- /dev/null +++ b/tests/system-test/2-query/max_min_last_interval.py @@ -0,0 +1,1711 @@ +# author : wenzhouwww +from util.log import * +from util.sql import * +from util.cases import * + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), True) + + def prepare_data(self): + tdSql.execute(f" CREATE TABLE `tb` (`ts` TIMESTAMP, `open` DOUBLE, `close` DOUBLE, `high` DOUBLE, `low` DOUBLE, `vol` DOUBLE, `amount` DOUBLE, `preclose` DOUBLE) ") + + tdSql.execute(f"insert into tb values ('2020-01-02 09:31:00',11.2,11.08,11.24,11.06,907000.0,10149188.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:32:00',11.08,10.99,11.08,10.96,301000.0,3323031.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:33:00',10.99,11.05,11.08,10.95,269300.0,2966591.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:34:00',11.08,11.0,11.08,10.99,239100.0,2635055.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:35:00',10.99,10.94,10.99,10.93,267200.0,2926850.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:36:00',10.95,10.97,10.98,10.93,202300.0,2216126.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:37:00',10.98,10.96,10.99,10.96,206400.0,2263399.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:38:00',10.96,10.96,10.97,10.95,197900.0,2168095.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:39:00',10.96,10.94,10.98,10.93,133100.0,1457263.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:40:00',10.93,10.95,10.97,10.93,102400.0,1120751.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:41:00',10.93,10.93,10.95,10.91,175000.0,1912470.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:42:00',10.92,10.92,10.96,10.92,128000.0,1399009.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:43:00',10.95,10.92,10.97,10.92,201500.0,2202114.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:44:00',10.91,10.9,10.92,10.9,198300.0,2162914.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:45:00',10.9,10.86,10.9,10.84,319100.0,3468665.0,11.02)") + tdSql.execute(f"insert into tb 
values ('2020-01-02 09:46:00',10.87,10.9,10.9,10.86,222700.0,2420930.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:47:00',10.9,10.89,10.93,10.88,126500.0,1379156.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:48:00',10.91,10.95,10.96,10.91,44600.0,487778.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:49:00',10.96,10.96,10.98,10.95,95300.0,1045077.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:50:00',10.94,10.97,10.99,10.93,161900.0,1775641.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:51:00',10.97,11.05,11.05,10.97,156300.0,1722227.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:52:00',11.05,11.06,11.08,11.04,194200.0,2146643.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:53:00',11.06,11.03,11.06,11.02,187000.0,2062967.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:54:00',11.02,11.01,11.03,11.0,60800.0,670041.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:55:00',11.01,11.07,11.07,11.01,186400.0,2056238.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:56:00',11.08,11.03,11.08,11.03,107100.0,1185077.5799999982,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:57:00',11.03,11.04,11.06,11.02,118700.0,1308724.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:58:00',11.03,11.05,11.06,11.03,23600.0,260707.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 09:59:00',11.05,11.03,11.05,11.03,38200.0,421723.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:00:00',11.03,11.04,11.05,11.03,77600.0,856134.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:01:00',11.04,11.03,11.04,11.03,34000.0,375159.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:02:00',11.04,11.04,11.05,11.03,67900.0,749782.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:03:00',11.05,11.04,11.06,11.04,67822.0,749311.8800000027,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:04:00',11.04,11.04,11.05,11.03,39878.0,440388.8999999985,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:05:00',11.04,11.04,11.04,11.03,10000.0,110358.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:06:00',11.04,11.02,11.04,11.02,71200.0,785535.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:07:00',11.02,11.04,11.04,11.02,41500.0,457611.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:08:00',11.03,11.04,11.05,11.03,11700.0,129195.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:09:00',11.04,11.03,11.05,11.02,173300.0,1913275.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:10:00',11.03,11.04,11.04,11.02,70000.0,771276.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:11:00',11.03,11.06,11.06,11.02,169200.0,1869131.1400000006,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:12:00',11.06,11.07,11.07,11.05,64800.0,716812.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:13:00',11.06,11.06,11.07,11.06,16400.0,181433.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:14:00',11.06,11.11,11.11,11.06,298500.0,3307379.5,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:15:00',11.11,11.11,11.14,11.11,95000.0,1056803.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:16:00',11.14,11.12,11.15,11.12,196300.0,2186954.0,11.02)") + tdSql.execute(f"insert 
into tb values ('2020-01-02 10:17:00',11.12,11.12,11.15,11.12,169000.0,1881848.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:18:00',11.13,11.15,11.15,11.13,58700.0,653823.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:19:00',11.15,11.13,11.15,11.13,183100.0,2040752.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:20:00',11.13,11.13,11.14,11.12,57600.0,641102.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:21:00',11.13,11.14,11.14,11.13,89700.0,998969.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:22:00',11.14,11.11,11.14,11.11,50500.0,561017.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:23:00',11.1,11.12,11.12,11.1,83300.0,925194.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:24:00',11.12,11.09,11.12,11.09,15100.0,167627.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:25:00',11.1,11.1,11.11,11.1,8600.0,95462.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:26:00',11.09,11.12,11.17,11.09,270300.0,3011716.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:27:00',11.12,11.18,11.18,11.12,69600.0,777739.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:28:00',11.17,11.19,11.2,11.17,237400.0,2655762.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:29:00',11.19,11.21,11.21,11.18,103400.0,1157957.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:30:00',11.21,11.18,11.21,11.16,58000.0,649227.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:31:00',11.17,11.2,11.2,11.17,187000.0,2093130.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:32:00',11.21,11.23,11.25,11.2,212000.0,2378486.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:33:00',11.24,11.34,11.34,11.23,501637.0,5661866.099999994,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:34:00',11.33,11.41,11.42,11.31,612000.0,6956284.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:35:00',11.41,11.38,11.45,11.38,575600.0,6575579.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:36:00',11.38,11.39,11.39,11.35,328800.0,3738689.4900000095,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:37:00',11.35,11.41,11.41,11.35,230516.0,2626204.4399999976,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:38:00',11.4,11.36,11.4,11.35,137497.0,1562526.9200000018,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:39:00',11.36,11.37,11.39,11.33,249100.0,2828453.7099999934,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:40:00',11.35,11.35,11.37,11.34,205987.0,2340070.950000003,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:41:00',11.35,11.4,11.4,11.35,149496.0,1702321.1899999976,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:42:00',11.38,11.41,11.42,11.38,342971.0,3909339.100000009,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:43:00',11.41,11.4,11.42,11.39,291326.0,3322693.9799999893,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:44:00',11.4,11.48,11.48,11.4,443425.0,5075111.1400000155,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:45:00',11.46,11.44,11.46,11.44,125734.0,1439362.2999999821,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:46:00',11.44,11.44,11.45,11.44,152887.0,1750929.900000006,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 
10:47:00',11.45,11.44,11.46,11.44,81013.0,927533.7199999988,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:48:00',11.43,11.4,11.44,11.4,79475.0,907338.5,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:49:00',11.4,11.4,11.43,11.36,167652.0,1911327.0600000024,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:50:00',11.41,11.39,11.41,11.38,23800.0,271169.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:51:00',11.38,11.37,11.39,11.37,36761.0,418027.56999999285,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:52:00',11.38,11.38,11.39,11.37,109039.0,1240554.9900000095,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:53:00',11.37,11.37,11.4,11.37,189396.0,2156768.5200000107,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:54:00',11.39,11.37,11.39,11.37,104404.0,1187164.4799999893,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:55:00',11.37,11.35,11.37,11.35,86980.0,988150.8000000119,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:56:00',11.35,11.37,11.37,11.34,96736.0,1097808.599999994,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:57:00',11.36,11.36,11.38,11.36,62323.0,708217.2800000012,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:58:00',11.37,11.34,11.37,11.34,135782.0,1541601.1499999762,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 10:59:00',11.35,11.34,11.35,11.34,38379.0,435287.8600000143,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:00:00',11.34,11.36,11.36,11.34,56176.0,637639.8400000036,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:01:00',11.36,11.36,11.37,11.35,56000.0,636169.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:02:00',11.35,11.38,11.38,11.35,120151.0,1365796.3599999845,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:03:00',11.38,11.4,11.4,11.37,91258.0,1039128.0400000215,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:04:00',11.4,11.39,11.4,11.38,86994.0,991647.6599999964,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:05:00',11.39,11.38,11.4,11.38,75903.0,864927.2199999988,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:06:00',11.4,11.4,11.4,11.39,37100.0,422815.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:07:00',11.4,11.39,11.4,11.39,64900.0,739346.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:08:00',11.39,11.38,11.39,11.38,58600.0,666998.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:09:00',11.38,11.4,11.4,11.36,144417.0,1644390.650000006,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:10:00',11.37,11.4,11.4,11.35,59152.0,673655.7599999905,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:11:00',11.41,11.4,11.41,11.39,48700.0,555351.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:12:00',11.4,11.42,11.42,11.39,79800.0,910197.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:13:00',11.41,11.43,11.44,11.41,49000.0,559999.3400000036,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:14:00',11.43,11.44,11.44,11.43,65900.0,753786.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:15:00',11.44,11.43,11.44,11.43,21200.0,242402.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:16:00',11.43,11.43,11.44,11.43,77900.0,890539.0499999821,11.02)") + tdSql.execute(f"insert into tb values 
('2020-01-02 11:17:00',11.42,11.42,11.43,11.41,72600.0,828962.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:18:00',11.42,11.38,11.42,11.38,127200.0,1451263.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:19:00',11.38,11.41,11.41,11.38,35700.0,406927.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:20:00',11.4,11.41,11.41,11.4,17900.0,204224.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:21:00',11.41,11.41,11.41,11.4,44400.0,506283.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:22:00',11.41,11.4,11.41,11.4,101700.0,1159426.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:23:00',11.4,11.38,11.4,11.36,61548.0,700285.2400000095,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:24:00',11.36,11.38,11.38,11.35,20852.0,237018.75999999046,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:25:00',11.37,11.36,11.37,11.36,36548.0,415406.24000000954,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:26:00',11.35,11.36,11.37,11.35,52800.0,599478.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:27:00',11.36,11.35,11.36,11.35,51200.0,581300.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:28:00',11.35,11.34,11.36,11.34,45700.0,518665.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:29:00',11.34,11.32,11.34,11.32,47883.0,542748.2199999988,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 11:30:00',11.32,11.33,11.33,11.32,37000.0,419126.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:01:00',11.33,11.33,11.35,11.33,82917.0,939614.2800000012,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:02:00',11.33,11.31,11.34,11.31,58750.0,665362.5,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:03:00',11.31,11.31,11.32,11.31,90400.0,1022443.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:04:00',11.32,11.31,11.32,11.3,120900.0,1366511.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:05:00',11.31,11.32,11.32,11.3,106400.0,1203291.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:06:00',11.32,11.36,11.36,11.32,67000.0,759965.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:07:00',11.36,11.36,11.37,11.34,95400.0,1083468.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:08:00',11.36,11.33,11.37,11.33,93900.0,1066666.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:09:00',11.33,11.34,11.34,11.32,37900.0,429351.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:10:00',11.33,11.33,11.33,11.32,26300.0,297964.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:11:00',11.33,11.28,11.33,11.28,90600.0,1024723.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:12:00',11.29,11.28,11.3,11.27,114839.0,1295492.9200000167,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:13:00',11.28,11.28,11.29,11.28,48300.0,544981.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:14:00',11.28,11.28,11.29,11.27,68561.0,773163.0799999833,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:15:00',11.28,11.27,11.29,11.27,84300.0,950882.6100000143,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:16:00',11.27,11.27,11.28,11.27,32000.0,360755.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:17:00',11.27,11.26,11.27,11.26,42220.0,475777.1999999881,11.02)") + 
tdSql.execute(f"insert into tb values ('2020-01-02 13:18:00',11.26,11.25,11.26,11.25,37480.0,421951.8000000119,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:19:00',11.25,11.26,11.26,11.25,8800.0,99066.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:20:00',11.26,11.26,11.26,11.25,36400.0,409669.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:21:00',11.26,11.24,11.26,11.24,43500.0,489525.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:22:00',11.24,11.24,11.25,11.24,42500.0,477967.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:23:00',11.25,11.25,11.25,11.24,27500.0,309368.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:24:00',11.25,11.25,11.26,11.25,14800.0,166644.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:25:00',11.25,11.25,11.26,11.25,6300.0,70906.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:26:00',11.25,11.26,11.26,11.25,31500.0,354602.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:27:00',11.25,11.27,11.27,11.25,34500.0,388590.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:28:00',11.26,11.26,11.27,11.26,42700.0,481092.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:29:00',11.27,11.27,11.27,11.25,86020.0,968959.3999999762,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:30:00',11.27,11.26,11.28,11.26,28180.0,317608.60000002384,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:31:00',11.26,11.26,11.27,11.26,10620.0,119642.19999998808,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:32:00',11.26,11.26,11.27,11.26,72200.0,813073.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:33:00',11.26,11.26,11.27,11.26,29400.0,331219.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:34:00',11.26,11.27,11.27,11.26,14400.0,162189.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:35:00',11.27,11.25,11.27,11.25,22480.0,253073.80000001192,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:36:00',11.25,11.25,11.26,11.25,26600.0,299455.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:37:00',11.25,11.26,11.26,11.25,80520.0,906359.1999999881,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:38:00',11.27,11.25,11.27,11.25,16180.0,182079.80000001192,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:39:00',11.25,11.23,11.26,11.23,106000.0,1191411.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:40:00',11.23,11.23,11.24,11.23,91100.0,1023446.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:41:00',11.23,11.22,11.24,11.22,35400.0,397556.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:42:00',11.23,11.2,11.23,11.2,172700.0,1936596.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:43:00',11.21,11.19,11.21,11.19,89700.0,1004799.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:44:00',11.2,11.19,11.2,11.18,57100.0,639239.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:45:00',11.19,11.2,11.2,11.19,67595.0,756911.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:46:00',11.2,11.2,11.21,11.2,115000.0,1288434.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:47:00',11.19,11.2,11.2,11.19,27200.0,304467.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 
13:48:00',11.19,11.19,11.19,11.18,43700.0,488802.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:49:00',11.19,11.19,11.19,11.18,48700.0,544675.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:50:00',11.19,11.19,11.22,11.18,98500.0,1103443.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:51:00',11.19,11.2,11.21,11.19,67500.0,756052.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:52:00',11.2,11.2,11.22,11.19,43600.0,488300.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:53:00',11.2,11.2,11.2,11.19,52600.0,589105.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:54:00',11.2,11.2,11.2,11.19,60300.0,675155.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:55:00',11.2,11.19,11.2,11.18,59100.0,661067.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:56:00',11.19,11.2,11.21,11.18,68700.0,768996.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:57:00',11.2,11.21,11.22,11.19,74900.0,839752.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:58:00',11.22,11.21,11.22,11.21,9600.0,107664.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 13:59:00',11.21,11.21,11.22,11.21,20900.0,234373.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:00:00',11.22,11.23,11.24,11.21,68500.0,768957.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:01:00',11.23,11.24,11.24,11.23,30200.0,339429.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:02:00',11.25,11.22,11.25,11.22,38700.0,434942.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:03:00',11.22,11.24,11.25,11.22,58720.0,660538.3999999762,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:04:00',11.24,11.24,11.25,11.24,19400.0,218099.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:05:00',11.25,11.24,11.25,11.23,31100.0,349744.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:06:00',11.24,11.24,11.24,11.23,6500.0,73064.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:07:00',11.24,11.23,11.24,11.22,21200.0,238054.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:08:00',11.22,11.22,11.24,11.22,6300.0,70731.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:09:00',11.22,11.22,11.24,11.22,5600.0,62878.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:10:00',11.22,11.23,11.23,11.22,5600.0,62866.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:11:00',11.23,11.24,11.24,11.22,36000.0,404370.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:12:00',11.24,11.24,11.25,11.22,55300.0,622032.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:13:00',11.25,11.23,11.25,11.23,32600.0,366576.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:14:00',11.23,11.24,11.25,11.23,21000.0,236082.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:15:00',11.24,11.26,11.26,11.24,64500.0,725876.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:16:00',11.26,11.27,11.28,11.26,82200.0,926583.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:17:00',11.27,11.26,11.28,11.26,45600.0,514013.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:18:00',11.27,11.26,11.27,11.26,17400.0,195986.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:19:00',11.27,11.27,11.29,11.27,273200.0,3082769.0,11.02)") + 
tdSql.execute(f"insert into tb values ('2020-01-02 14:20:00',11.27,11.3,11.3,11.27,175039.0,1977112.3100000024,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:21:00',11.31,11.33,11.34,11.31,128300.0,1453462.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:22:00',11.33,11.35,11.35,11.32,66700.0,756208.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:23:00',11.34,11.35,11.36,11.34,121700.0,1381218.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:24:00',11.36,11.35,11.37,11.35,66600.0,756737.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:25:00',11.35,11.36,11.38,11.35,132600.0,1507997.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:26:00',11.38,11.39,11.4,11.37,149180.0,1699236.400000006,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:27:00',11.39,11.37,11.39,11.36,35820.0,407303.59999999404,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:28:00',11.37,11.35,11.38,11.35,47900.0,544627.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:29:00',11.35,11.37,11.37,11.35,17400.0,197656.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:30:00',11.36,11.35,11.36,11.35,87300.0,991027.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:31:00',11.35,11.39,11.4,11.35,138680.0,1578201.400000006,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:32:00',11.39,11.4,11.4,11.39,180200.0,2054110.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:33:00',11.4,11.41,11.41,11.39,114300.0,1303708.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:34:00',11.42,11.4,11.42,11.4,160600.0,1833388.1299999952,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:35:00',11.41,11.41,11.42,11.4,97538.0,1112477.580000013,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:36:00',11.41,11.43,11.43,11.41,192487.0,2198982.5399999917,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:37:00',11.43,11.44,11.45,11.43,389100.0,4451469.3900000155,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:38:00',11.44,11.44,11.44,11.43,62400.0,713725.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:39:00',11.44,11.43,11.44,11.43,41100.0,469907.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:40:00',11.43,11.43,11.44,11.43,180100.0,2058619.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:41:00',11.43,11.41,11.44,11.41,83900.0,959046.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:42:00',11.41,11.39,11.41,11.39,53100.0,605290.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:43:00',11.39,11.38,11.39,11.38,97700.0,1112686.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:44:00',11.38,11.37,11.39,11.37,57500.0,654477.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:45:00',11.37,11.4,11.4,11.37,312900.0,3565077.99999997,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:46:00',11.4,11.41,11.41,11.4,98500.0,1123762.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:47:00',11.41,11.42,11.43,11.41,159600.0,1822511.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:48:00',11.42,11.42,11.42,11.41,123400.0,1409109.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:49:00',11.42,11.41,11.43,11.41,154100.0,1760238.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 
14:50:00',11.41,11.42,11.44,11.41,361300.0,4130568.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:51:00',11.42,11.45,11.45,11.42,231257.0,2645775.370000005,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:52:00',11.44,11.44,11.45,11.43,295700.0,3384133.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:53:00',11.41,11.41,11.44,11.41,97500.0,1114794.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:54:00',11.42,11.43,11.44,11.41,145700.0,1665211.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:55:00',11.41,11.39,11.42,11.36,110743.0,1260564.6299999952,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:56:00',11.36,11.37,11.38,11.35,110700.0,1257857.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 14:57:00',11.37,11.39,11.39,11.37,91600.0,1042258.4300000072,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-02 15:00:00',11.35,11.35,11.35,11.35,648000.0,7354800.0,11.02)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:31:00',11.27,11.35,11.35,11.27,194597.0,2194477.45,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:32:00',11.31,11.31,11.36,11.3,97600.0,1104836.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:33:00',11.31,11.25,11.33,11.25,142700.0,1610769.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:34:00',11.28,11.28,11.3,11.26,89100.0,1004739.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:35:00',11.28,11.27,11.28,11.26,65300.0,735698.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:36:00',11.27,11.25,11.27,11.23,198700.0,2235096.999999999,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:37:00',11.23,11.26,11.26,11.23,153800.0,1728714.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:38:00',11.26,11.32,11.32,11.26,59500.0,672484.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:39:00',11.33,11.39,11.4,11.33,250800.0,2849183.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:40:00',11.41,11.38,11.41,11.37,108200.0,1231972.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:41:00',11.39,11.33,11.41,11.33,146000.0,1662606.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:42:00',11.34,11.36,11.37,11.34,132600.0,1506245.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:43:00',11.35,11.44,11.44,11.35,422058.0,4811030.879999999,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:44:00',11.42,11.42,11.44,11.42,106200.0,1213679.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:45:00',11.42,11.39,11.42,11.36,102300.0,1166319.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:46:00',11.39,11.39,11.41,11.38,88100.0,1003877.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:47:00',11.39,11.35,11.4,11.35,132700.0,1508598.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:48:00',11.35,11.33,11.36,11.31,120300.0,1363681.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:49:00',11.32,11.32,11.33,11.3,118200.0,1337038.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:50:00',11.32,11.34,11.34,11.31,154300.0,1747209.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:51:00',11.34,11.34,11.35,11.31,192000.0,2176691.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:52:00',11.33,11.34,11.35,11.33,159100.0,1804359.0,11.35)") + 
tdSql.execute(f"insert into tb values ('2020-01-03 09:53:00',11.34,11.35,11.35,11.31,245200.0,2779151.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:54:00',11.36,11.38,11.39,11.36,119800.0,1361758.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:55:00',11.37,11.37,11.38,11.36,41000.0,466342.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:56:00',11.36,11.35,11.37,11.35,35000.0,397418.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:57:00',11.36,11.34,11.36,11.3,160300.0,1814162.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:58:00',11.34,11.34,11.35,11.32,22300.0,252845.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 09:59:00',11.34,11.35,11.35,11.34,112600.0,1277749.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:00:00',11.34,11.34,11.35,11.34,65800.0,746178.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:01:00',11.34,11.32,11.34,11.3,226400.0,2561501.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:02:00',11.32,11.34,11.35,11.28,138200.0,1563277.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:03:00',11.34,11.34,11.34,11.3,63800.0,722991.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:04:00',11.34,11.34,11.35,11.34,39100.0,443617.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:05:00',11.35,11.35,11.35,11.33,81200.0,921373.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:06:00',11.35,11.35,11.36,11.35,85200.0,967050.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:07:00',11.36,11.37,11.37,11.35,135300.0,1537200.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:08:00',11.35,11.36,11.38,11.35,69700.0,791922.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:09:00',11.36,11.41,11.41,11.36,296500.0,3376310.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:10:00',11.41,11.41,11.42,11.4,163800.0,1869800.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:11:00',11.42,11.42,11.43,11.41,61900.0,706871.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:12:00',11.42,11.4,11.42,11.4,21600.0,246423.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:13:00',11.4,11.38,11.4,11.37,43000.0,489339.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:14:00',11.38,11.39,11.39,11.37,31200.0,355056.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:15:00',11.38,11.38,11.39,11.37,71400.0,812399.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:16:00',11.38,11.37,11.38,11.37,18300.0,208100.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:17:00',11.37,11.31,11.37,11.31,211200.0,2394958.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:18:00',11.32,11.35,11.36,11.3,146900.0,1661582.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:19:00',11.35,11.34,11.35,11.32,28600.0,324130.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:20:00',11.34,11.35,11.35,11.34,61900.0,701699.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:21:00',11.35,11.34,11.35,11.34,46900.0,531869.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:22:00',11.34,11.35,11.35,11.34,23600.0,267713.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:23:00',11.35,11.35,11.35,11.34,32200.0,365336.0,11.35)") + tdSql.execute(f"insert into tb values 
('2020-01-03 10:24:00',11.34,11.34,11.35,11.3,285800.0,3234866.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:25:00',11.34,11.31,11.35,11.3,45600.0,515771.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:26:00',11.34,11.34,11.34,11.32,72300.0,820048.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:27:00',11.34,11.34,11.35,11.34,41900.0,475051.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:28:00',11.34,11.35,11.35,11.34,46800.0,530896.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:29:00',11.35,11.34,11.35,11.33,118000.0,1338109.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:30:00',11.34,11.33,11.35,11.33,61300.0,695369.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:31:00',11.33,11.34,11.35,11.33,24500.0,277860.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:32:00',11.34,11.34,11.35,11.33,27800.0,315082.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:33:00',11.34,11.34,11.35,11.33,48500.0,549676.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:34:00',11.34,11.35,11.35,11.34,10400.0,117943.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:35:00',11.35,11.34,11.35,11.34,13600.0,154225.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:36:00',11.34,11.34,11.35,11.34,13700.0,155372.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:37:00',11.34,11.34,11.34,11.34,83600.0,947807.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:38:00',11.32,11.34,11.35,11.32,130300.0,1475481.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:39:00',11.34,11.33,11.35,11.32,41700.0,473008.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:40:00',11.33,11.33,11.35,11.33,60200.0,682238.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:41:00',11.34,11.35,11.35,11.33,36200.0,410228.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:42:00',11.34,11.33,11.35,11.32,122600.0,1389382.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:43:00',11.33,11.32,11.34,11.3,107600.0,1217485.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:44:00',11.32,11.33,11.34,11.32,52700.0,597355.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:45:00',11.34,11.31,11.34,11.31,30100.0,340572.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:46:00',11.31,11.3,11.32,11.29,57600.0,651033.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:47:00',11.3,11.29,11.3,11.29,13200.0,149081.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:48:00',11.29,11.29,11.3,11.29,25600.0,289111.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:49:00',11.29,11.29,11.3,11.29,25600.0,289165.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:50:00',11.29,11.27,11.3,11.27,115900.0,1308000.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:51:00',11.25,11.27,11.27,11.25,153300.0,1725374.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:52:00',11.28,11.27,11.28,11.27,32400.0,365219.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:53:00',11.27,11.25,11.28,11.25,81000.0,912142.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:54:00',11.25,11.27,11.27,11.24,99400.0,1118078.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 
10:55:00',11.27,11.27,11.27,11.26,27800.0,313174.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:56:00',11.27,11.28,11.28,11.27,12000.0,135354.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:57:00',11.27,11.27,11.28,11.27,15600.0,175857.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:58:00',11.27,11.26,11.28,11.26,75400.0,849417.799999997,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 10:59:00',11.26,11.29,11.29,11.25,114481.0,1289693.8700000048,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:00:00',11.29,11.28,11.29,11.28,23200.0,261798.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:01:00',11.29,11.31,11.31,11.28,52900.0,597503.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:02:00',11.31,11.33,11.33,11.3,52800.0,597613.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:03:00',11.32,11.33,11.33,11.3,57400.0,648994.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:04:00',11.31,11.3,11.32,11.29,55500.0,627200.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:05:00',11.29,11.3,11.3,11.29,4600.0,51942.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:06:00',11.3,11.29,11.3,11.26,92600.0,1043860.1899999976,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:07:00',11.29,11.27,11.29,11.26,51100.0,575911.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:08:00',11.29,11.27,11.29,11.26,46100.0,519247.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:09:00',11.27,11.28,11.29,11.27,17700.0,199690.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:10:00',11.28,11.28,11.29,11.28,28400.0,320413.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:11:00',11.28,11.26,11.28,11.26,31400.0,353981.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:12:00',11.26,11.21,11.26,11.21,177800.0,1996944.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:13:00',11.22,11.23,11.24,11.22,119200.0,1337770.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:14:00',11.23,11.21,11.23,11.19,375600.0,4207714.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:15:00',11.21,11.21,11.22,11.21,88300.0,990017.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:16:00',11.21,11.2,11.22,11.2,125300.0,1403274.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:17:00',11.19,11.19,11.21,11.19,101800.0,1139480.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:18:00',11.2,11.2,11.21,11.19,99900.0,1118379.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:19:00',11.21,11.2,11.21,11.2,33800.0,378602.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:20:00',11.2,11.18,11.2,11.18,136400.0,1525650.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:21:00',11.18,11.22,11.22,11.18,114100.0,1278375.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:22:00',11.22,11.29,11.3,11.22,155400.0,1750586.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:23:00',11.3,11.26,11.3,11.26,32500.0,366525.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:24:00',11.27,11.32,11.32,11.27,105400.0,1190886.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:25:00',11.32,11.33,11.34,11.32,94200.0,1067788.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 
11:26:00',11.34,11.34,11.35,11.33,97700.0,1107909.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:27:00',11.34,11.32,11.35,11.3,45900.0,519877.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:28:00',11.31,11.34,11.34,11.3,67100.0,760368.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:29:00',11.34,11.35,11.36,11.33,107300.0,1217725.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 11:30:00',11.34,11.33,11.35,11.33,31900.0,361717.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:01:00',11.33,11.3,11.33,11.3,163800.0,1854874.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:02:00',11.32,11.28,11.32,11.28,18800.0,212606.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:03:00',11.3,11.3,11.32,11.29,15100.0,170797.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:04:00',11.32,11.33,11.33,11.31,32500.0,367922.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:05:00',11.33,11.32,11.33,11.32,45900.0,520010.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:06:00',11.33,11.32,11.34,11.32,47500.0,538081.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:07:00',11.33,11.32,11.33,11.32,11000.0,124532.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:08:00',11.32,11.33,11.33,11.32,6900.0,78146.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:09:00',11.33,11.29,11.33,11.29,48500.0,548417.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:10:00',11.28,11.3,11.3,11.28,57300.0,647167.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:11:00',11.29,11.31,11.32,11.29,105600.0,1193513.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:12:00',11.31,11.3,11.31,11.3,37300.0,421730.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:13:00',11.3,11.29,11.3,11.29,15300.0,172751.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:14:00',11.29,11.3,11.3,11.29,11900.0,134382.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:15:00',11.3,11.25,11.3,11.25,86300.0,972690.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:16:00',11.25,11.26,11.27,11.25,40600.0,457406.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:17:00',11.26,11.26,11.28,11.26,33900.0,381866.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:18:00',11.26,11.26,11.26,11.24,77400.0,870776.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:19:00',11.26,11.27,11.27,11.26,45800.0,516044.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:20:00',11.27,11.25,11.27,11.25,48500.0,545868.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:21:00',11.25,11.28,11.28,11.25,36000.0,405597.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:22:00',11.27,11.26,11.28,11.26,39300.0,442846.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:23:00',11.26,11.28,11.29,11.26,53800.0,606769.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:24:00',11.29,11.29,11.3,11.28,40600.0,458601.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:25:00',11.29,11.3,11.3,11.29,47200.0,533344.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:26:00',11.3,11.29,11.3,11.29,83200.0,940090.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:27:00',11.3,11.32,11.32,11.29,46000.0,520109.0,11.35)") + 
tdSql.execute(f"insert into tb values ('2020-01-03 13:28:00',11.32,11.29,11.32,11.29,31300.0,353497.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:29:00',11.29,11.28,11.3,11.27,83600.0,943134.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:30:00',11.28,11.3,11.31,11.27,114800.0,1296517.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:31:00',11.3,11.28,11.3,11.28,36500.0,411963.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:32:00',11.28,11.3,11.3,11.27,86600.0,976856.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:33:00',11.3,11.31,11.32,11.3,84400.0,954039.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:34:00',11.32,11.33,11.33,11.32,65600.0,742678.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:35:00',11.33,11.31,11.33,11.3,69500.0,786158.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:36:00',11.32,11.35,11.35,11.31,113900.0,1290451.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:37:00',11.34,11.33,11.34,11.32,28800.0,326397.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:38:00',11.33,11.34,11.34,11.32,57900.0,656048.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:39:00',11.34,11.35,11.35,11.34,56300.0,638545.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:40:00',11.35,11.35,11.35,11.31,167700.0,1902036.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:41:00',11.35,11.32,11.35,11.31,30300.0,343412.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:42:00',11.32,11.33,11.33,11.32,37600.0,425811.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:43:00',11.33,11.33,11.33,11.32,30500.0,345472.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:44:00',11.33,11.34,11.34,11.32,101700.0,1152481.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:45:00',11.34,11.34,11.35,11.33,95600.0,1084045.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:46:00',11.34,11.35,11.35,11.33,136400.0,1547277.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:47:00',11.34,11.34,11.35,11.33,78900.0,894651.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:48:00',11.34,11.35,11.35,11.34,121000.0,1372921.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:49:00',11.35,11.35,11.35,11.34,155300.0,1762140.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:50:00',11.35,11.34,11.35,11.33,87200.0,988809.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:51:00',11.33,11.34,11.34,11.32,67500.0,764699.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:52:00',11.34,11.34,11.34,11.33,31000.0,351343.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:53:00',11.34,11.33,11.34,11.32,76700.0,869290.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:54:00',11.34,11.34,11.34,11.33,72000.0,816172.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:55:00',11.34,11.33,11.34,11.32,42700.0,484008.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:56:00',11.33,11.35,11.35,11.33,97800.0,1109167.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:57:00',11.35,11.34,11.35,11.34,86000.0,975752.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 13:58:00',11.34,11.35,11.35,11.34,79900.0,906431.0,11.35)") + tdSql.execute(f"insert into tb values 
('2020-01-03 13:59:00',11.35,11.35,11.36,11.33,136400.0,1548251.6100000143,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:00:00',11.36,11.35,11.36,11.35,102100.0,1159249.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:01:00',11.35,11.36,11.36,11.35,89500.0,1016304.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:02:00',11.36,11.34,11.36,11.33,74400.0,844382.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:03:00',11.34,11.36,11.36,11.33,230300.0,2613617.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:04:00',11.35,11.36,11.36,11.35,164200.0,1864718.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:05:00',11.36,11.35,11.36,11.35,38500.0,437274.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:06:00',11.36,11.35,11.36,11.35,169800.0,1928068.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:07:00',11.36,11.4,11.4,11.36,536539.0,6100661.039999992,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:08:00',11.4,11.41,11.42,11.39,389800.0,4444539.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:09:00',11.4,11.41,11.41,11.4,204300.0,2331075.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:10:00',11.4,11.41,11.41,11.4,129400.0,1475937.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:11:00',11.41,11.43,11.43,11.41,218700.0,2497043.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:12:00',11.42,11.44,11.44,11.42,136100.0,1555541.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:13:00',11.44,11.54,11.54,11.43,1018100.0,11692577.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:14:00',11.52,11.51,11.53,11.5,422400.0,4866632.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:15:00',11.51,11.48,11.52,11.47,227100.0,2612523.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:16:00',11.49,11.49,11.5,11.48,198900.0,2285029.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:17:00',11.49,11.5,11.51,11.49,334700.0,3849974.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:18:00',11.5,11.5,11.5,11.48,253100.0,2910151.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:19:00',11.5,11.5,11.51,11.5,115400.0,1327508.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:20:00',11.5,11.54,11.54,11.5,832200.0,9587661.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:21:00',11.53,11.54,11.54,11.52,247600.0,2856077.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:22:00',11.54,11.53,11.54,11.52,69100.0,796877.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:23:00',11.52,11.52,11.53,11.51,106400.0,1226086.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:24:00',11.52,11.52,11.52,11.51,37100.0,427228.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:25:00',11.52,11.52,11.53,11.51,86500.0,996272.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:26:00',11.52,11.52,11.52,11.51,39400.0,453704.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:27:00',11.52,11.52,11.53,11.51,116597.0,1343284.4399999976,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:28:00',11.52,11.51,11.53,11.51,57400.0,660893.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:29:00',11.51,11.51,11.52,11.5,100600.0,1157549.0,11.35)") + tdSql.execute(f"insert into tb 
values ('2020-01-03 14:30:00',11.51,11.5,11.51,11.5,32600.0,374987.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:31:00',11.5,11.45,11.51,11.45,92900.0,1067509.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:32:00',11.45,11.47,11.47,11.45,62700.0,719896.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:33:00',11.45,11.5,11.51,11.45,65100.0,747497.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:34:00',11.5,11.5,11.5,11.48,116900.0,1344482.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:35:00',11.5,11.5,11.5,11.49,67500.0,776201.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:36:00',11.5,11.49,11.51,11.46,112000.0,1287819.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:37:00',11.49,11.45,11.49,11.45,51000.0,584774.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:38:00',11.45,11.45,11.45,11.43,48300.0,552505.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:39:00',11.44,11.45,11.46,11.43,99100.0,1133866.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:40:00',11.42,11.43,11.45,11.4,188100.0,2148202.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:41:00',11.43,11.45,11.45,11.42,203400.0,2325646.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:42:00',11.45,11.44,11.45,11.43,232900.0,2664436.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:43:00',11.44,11.45,11.45,11.44,207700.0,2377790.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:44:00',11.44,11.47,11.47,11.44,313400.0,3590603.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:45:00',11.47,11.46,11.47,11.46,256700.0,2944280.99999997,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:46:00',11.45,11.48,11.48,11.45,220754.0,2531230.380000055,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:47:00',11.48,11.48,11.48,11.47,262500.0,3013403.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:48:00',11.48,11.47,11.48,11.46,55146.0,632740.6199999452,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:49:00',11.46,11.43,11.46,11.43,44500.0,509405.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:50:00',11.44,11.4,11.44,11.4,138000.0,1575039.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:51:00',11.4,11.42,11.43,11.4,27700.0,316131.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:52:00',11.42,11.43,11.44,11.41,84700.0,968231.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:53:00',11.43,11.44,11.45,11.43,30000.0,343186.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:54:00',11.43,11.43,11.44,11.43,64517.0,737698.3100000024,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:55:00',11.43,11.42,11.44,11.42,84600.0,967217.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:56:00',11.42,11.43,11.43,11.41,92300.0,1053894.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 14:57:00',11.42,11.42,11.42,11.41,39200.0,447526.0,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-03 15:00:00',11.42,11.42,11.42,11.42,172054.0,1964856.6800000072,11.35)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:31:00',11.55,11.6,11.65,11.55,907400.0,10511377.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:32:00',11.6,11.63,11.64,11.59,551900.0,6414024.0,11.42)") + tdSql.execute(f"insert 
into tb values ('2020-01-06 09:33:00',11.64,11.7,11.71,11.64,591300.0,6900202.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:34:00',11.7,11.76,11.76,11.7,626000.0,7350773.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:35:00',11.77,11.73,11.78,11.73,551600.0,6482611.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:36:00',11.73,11.73,11.74,11.71,376500.0,4414561.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:37:00',11.72,11.68,11.73,11.68,307100.0,3594829.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:38:00',11.66,11.62,11.7,11.61,441200.0,5139836.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:39:00',11.61,11.62,11.64,11.6,335900.0,3902614.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:40:00',11.63,11.58,11.64,11.55,530800.0,6161605.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:41:00',11.55,11.54,11.58,11.53,223000.0,2577635.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:42:00',11.55,11.56,11.57,11.52,244400.0,2821957.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:43:00',11.55,11.58,11.58,11.55,346300.0,4006971.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:44:00',11.57,11.56,11.57,11.55,199700.0,2307770.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:45:00',11.56,11.56,11.58,11.56,188200.0,2177986.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:46:00',11.57,11.54,11.57,11.54,188500.0,2179350.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:47:00',11.54,11.53,11.56,11.53,140500.0,1622212.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:48:00',11.54,11.49,11.54,11.49,274100.0,3157452.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:49:00',11.49,11.52,11.52,11.48,249800.0,2872306.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:50:00',11.52,11.53,11.53,11.52,354000.0,4081214.620000005,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:51:00',11.53,11.53,11.54,11.52,143158.0,1650281.7399999946,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:52:00',11.54,11.53,11.54,11.51,308400.0,3556988.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:53:00',11.55,11.59,11.6,11.54,418100.0,4838869.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:54:00',11.59,11.58,11.6,11.58,134600.0,1559841.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:55:00',11.58,11.54,11.58,11.54,86000.0,993791.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:56:00',11.54,11.55,11.55,11.52,31900.0,367830.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:57:00',11.54,11.54,11.55,11.54,27500.0,317423.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:58:00',11.54,11.53,11.55,11.53,45100.0,520180.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 09:59:00',11.53,11.52,11.54,11.52,52000.0,599342.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:00:00',11.52,11.53,11.53,11.51,31100.0,358222.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:01:00',11.52,11.5,11.52,11.5,116100.0,1336027.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:02:00',11.5,11.5,11.5,11.49,67600.0,776937.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:03:00',11.49,11.48,11.5,11.48,94900.0,1090210.0,11.42)") + tdSql.execute(f"insert 
into tb values ('2020-01-06 10:04:00',11.49,11.46,11.49,11.45,159660.0,1830713.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:05:00',11.45,11.44,11.47,11.44,447140.0,5118904.799999997,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:06:00',11.44,11.5,11.5,11.44,329660.0,3781580.600000009,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:07:00',11.5,11.5,11.5,11.49,140100.0,1610940.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:08:00',11.5,11.5,11.5,11.49,59900.0,688429.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:09:00',11.5,11.5,11.5,11.48,73200.0,841193.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:10:00',11.5,11.49,11.5,11.48,112700.0,1295175.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:11:00',11.48,11.48,11.49,11.47,104400.0,1198557.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:12:00',11.48,11.49,11.49,11.47,46200.0,530456.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:13:00',11.49,11.47,11.49,11.47,51300.0,588862.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:14:00',11.47,11.48,11.48,11.47,39200.0,449688.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:15:00',11.48,11.46,11.48,11.46,40800.0,468080.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:16:00',11.47,11.46,11.47,11.46,97300.0,1115636.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:17:00',11.46,11.47,11.47,11.46,45300.0,519462.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:18:00',11.47,11.46,11.47,11.46,30600.0,350741.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:19:00',11.46,11.46,11.47,11.46,41900.0,480414.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:20:00',11.47,11.46,11.47,11.46,82800.0,949611.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:21:00',11.46,11.47,11.47,11.46,89300.0,1024060.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:22:00',11.47,11.47,11.48,11.47,14800.0,169762.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:23:00',11.47,11.47,11.48,11.47,49700.0,570458.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:24:00',11.47,11.48,11.48,11.47,140600.0,1613876.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:25:00',11.48,11.49,11.49,11.47,117100.0,1344288.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:26:00',11.48,11.48,11.49,11.47,45000.0,516889.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:27:00',11.48,11.47,11.48,11.47,24900.0,285679.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:28:00',11.47,11.46,11.47,11.46,14200.0,162770.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:29:00',11.46,11.46,11.47,11.46,62300.0,714038.9999999851,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:30:00',11.46,11.45,11.46,11.45,45600.0,522439.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:31:00',11.45,11.44,11.46,11.44,78100.0,894275.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:32:00',11.44,11.44,11.45,11.43,117000.0,1338227.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:33:00',11.43,11.43,11.44,11.42,133125.0,1521295.5,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:34:00',11.43,11.41,11.43,11.4,159775.0,1823829.25,11.42)") + tdSql.execute(f"insert into tb values 
('2020-01-06 10:35:00',11.41,11.42,11.42,11.4,35500.0,405026.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:36:00',11.41,11.41,11.42,11.41,50300.0,574030.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:37:00',11.41,11.41,11.42,11.41,96300.0,1099034.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:38:00',11.41,11.41,11.41,11.41,74300.0,848177.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:39:00',11.41,11.42,11.42,11.41,39800.0,454243.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:40:00',11.41,11.42,11.42,11.41,45800.0,522904.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:41:00',11.42,11.42,11.42,11.41,38700.0,441801.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:42:00',11.42,11.41,11.42,11.41,193100.0,2203593.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:43:00',11.41,11.42,11.42,11.41,90200.0,1030064.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:44:00',11.42,11.42,11.43,11.42,51000.0,582499.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:45:00',11.42,11.42,11.43,11.42,46800.0,534543.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:46:00',11.42,11.42,11.43,11.41,70000.0,799187.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:47:00',11.41,11.41,11.42,11.41,17800.0,203125.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:48:00',11.41,11.41,11.42,11.41,108200.0,1235494.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:49:00',11.41,11.41,11.42,11.41,12700.0,144949.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:50:00',11.41,11.41,11.42,11.41,13400.0,152967.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:51:00',11.42,11.42,11.42,11.41,23200.0,264790.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:52:00',11.42,11.42,11.42,11.41,20000.0,228277.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:53:00',11.42,11.41,11.42,11.41,45600.0,520600.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:54:00',11.41,11.41,11.42,11.41,57700.0,658434.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:55:00',11.41,11.43,11.43,11.41,95100.0,1086338.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:56:00',11.42,11.43,11.44,11.42,194200.0,2219316.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:57:00',11.44,11.44,11.44,11.43,10600.0,121253.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:58:00',11.44,11.44,11.44,11.43,21900.0,250530.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 10:59:00',11.44,11.43,11.44,11.43,20400.0,233338.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:00:00',11.43,11.43,11.44,11.43,21900.0,250432.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:01:00',11.43,11.44,11.45,11.43,84600.0,967784.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:02:00',11.44,11.44,11.45,11.44,84400.0,966311.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:03:00',11.45,11.45,11.46,11.44,20100.0,230191.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:04:00',11.45,11.45,11.46,11.45,9400.0,107641.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:05:00',11.45,11.45,11.46,11.45,11000.0,126014.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 
11:06:00',11.45,11.46,11.46,11.45,9400.0,107655.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:07:00',11.46,11.45,11.46,11.45,16900.0,193589.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:08:00',11.45,11.45,11.46,11.45,15400.0,176385.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:09:00',11.46,11.45,11.46,11.45,11600.0,132837.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:10:00',11.45,11.45,11.46,11.45,9600.0,109943.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:11:00',11.45,11.45,11.46,11.45,7500.0,85935.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:12:00',11.45,11.45,11.46,11.45,36500.0,418061.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:13:00',11.45,11.46,11.46,11.45,23100.0,264636.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:14:00',11.45,11.45,11.46,11.45,31100.0,356124.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:15:00',11.45,11.46,11.46,11.44,43000.0,492335.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:16:00',11.44,11.45,11.45,11.44,13500.0,154609.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:17:00',11.44,11.44,11.45,11.44,14900.0,170494.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:18:00',11.44,11.43,11.44,11.43,25200.0,288287.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:19:00',11.44,11.44,11.45,11.43,25800.0,295068.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:20:00',11.44,11.44,11.45,11.43,15000.0,171581.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:21:00',11.44,11.45,11.47,11.44,135600.0,1553718.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:22:00',11.47,11.49,11.49,11.44,211800.0,2430527.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:23:00',11.5,11.47,11.52,11.47,467537.0,5376870.129999995,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:24:00',11.47,11.49,11.5,11.47,70700.0,812462.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:25:00',11.49,11.49,11.51,11.49,119800.0,1377527.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:26:00',11.49,11.51,11.52,11.49,101800.0,1171902.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:27:00',11.52,11.51,11.52,11.51,23800.0,274100.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:28:00',11.52,11.52,11.52,11.51,109000.0,1255624.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:29:00',11.52,11.53,11.53,11.52,94100.0,1084933.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 11:30:00',11.53,11.53,11.54,11.53,46900.0,540799.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:01:00',11.53,11.56,11.56,11.53,225500.0,2604097.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:02:00',11.56,11.6,11.6,11.56,282100.0,3268939.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:03:00',11.6,11.63,11.64,11.59,247500.0,2874935.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:04:00',11.63,11.64,11.66,11.62,214100.0,2493356.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:05:00',11.66,11.67,11.69,11.63,243700.0,2842458.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:06:00',11.66,11.62,11.66,11.6,155300.0,1803920.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 
13:07:00',11.62,11.63,11.63,11.6,35666.0,414469.9200000167,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:08:00',11.63,11.62,11.63,11.58,73800.0,856028.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:09:00',11.62,11.64,11.65,11.62,182200.0,2120971.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:10:00',11.64,11.64,11.65,11.6,186800.0,2172908.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:11:00',11.61,11.61,11.65,11.6,99200.0,1152563.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:12:00',11.62,11.6,11.63,11.59,223000.0,2587443.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:13:00',11.6,11.62,11.64,11.6,80000.0,929742.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:14:00',11.61,11.64,11.64,11.61,27922.0,324605.4199999869,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:15:00',11.64,11.63,11.64,11.62,66900.0,778431.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:16:00',11.63,11.62,11.65,11.62,122700.0,1428421.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:17:00',11.62,11.61,11.63,11.61,41600.0,483220.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:18:00',11.61,11.59,11.61,11.58,27678.0,321095.5800000131,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:19:00',11.59,11.58,11.59,11.56,47800.0,553382.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:20:00',11.58,11.59,11.6,11.56,42500.0,492765.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:21:00',11.6,11.58,11.6,11.58,43000.0,498714.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:22:00',11.59,11.59,11.59,11.58,31800.0,368312.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:23:00',11.59,11.58,11.59,11.57,21400.0,247778.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:24:00',11.58,11.57,11.58,11.56,32900.0,380588.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:25:00',11.56,11.59,11.59,11.56,42500.0,492084.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:26:00',11.58,11.6,11.6,11.58,83800.0,971768.5600000024,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:27:00',11.61,11.6,11.61,11.59,8300.0,96299.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:28:00',11.59,11.59,11.6,11.58,28900.0,334873.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:29:00',11.59,11.56,11.59,11.56,39500.0,457397.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:30:00',11.56,11.56,11.57,11.56,74200.0,858157.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:31:00',11.56,11.57,11.58,11.55,54800.0,633943.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:32:00',11.58,11.62,11.62,11.57,108600.0,1259501.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:33:00',11.62,11.6,11.62,11.6,126200.0,1466016.7800000012,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:34:00',11.6,11.6,11.63,11.6,218481.0,2536886.599999994,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:35:00',11.59,11.59,11.6,11.59,104619.0,1212764.400000006,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:36:00',11.6,11.58,11.6,11.56,97200.0,1126019.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:37:00',11.58,11.6,11.61,11.58,44700.0,518282.0,11.42)") + tdSql.execute(f"insert into tb values 
('2020-01-06 13:38:00',11.6,11.62,11.63,11.6,79400.0,922199.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:39:00',11.64,11.65,11.66,11.62,123692.0,1440583.6599999964,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:40:00',11.65,11.66,11.67,11.65,118500.0,1381623.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:41:00',11.65,11.65,11.66,11.64,51600.0,601051.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:42:00',11.65,11.66,11.67,11.65,85032.0,991095.4799999893,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:43:00',11.66,11.65,11.67,11.65,53900.0,628537.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:44:00',11.66,11.65,11.67,11.64,75600.0,881482.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:45:00',11.65,11.74,11.74,11.65,556691.0,6512344.060000002,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:46:00',11.74,11.72,11.74,11.71,158700.0,1861696.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:47:00',11.71,11.68,11.72,11.68,106100.0,1241840.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:48:00',11.69,11.68,11.69,11.67,128300.0,1498924.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:49:00',11.67,11.65,11.67,11.65,97709.0,1139307.9399999976,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:50:00',11.65,11.66,11.67,11.64,112791.0,1314383.0600000024,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:51:00',11.67,11.66,11.67,11.65,28600.0,333553.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:52:00',11.65,11.65,11.67,11.65,102700.0,1198039.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:53:00',11.66,11.65,11.67,11.65,92900.0,1082923.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:54:00',11.65,11.67,11.67,11.65,93217.0,1087300.3900000155,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:55:00',11.67,11.67,11.68,11.66,42483.0,495909.6099999845,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:56:00',11.66,11.69,11.69,11.66,72317.0,844650.2199999988,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:57:00',11.69,11.69,11.7,11.67,47000.0,549292.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:58:00',11.68,11.68,11.69,11.67,52500.0,613395.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 13:59:00',11.67,11.67,11.68,11.66,32900.0,384062.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:00:00',11.66,11.68,11.68,11.66,42400.0,494808.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:01:00',11.67,11.66,11.68,11.65,39983.0,466337.7800000012,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:02:00',11.66,11.65,11.66,11.65,59200.0,690117.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:03:00',11.65,11.65,11.66,11.64,41500.0,483686.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:04:00',11.65,11.67,11.67,11.65,55000.0,641485.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:05:00',11.65,11.66,11.67,11.65,52900.0,616666.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:06:00',11.65,11.64,11.66,11.64,73200.0,852847.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:07:00',11.64,11.63,11.64,11.62,84500.0,983285.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 
14:08:00',11.63,11.64,11.64,11.62,67000.0,779341.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:09:00',11.63,11.59,11.63,11.59,90900.0,1055733.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:10:00',11.59,11.58,11.6,11.57,119300.0,1382200.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:11:00',11.57,11.6,11.6,11.56,66800.0,773697.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:12:00',11.58,11.57,11.6,11.56,106500.0,1233529.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:13:00',11.56,11.57,11.57,11.55,179100.0,2070229.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:14:00',11.57,11.58,11.58,11.56,122573.0,1418386.8799999952,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:15:00',11.58,11.6,11.6,11.57,71800.0,832365.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:16:00',11.59,11.59,11.6,11.57,84100.0,975112.5400000215,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:17:00',11.6,11.54,11.6,11.54,131527.0,1520418.3899999857,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:18:00',11.54,11.54,11.54,11.53,252300.0,2911447.00000003,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:19:00',11.55,11.55,11.55,11.53,106700.0,1232067.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:20:00',11.54,11.57,11.57,11.54,97473.0,1126292.1499999762,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:21:00',11.55,11.57,11.57,11.55,35800.0,414117.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:22:00',11.57,11.58,11.58,11.56,70900.0,820610.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:23:00',11.57,11.56,11.58,11.56,69300.0,801902.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:24:00',11.57,11.59,11.59,11.55,72700.0,841462.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:25:00',11.59,11.6,11.61,11.58,68500.0,794432.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:26:00',11.6,11.64,11.65,11.6,271400.0,3158497.0799999833,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:27:00',11.64,11.63,11.64,11.6,76700.0,891695.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:28:00',11.63,11.66,11.66,11.61,179698.0,2092381.699999988,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:29:00',11.66,11.69,11.7,11.66,287400.0,3357436.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:30:00',11.66,11.69,11.7,11.66,76200.0,890817.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:31:00',11.69,11.68,11.69,11.67,73300.0,856869.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:32:00',11.68,11.69,11.7,11.68,159200.0,1861908.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:33:00',11.71,11.7,11.71,11.69,107600.0,1258925.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:34:00',11.71,11.73,11.73,11.7,123200.0,1443006.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:35:00',11.73,11.71,11.73,11.71,72400.0,848655.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:36:00',11.71,11.71,11.72,11.71,89600.0,1049816.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:37:00',11.71,11.7,11.72,11.7,49500.0,579364.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:38:00',11.7,11.7,11.71,11.7,49890.0,583867.0,11.42)") + tdSql.execute(f"insert into tb 
values ('2020-01-06 14:39:00',11.7,11.7,11.71,11.7,69100.0,808827.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:40:00',11.71,11.7,11.71,11.69,273000.0,3194146.100000024,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:41:00',11.7,11.71,11.71,11.69,335100.0,3921236.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:42:00',11.71,11.7,11.71,11.69,225400.0,2637167.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:43:00',11.7,11.7,11.71,11.69,45200.0,528885.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:44:00',11.71,11.7,11.71,11.69,48276.0,564936.1999999881,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:45:00',11.7,11.68,11.7,11.67,24500.0,286204.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:46:00',11.67,11.7,11.71,11.67,109654.0,1282759.2599999905,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:47:00',11.71,11.72,11.72,11.71,165489.0,1938800.1899999976,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:48:00',11.72,11.73,11.73,11.72,75900.0,889822.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:49:00',11.73,11.72,11.73,11.72,80500.0,943968.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:50:00',11.72,11.73,11.73,11.72,124200.0,1456271.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:51:00',11.73,11.75,11.75,11.73,390600.0,4586597.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:52:00',11.74,11.78,11.78,11.74,563135.0,6625838.950000048,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:53:00',11.78,11.78,11.78,11.76,270700.0,3188454.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:54:00',11.79,11.81,11.82,11.78,688760.0,8125004.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:55:00',11.81,11.8,11.81,11.78,284500.0,3357734.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:56:00',11.8,11.77,11.8,11.73,431600.0,5073451.199999988,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 14:57:00',11.76,11.78,11.78,11.76,108700.0,1279095.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-06 15:00:00',11.78,11.78,11.78,11.78,229300.0,2701154.0,11.42)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:31:00',11.56,11.59,11.65,11.56,357700.0,4145450.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:32:00',11.6,11.61,11.63,11.59,169200.0,1964288.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:33:00',11.6,11.62,11.62,11.6,159000.0,1846577.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:34:00',11.62,11.6,11.63,11.6,252705.0,2934441.9000000004,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:35:00',11.61,11.59,11.61,11.53,461495.0,5341009.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:36:00',11.58,11.64,11.64,11.58,161200.0,1871636.9999999981,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:37:00',11.64,11.7,11.7,11.63,364400.0,4250718.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:38:00',11.7,11.72,11.72,11.66,282400.0,3301811.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:39:00',11.69,11.72,11.74,11.69,239200.0,2804372.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:40:00',11.73,11.78,11.8,11.73,247800.0,2915690.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 
09:41:00',11.78,11.81,11.82,11.78,283837.0,3347461.490000002,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:42:00',11.8,11.77,11.8,11.76,119463.0,1407704.7699999958,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:43:00',11.75,11.77,11.78,11.74,170600.0,2005333.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:44:00',11.77,11.77,11.78,11.71,238900.0,2811370.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:45:00',11.77,11.75,11.77,11.73,57500.0,675187.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:46:00',11.72,11.7,11.72,11.7,53500.0,626781.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:47:00',11.7,11.72,11.72,11.68,100000.0,1168537.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:48:00',11.72,11.7,11.72,11.7,50800.0,594895.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:49:00',11.69,11.69,11.7,11.67,173900.0,2032259.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:50:00',11.66,11.68,11.69,11.66,92000.0,1074240.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:51:00',11.68,11.69,11.69,11.68,51300.0,599440.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:52:00',11.69,11.72,11.72,11.69,86600.0,1013644.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:53:00',11.72,11.74,11.75,11.71,86292.0,1012255.1600000039,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:54:00',11.74,11.71,11.74,11.69,86508.0,1013349.9200000018,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:55:00',11.7,11.72,11.73,11.68,58300.0,682105.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:56:00',11.72,11.68,11.72,11.67,75700.0,885433.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:57:00',11.68,11.69,11.7,11.67,87200.0,1018693.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:58:00',11.69,11.68,11.69,11.67,33900.0,395908.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 09:59:00',11.68,11.68,11.69,11.67,51300.0,599246.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:00:00',11.67,11.67,11.67,11.65,79500.0,927279.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:01:00',11.68,11.65,11.68,11.64,100200.0,1167971.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:02:00',11.65,11.64,11.65,11.62,132400.0,1540544.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:03:00',11.64,11.66,11.66,11.62,101000.0,1175748.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:04:00',11.66,11.67,11.67,11.65,45900.0,535318.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:05:00',11.67,11.64,11.67,11.62,100900.0,1174982.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:06:00',11.64,11.7,11.71,11.62,270600.0,3159453.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:07:00',11.7,11.72,11.76,11.65,305700.0,3587904.9999999925,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:08:00',11.72,11.72,11.75,11.7,19300.0,225823.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:09:00',11.72,11.73,11.74,11.72,46000.0,539458.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:10:00',11.73,11.74,11.75,11.73,29100.0,341638.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:11:00',11.74,11.71,11.75,11.71,61000.0,715473.0,11.78)") + tdSql.execute(f"insert into tb values 
('2020-01-07 10:12:00',11.72,11.72,11.74,11.7,76400.0,894294.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:13:00',11.72,11.75,11.82,11.72,271300.0,3196803.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:14:00',11.8,11.84,11.85,11.8,210500.0,2489891.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:15:00',11.84,11.84,11.84,11.81,155800.0,1843780.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:16:00',11.84,11.94,11.95,11.84,355200.0,4231674.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:17:00',11.94,11.92,11.96,11.9,476500.0,5683660.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:18:00',11.92,11.91,11.93,11.9,213100.0,2538189.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:19:00',11.9,11.89,11.92,11.89,165800.0,1974602.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:20:00',11.89,11.91,11.91,11.89,168289.0,2003160.5900000036,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:21:00',11.91,11.9,11.91,11.88,212451.0,2528443.4099999964,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:22:00',11.89,11.92,11.93,11.89,180864.0,2153663.0900000036,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:23:00',11.92,11.95,11.96,11.92,137700.0,1644217.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:24:00',11.95,12.0,12.0,11.95,535700.0,6420378.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:25:00',12.0,12.04,12.04,12.0,297600.0,3578211.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:26:00',12.02,12.0,12.02,11.98,253100.0,3038148.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:27:00',12.0,12.0,12.01,11.98,194400.0,2332786.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:28:00',12.0,12.0,12.0,11.95,379000.0,4543717.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:29:00',12.0,12.04,12.04,12.0,198400.0,2385253.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:30:00',12.03,12.01,12.05,12.01,166000.0,1996865.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:31:00',12.0,11.97,12.01,11.97,181000.0,2169715.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:32:00',11.97,11.99,11.99,11.96,140900.0,1688177.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:33:00',11.98,11.95,11.98,11.94,111332.0,1331464.7600000054,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:34:00',11.96,12.0,12.01,11.94,167800.0,2009925.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:35:00',12.01,12.01,12.01,11.99,126400.0,1517584.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:36:00',11.99,12.0,12.01,11.98,212000.0,2543336.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:37:00',12.0,12.0,12.0,11.99,116900.0,1402478.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:38:00',12.0,12.0,12.0,11.98,129600.0,1555010.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:39:00',11.99,12.0,12.01,11.98,80900.0,970864.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:40:00',12.0,12.0,12.01,11.99,126700.0,1521066.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:41:00',12.0,12.0,12.01,11.99,76600.0,919096.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:42:00',12.0,12.0,12.01,11.99,74000.0,887990.0,11.78)") + tdSql.execute(f"insert into tb 
values ('2020-01-07 10:43:00',12.0,12.0,12.02,12.0,83000.0,996610.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:44:00',12.0,12.02,12.03,12.0,58700.0,705384.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:45:00',12.02,12.0,12.03,12.0,52500.0,631011.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:46:00',12.0,11.95,12.0,11.95,130100.0,1559162.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:47:00',11.95,11.92,11.95,11.92,126168.0,1506029.2400000095,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:48:00',11.92,11.93,11.93,11.9,203532.0,2424166.799999982,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:49:00',11.93,11.93,11.96,11.93,101990.0,1216607.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:50:00',11.91,11.9,11.95,11.9,127800.0,1521916.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:51:00',11.91,11.91,11.94,11.9,25900.0,308867.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:52:00',11.91,11.95,11.95,11.91,53500.0,639014.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:53:00',11.94,11.96,11.97,11.93,51400.0,614166.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:54:00',11.97,11.95,11.97,11.95,58500.0,699540.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:55:00',11.95,11.97,11.98,11.95,38200.0,457136.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:56:00',11.98,11.97,11.99,11.95,158300.0,1894970.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:57:00',11.97,11.97,11.98,11.95,40600.0,485976.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:58:00',11.97,11.99,12.0,11.96,161300.0,1932126.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 10:59:00',11.99,12.01,12.01,11.99,51900.0,622760.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:00:00',12.0,11.98,12.01,11.98,47800.0,573467.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:01:00',11.98,11.99,12.0,11.98,27600.0,330951.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:02:00',11.99,11.99,11.99,11.98,18100.0,216966.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:03:00',11.99,11.98,11.99,11.98,35800.0,429223.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:04:00',11.98,11.98,11.99,11.98,68300.0,818610.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:05:00',11.98,11.97,11.99,11.97,42500.0,509153.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:06:00',11.97,11.99,11.99,11.96,34700.0,415733.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:07:00',11.99,11.97,12.0,11.96,21100.0,252874.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:08:00',11.97,11.96,11.99,11.96,24800.0,296937.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:09:00',11.96,11.95,11.98,11.95,29500.0,353064.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:10:00',11.95,11.95,11.96,11.95,33700.0,402950.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:11:00',11.96,11.96,11.97,11.95,36200.0,433011.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:12:00',11.96,11.96,11.97,11.96,16400.0,196199.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:13:00',11.96,11.97,11.98,11.96,10500.0,125645.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 
11:14:00',11.97,11.97,11.98,11.97,15300.0,183249.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:15:00',11.98,11.99,11.99,11.98,45500.0,545452.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:16:00',11.99,12.01,12.02,11.99,151560.0,1819663.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:17:00',12.01,12.03,12.03,12.01,88900.0,1068800.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:18:00',12.02,12.03,12.03,12.01,145800.0,1754099.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:19:00',12.03,12.06,12.06,12.03,265200.0,3196467.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:20:00',12.06,12.08,12.08,12.06,94200.0,1136842.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:21:00',12.08,12.08,12.09,12.07,215467.0,2602713.0200000107,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:22:00',12.08,12.12,12.13,12.08,189310.0,2291281.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:23:00',12.13,12.09,12.14,12.09,150000.0,1818077.900000006,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:24:00',12.09,12.1,12.12,12.09,64300.0,778390.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:25:00',12.1,12.1,12.1,12.09,68500.0,828968.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:26:00',12.11,12.09,12.12,12.09,120700.0,1460715.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:27:00',12.09,12.08,12.1,12.08,109690.0,1326097.099999994,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:28:00',12.08,12.06,12.09,12.06,92400.0,1116280.900000006,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:29:00',12.06,12.06,12.08,12.06,57600.0,695067.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 11:30:00',12.07,12.09,12.09,12.06,26500.0,319971.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:01:00',12.09,12.07,12.1,12.07,129400.0,1564192.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:02:00',12.07,12.05,12.07,12.05,113333.0,1367652.9799999893,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:03:00',12.06,12.04,12.06,12.04,61400.0,739896.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:04:00',12.04,12.02,12.05,12.02,89300.0,1074425.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:05:00',12.02,12.01,12.03,12.01,106200.0,1276014.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:06:00',12.01,11.99,12.02,11.99,86840.0,1042156.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:07:00',11.99,12.0,12.02,11.99,92660.0,1111983.400000006,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:08:00',12.01,12.0,12.01,12.0,85900.0,1031328.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:09:00',12.0,11.98,12.0,11.97,57240.0,685716.599999994,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:10:00',11.97,11.96,11.97,11.96,68800.0,823265.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:11:00',11.96,11.95,11.96,11.93,97000.0,1158998.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:12:00',11.96,11.96,11.96,11.93,42860.0,512292.59999999404,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:13:00',11.96,11.98,11.98,11.96,59000.0,706154.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:14:00',11.98,11.98,12.0,11.97,73140.0,876933.0,11.78)") + 
tdSql.execute(f"insert into tb values ('2020-01-07 13:15:00',11.97,11.95,11.98,11.95,73500.0,879475.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:16:00',11.96,11.96,11.98,11.95,44400.0,530969.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:17:00',11.96,11.94,11.96,11.94,36100.0,431383.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:18:00',11.94,11.93,11.95,11.93,46900.0,559719.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:19:00',11.93,11.89,11.93,11.89,145900.0,1737848.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:20:00',11.89,11.91,11.93,11.88,65300.0,777135.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:21:00',11.93,11.92,11.93,11.9,72100.0,858756.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:22:00',11.91,11.92,11.92,11.91,43400.0,517449.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:23:00',11.92,11.92,11.93,11.92,76200.0,908618.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:24:00',11.9,11.9,11.91,11.89,77600.0,923978.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:25:00',11.9,11.89,11.91,11.89,77400.0,921197.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:26:00',11.89,11.89,11.91,11.89,52500.0,624914.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:27:00',11.9,11.92,11.92,11.9,73200.0,871577.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:28:00',11.92,11.94,11.94,11.92,68900.0,821981.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:29:00',11.94,11.92,11.94,11.91,34500.0,411353.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:30:00',11.92,11.91,11.92,11.91,23300.0,277678.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:31:00',11.91,11.91,11.92,11.91,57000.0,679335.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:32:00',11.91,11.89,11.91,11.89,83400.0,992819.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:33:00',11.89,11.87,11.89,11.86,210600.0,2501788.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:34:00',11.88,11.81,11.88,11.81,260200.0,3078808.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:35:00',11.8,11.86,11.86,11.8,162200.0,1920478.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:36:00',11.86,11.84,11.87,11.83,69900.0,828699.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:37:00',11.84,11.82,11.85,11.82,51600.0,610763.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:38:00',11.83,11.8,11.83,11.79,141300.0,1668275.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:39:00',11.8,11.8,11.8,11.77,165800.0,1954307.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:40:00',11.8,11.8,11.8,11.78,82800.0,976549.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:41:00',11.8,11.81,11.83,11.79,79800.0,942170.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:42:00',11.81,11.81,11.83,11.8,50600.0,597531.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:43:00',11.8,11.79,11.81,11.79,109500.0,1292148.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:44:00',11.79,11.78,11.79,11.78,79300.0,934697.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:45:00',11.78,11.8,11.81,11.78,82700.0,975597.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 
13:46:00',11.81,11.82,11.82,11.8,40800.0,481780.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:47:00',11.81,11.81,11.82,11.8,80500.0,950273.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:48:00',11.79,11.79,11.81,11.78,94900.0,1119019.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:49:00',11.78,11.78,11.79,11.78,99700.0,1174646.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:50:00',11.78,11.76,11.78,11.75,90300.0,1061993.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:51:00',11.75,11.73,11.75,11.72,185300.0,2172829.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:52:00',11.73,11.73,11.76,11.71,97700.0,1145413.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:53:00',11.73,11.77,11.77,11.73,37800.0,444156.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:54:00',11.77,11.78,11.78,11.77,43000.0,506361.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:55:00',11.78,11.77,11.78,11.77,81200.0,955961.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:56:00',11.77,11.77,11.78,11.76,36400.0,428338.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:57:00',11.77,11.77,11.77,11.76,59800.0,703499.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:58:00',11.76,11.79,11.79,11.76,52300.0,616201.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 13:59:00',11.79,11.79,11.79,11.78,58100.0,684769.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:00:00',11.8,11.78,11.8,11.78,48100.0,567104.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:01:00',11.78,11.78,11.79,11.77,43800.0,515912.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:02:00',11.78,11.77,11.78,11.76,34500.0,406061.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:03:00',11.77,11.77,11.77,11.76,27700.0,326032.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:04:00',11.77,11.76,11.77,11.75,80000.0,940777.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:05:00',11.76,11.75,11.76,11.75,66500.0,781523.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:06:00',11.75,11.74,11.76,11.74,196900.0,2312718.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:07:00',11.75,11.75,11.76,11.73,153100.0,1797798.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:08:00',11.75,11.76,11.76,11.74,106600.0,1252299.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:09:00',11.74,11.76,11.76,11.74,84500.0,992785.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:10:00',11.76,11.76,11.77,11.76,47700.0,561058.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:11:00',11.76,11.77,11.78,11.76,47100.0,554192.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:12:00',11.76,11.77,11.78,11.76,27800.0,327194.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:13:00',11.77,11.78,11.78,11.75,51100.0,601394.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:14:00',11.76,11.8,11.8,11.75,135000.0,1588455.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:15:00',11.8,11.79,11.8,11.78,36100.0,425523.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:16:00',11.8,11.79,11.8,11.78,113500.0,1338296.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 
14:17:00',11.79,11.79,11.8,11.78,45000.0,530429.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:18:00',11.79,11.8,11.8,11.78,49200.0,580234.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:19:00',11.8,11.82,11.82,11.8,43200.0,510430.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:20:00',11.83,11.86,11.86,11.83,58900.0,697146.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:21:00',11.85,11.81,11.86,11.81,78300.0,926567.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:22:00',11.82,11.82,11.82,11.81,37700.0,445321.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:23:00',11.8,11.8,11.82,11.8,27600.0,325903.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:24:00',11.82,11.78,11.82,11.78,33300.0,393267.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:25:00',11.79,11.8,11.8,11.78,38800.0,457648.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:26:00',11.8,11.8,11.8,11.79,11900.0,140406.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:27:00',11.81,11.83,11.83,11.8,62500.0,738130.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:28:00',11.83,11.87,11.87,11.83,61064.0,723757.1200000048,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:29:00',11.86,11.85,11.87,11.85,23400.0,277705.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:30:00',11.86,11.86,11.87,11.8,75100.0,888528.0799999833,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:31:00',11.86,11.87,11.87,11.82,45621.0,539986.2200000286,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:32:00',11.84,11.86,11.87,11.83,43600.0,516837.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:33:00',11.85,11.81,11.86,11.81,40200.0,475623.78999996185,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:34:00',11.83,11.85,11.85,11.81,41579.0,491491.15000003576,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:35:00',11.81,11.84,11.85,11.81,18100.0,214370.78999996185,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:36:00',11.83,11.83,11.85,11.83,20300.0,240353.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:37:00',11.83,11.85,11.86,11.83,59200.0,701452.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:38:00',11.85,11.86,11.86,11.82,101300.0,1200326.7900000215,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:39:00',11.85,11.86,11.87,11.84,23600.0,279738.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:40:00',11.87,11.84,11.87,11.84,45900.0,544341.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:41:00',11.85,11.83,11.86,11.83,48600.0,575773.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:42:00',11.86,11.84,11.86,11.82,77400.0,915829.6200000048,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:43:00',11.83,11.83,11.84,11.81,44300.0,523969.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:44:00',11.83,11.81,11.83,11.81,68900.0,814555.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:45:00',11.81,11.81,11.82,11.81,79100.0,934546.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:46:00',11.81,11.82,11.82,11.81,53900.0,636811.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:47:00',11.82,11.81,11.82,11.8,131100.0,1547666.0,11.78)") + tdSql.execute(f"insert into tb values 
('2020-01-07 14:48:00',11.8,11.81,11.81,11.8,38800.0,457967.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:49:00',11.8,11.82,11.82,11.8,58400.0,689759.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:50:00',11.82,11.81,11.82,11.81,66100.0,780881.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:51:00',11.81,11.81,11.82,11.81,13391.0,158199.70999997854,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:52:00',11.82,11.81,11.82,11.79,152600.0,1801575.9900000095,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:53:00',11.82,11.81,11.82,11.8,104300.0,1231667.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:54:00',11.8,11.81,11.82,11.8,141260.0,1667581.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:55:00',11.81,11.8,11.82,11.8,126740.0,1496139.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:56:00',11.79,11.76,11.8,11.76,191345.0,2255231.949999988,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 14:57:00',11.78,11.78,11.78,11.76,99500.0,1171310.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-07 15:00:00',11.77,11.77,11.77,11.77,210600.0,2478762.0,11.78)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:31:00',11.86,11.99,11.99,11.86,969300.0,11544577.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:32:00',12.0,12.06,12.08,12.0,732300.0,8812888.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:33:00',12.06,11.93,12.06,11.9,1186100.0,14202394.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:34:00',11.94,11.88,11.95,11.88,725800.0,8640879.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:35:00',11.87,11.92,11.92,11.85,331400.0,3940248.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:36:00',11.91,11.82,11.91,11.82,333300.0,3952700.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:37:00',11.82,11.86,11.86,11.8,269600.0,3190195.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:38:00',11.86,11.8,11.86,11.8,500000.0,5915343.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:39:00',11.83,11.82,11.83,11.78,268300.0,3166061.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:40:00',11.82,11.88,11.88,11.77,246265.0,2911120.8999999985,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:41:00',11.89,11.9,11.94,11.89,207700.0,2475814.0000000075,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:42:00',11.93,11.92,11.93,11.89,149700.0,1782699.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:43:00',11.92,12.01,12.01,11.92,582907.0,6970150.439999998,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:44:00',12.01,11.97,12.01,11.97,283000.0,3392985.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:45:00',11.98,11.98,11.99,11.97,545200.0,6532243.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:46:00',11.98,12.02,12.02,11.97,668400.0,8014970.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:47:00',12.05,12.01,12.05,12.01,337700.0,4061646.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:48:00',12.01,12.02,12.02,12.0,139000.0,1669695.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:49:00',12.01,12.01,12.02,12.01,178100.0,2139348.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:50:00',12.0,12.0,12.01,12.0,121500.0,1458754.0,11.77)") + 
tdSql.execute(f"insert into tb values ('2020-01-08 09:51:00',12.01,11.99,12.01,11.99,208800.0,2505681.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:52:00',12.0,11.98,12.0,11.98,140400.0,1682153.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:53:00',11.98,12.02,12.02,11.98,275800.0,3308836.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:54:00',12.02,12.02,12.03,12.01,140100.0,1684206.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:55:00',12.03,12.04,12.05,12.03,170400.0,2050992.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:56:00',12.05,12.09,12.09,12.03,317900.0,3833499.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:57:00',12.08,12.08,12.09,12.08,345800.0,4179299.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:58:00',12.06,12.03,12.07,12.03,130900.0,1577011.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 09:59:00',12.01,12.04,12.04,12.01,77900.0,937076.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:00:00',12.04,12.03,12.04,12.02,48800.0,587197.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:01:00',12.03,12.01,12.03,12.0,84900.0,1020105.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:02:00',12.01,12.02,12.02,12.0,143300.0,1723109.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:03:00',12.02,12.05,12.07,12.02,156300.0,1883042.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:04:00',12.07,12.13,12.13,12.06,248800.0,3009978.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:05:00',12.13,12.13,12.13,12.11,227700.0,2759920.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:06:00',12.14,12.11,12.14,12.1,212300.0,2572077.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:07:00',12.1,12.14,12.14,12.1,304500.0,3689113.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:08:00',12.15,12.13,12.15,12.11,215100.0,2610990.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:09:00',12.13,12.12,12.13,12.12,44400.0,538354.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:10:00',12.12,12.12,12.13,12.11,72300.0,876546.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:11:00',12.12,12.08,12.12,12.08,220200.0,2665568.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:12:00',12.08,12.1,12.1,12.08,101600.0,1228260.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:13:00',12.11,12.11,12.12,12.1,63800.0,772511.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:14:00',12.12,12.11,12.12,12.1,83600.0,1012696.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:15:00',12.11,12.08,12.11,12.08,120500.0,1458134.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:16:00',12.1,12.05,12.1,12.05,103800.0,1253430.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:17:00',12.05,12.03,12.07,12.03,145000.0,1747767.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:18:00',12.04,12.02,12.04,12.01,164600.0,1979260.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:19:00',12.01,11.99,12.02,11.99,162400.0,1949130.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:20:00',11.98,11.98,12.0,11.97,110500.0,1324680.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:21:00',11.99,11.98,11.99,11.97,31700.0,379668.0,11.77)") + 
tdSql.execute(f"insert into tb values ('2020-01-08 10:22:00',11.98,11.99,11.99,11.97,76700.0,919152.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:23:00',11.98,11.96,11.99,11.96,114800.0,1375071.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:24:00',11.96,11.93,11.98,11.91,163000.0,1945281.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:25:00',11.93,11.95,11.98,11.93,34500.0,411824.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:26:00',11.93,11.94,11.94,11.93,45800.0,546620.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:27:00',11.93,11.93,11.94,11.93,72200.0,861402.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:28:00',11.94,11.93,11.94,11.93,83900.0,1001456.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:29:00',11.94,11.95,11.96,11.93,31200.0,372555.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:30:00',11.96,11.98,11.98,11.96,20100.0,240573.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:31:00',11.97,11.96,11.98,11.96,22000.0,263405.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:32:00',11.96,11.98,12.0,11.96,30700.0,368045.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:33:00',12.0,11.97,12.0,11.97,38200.0,457647.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:34:00',11.97,12.0,12.0,11.97,17900.0,214645.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:35:00',12.0,11.96,12.0,11.96,102000.0,1221077.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:36:00',11.96,11.95,11.98,11.95,34100.0,407809.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:37:00',11.95,11.94,11.95,11.94,17100.0,204271.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:38:00',11.94,11.94,11.95,11.93,10700.0,127751.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:39:00',11.93,11.93,11.95,11.93,47100.0,562049.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:40:00',11.94,11.94,11.94,11.93,31900.0,380739.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:41:00',11.93,11.94,11.95,11.93,17200.0,205353.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:42:00',11.94,11.96,11.97,11.94,19800.0,236641.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:43:00',11.96,11.95,11.97,11.95,48200.0,576622.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:44:00',11.98,11.95,11.98,11.95,8200.0,98105.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:45:00',11.96,11.97,11.97,11.95,23900.0,285778.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:46:00',11.97,11.97,11.98,11.95,33200.0,397359.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:47:00',11.97,11.97,11.98,11.95,30500.0,365205.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:48:00',11.96,11.94,11.97,11.94,41200.0,492462.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:49:00',11.94,11.93,11.94,11.92,104700.0,1248368.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:50:00',11.94,11.93,11.94,11.92,25200.0,300656.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:51:00',11.93,11.91,11.93,11.91,170200.0,2028315.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:52:00',11.91,11.91,11.91,11.9,65400.0,778657.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 
10:53:00',11.9,11.88,11.9,11.88,86800.0,1032232.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:54:00',11.89,11.89,11.89,11.88,34400.0,408873.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:55:00',11.88,11.89,11.89,11.88,51000.0,606105.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:56:00',11.89,11.92,11.92,11.89,82400.0,980813.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:57:00',11.92,11.95,11.95,11.92,79600.0,950668.4300000072,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:58:00',11.95,11.95,11.95,11.92,80900.0,965951.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 10:59:00',11.95,11.95,11.96,11.94,68300.0,816046.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:00:00',11.95,11.96,11.96,11.95,19100.0,228402.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:01:00',11.96,11.93,11.96,11.93,58700.0,701503.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:02:00',11.93,11.89,11.95,11.89,71800.0,855173.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:03:00',11.89,11.89,11.9,11.88,28300.0,336423.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:04:00',11.89,11.88,11.9,11.88,55900.0,664750.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:05:00',11.89,11.9,11.92,11.88,35100.0,417661.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:06:00',11.91,11.92,11.92,11.91,21600.0,257313.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:07:00',11.92,11.91,11.92,11.9,28000.0,333443.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:08:00',11.91,11.91,11.91,11.9,15500.0,184557.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:09:00',11.91,11.93,11.93,11.9,77500.0,923605.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:10:00',11.93,11.93,11.93,11.92,35200.0,419758.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:11:00',11.93,11.92,11.93,11.91,35200.0,419427.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:12:00',11.91,11.89,11.92,11.89,20400.0,242863.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:13:00',11.89,11.9,11.9,11.89,18400.0,218869.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:14:00',11.9,11.9,11.9,11.89,29800.0,354472.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:15:00',11.9,11.88,11.9,11.88,59500.0,707013.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:16:00',11.87,11.84,11.88,11.84,106200.0,1259777.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:17:00',11.84,11.82,11.84,11.81,62500.0,738601.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:18:00',11.82,11.81,11.82,11.8,130500.0,1541133.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:19:00',11.81,11.81,11.82,11.8,53200.0,628256.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:20:00',11.82,11.83,11.83,11.81,89400.0,1056532.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:21:00',11.83,11.9,11.9,11.82,294500.0,3497211.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:22:00',11.88,11.86,11.89,11.86,110100.0,1307920.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:23:00',11.86,11.86,11.87,11.86,23800.0,282433.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 
11:24:00',11.87,11.86,11.87,11.84,22100.0,261970.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:25:00',11.86,11.85,11.87,11.85,78200.0,927491.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:26:00',11.86,11.86,11.86,11.85,38200.0,452428.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:27:00',11.85,11.86,11.86,11.84,9400.0,111425.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:28:00',11.87,11.86,11.87,11.85,6500.0,77089.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:29:00',11.86,11.85,11.86,11.85,8300.0,98418.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 11:30:00',11.85,11.84,11.86,11.83,9400.0,111329.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:01:00',11.84,11.81,11.84,11.81,227900.0,2694345.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:02:00',11.82,11.82,11.82,11.8,30600.0,361511.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:03:00',11.81,11.81,11.82,11.81,13600.0,160657.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:04:00',11.85,11.81,11.85,11.8,77900.0,920352.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:05:00',11.81,11.81,11.84,11.8,62800.0,742590.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:06:00',11.81,11.83,11.85,11.81,202900.0,2402548.419999987,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:07:00',11.82,11.9,11.9,11.82,50058.0,593918.3000000119,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:08:00',11.89,11.93,11.93,11.86,117800.0,1402898.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:09:00',11.93,11.93,11.94,11.9,111100.0,1324350.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:10:00',11.93,11.9,11.96,11.9,135900.0,1622071.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:11:00',11.9,11.94,11.95,11.9,82900.0,988800.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:12:00',11.94,11.97,11.97,11.93,62600.0,748523.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:13:00',11.98,12.0,12.0,11.97,105800.0,1268615.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:14:00',11.99,11.99,12.0,11.98,70600.0,846399.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:15:00',11.99,12.02,12.02,11.97,455658.0,5471984.159999996,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:16:00',12.03,12.08,12.09,12.01,190300.0,2294897.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:17:00',12.08,12.11,12.12,12.07,397000.0,4803242.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:18:00',12.1,12.1,12.17,12.01,924900.0,11236227.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:19:00',12.1,12.19,12.19,12.1,367100.0,4472429.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:20:00',12.19,12.16,12.19,12.16,200100.0,2437388.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:21:00',12.15,12.09,12.15,12.08,127600.0,1544602.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:22:00',12.1,12.12,12.13,12.08,113900.0,1378052.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:23:00',12.12,12.1,12.12,12.09,107600.0,1301942.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:24:00',12.11,12.08,12.11,12.08,109200.0,1319868.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 
13:25:00',12.09,12.1,12.1,12.07,99300.0,1200979.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:26:00',12.09,12.09,12.1,12.08,48600.0,587562.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:27:00',12.09,12.05,12.09,12.05,90300.0,1089928.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:28:00',12.05,12.02,12.06,12.02,84500.0,1016340.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:29:00',12.03,12.04,12.04,12.02,64700.0,778018.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:30:00',12.04,12.05,12.05,12.03,52400.0,631005.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:31:00',12.05,12.09,12.1,12.05,53800.0,649968.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:32:00',12.09,12.12,12.12,12.08,262500.0,3176698.99999997,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:33:00',12.13,12.17,12.18,12.12,118900.0,1444982.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:34:00',12.17,12.13,12.18,12.13,122300.0,1487613.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:35:00',12.13,12.14,12.16,12.13,193400.0,2348903.1100000143,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:36:00',12.13,12.13,12.15,12.12,108900.0,1321690.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:37:00',12.12,12.1,12.15,12.1,113600.0,1376982.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:38:00',12.1,12.08,12.12,12.08,178300.0,2157748.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:39:00',12.07,12.08,12.08,12.06,55200.0,666637.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:40:00',12.09,12.12,12.12,12.08,86000.0,1040562.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:41:00',12.11,12.11,12.12,12.11,85800.0,1039755.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:42:00',12.11,12.11,12.13,12.1,243900.0,2952943.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:43:00',12.11,12.1,12.11,12.09,128700.0,1557646.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:44:00',12.09,12.06,12.09,12.06,93100.0,1124181.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:45:00',12.08,12.04,12.09,12.04,87000.0,1049309.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:46:00',12.05,12.08,12.08,12.05,114151.0,1375917.550000012,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:47:00',12.07,12.07,12.08,12.06,84200.0,1016780.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:48:00',12.07,12.06,12.08,12.05,146700.0,1770253.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:49:00',12.07,12.06,12.08,12.06,217800.0,2625084.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:50:00',12.07,12.07,12.08,12.06,102400.0,1236308.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:51:00',12.07,12.07,12.08,12.07,51900.0,626728.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:52:00',12.06,12.08,12.08,12.06,61712.0,744801.8399999738,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:53:00',12.07,12.08,12.08,12.07,76000.0,917855.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:54:00',12.07,12.07,12.08,12.06,100800.0,1216622.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:55:00',12.08,12.04,12.08,12.04,75888.0,915106.1600000262,11.77)") + tdSql.execute(f"insert into tb 
values ('2020-01-08 13:56:00',12.04,12.01,12.04,12.01,49700.0,597576.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:57:00',12.02,12.02,12.02,12.0,117400.0,1409872.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:58:00',12.01,11.99,12.01,11.99,88500.0,1061844.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 13:59:00',11.99,11.99,11.99,11.97,459400.0,5500281.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:00:00',11.97,11.98,11.99,11.97,65400.0,783356.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:01:00',11.99,12.02,12.02,11.98,95100.0,1140802.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:02:00',12.01,12.04,12.05,12.01,89100.0,1072340.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:03:00',12.05,12.04,12.07,12.03,122500.0,1477145.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:04:00',12.04,12.05,12.05,12.04,64300.0,774569.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:05:00',12.04,12.04,12.04,12.02,67900.0,817021.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:06:00',12.04,12.05,12.05,12.02,46200.0,555942.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:07:00',12.03,12.03,12.04,12.02,45500.0,547352.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:08:00',12.02,12.01,12.03,12.0,102300.0,1229247.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:09:00',12.0,12.01,12.02,12.0,31500.0,378402.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:10:00',12.01,12.0,12.01,12.0,41200.0,494819.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:11:00',12.0,11.99,12.02,11.99,56600.0,679339.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:12:00',12.0,11.99,12.0,11.98,48000.0,575773.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:13:00',11.98,11.99,12.0,11.98,25900.0,310571.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:14:00',11.99,11.99,11.99,11.98,26800.0,321210.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:15:00',11.98,11.98,11.99,11.97,22800.0,273152.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:16:00',11.98,11.95,11.98,11.95,99900.0,1195754.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:17:00',11.95,11.92,11.95,11.92,96600.0,1152911.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:18:00',11.91,11.91,11.92,11.9,79900.0,952248.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:19:00',11.92,11.91,11.93,11.91,34800.0,414584.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:20:00',11.91,11.9,11.91,11.9,41500.0,493898.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:21:00',11.89,11.89,11.89,11.88,21500.0,255622.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:22:00',11.88,11.87,11.89,11.87,33500.0,397937.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:23:00',11.88,11.85,11.88,11.85,74600.0,884573.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:24:00',11.84,11.84,11.86,11.83,113600.0,1345079.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:25:00',11.84,11.89,11.89,11.84,147000.0,1742627.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:26:00',11.89,11.9,11.9,11.86,28500.0,338871.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 
14:27:00',11.9,11.92,11.93,11.89,55100.0,656600.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:28:00',11.93,11.9,11.93,11.9,31300.0,372966.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:29:00',11.9,11.92,11.92,11.9,15200.0,180980.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:30:00',11.92,11.92,11.93,11.9,36900.0,439541.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:31:00',11.92,11.93,11.93,11.92,31100.0,370979.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:32:00',11.93,11.97,11.98,11.93,47228.0,564658.0400000215,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:33:00',11.96,12.0,12.0,11.96,56100.0,672112.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:34:00',11.99,11.99,11.99,11.98,27700.0,331967.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:35:00',11.99,11.99,12.0,11.99,43400.0,520416.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:36:00',12.0,12.0,12.0,11.98,34000.0,407703.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:37:00',11.99,11.99,12.0,11.99,33500.0,401682.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:38:00',11.99,11.98,11.99,11.98,15100.0,181016.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:39:00',11.98,11.99,11.99,11.98,20700.0,248105.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:40:00',11.98,11.96,11.99,11.96,22400.0,268301.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:41:00',11.96,11.97,11.97,11.94,24300.0,290581.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:42:00',11.97,11.97,11.97,11.95,35300.0,422383.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:43:00',11.96,11.95,11.97,11.9,174600.0,2082936.1599999666,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:44:00',11.95,11.94,11.95,11.91,5500.0,65679.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:45:00',11.94,11.94,11.96,11.92,95400.0,1139203.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:46:00',11.94,11.94,11.97,11.92,31400.0,375225.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:47:00',11.97,11.99,11.99,11.94,66957.0,802030.8600000143,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:48:00',11.98,11.99,12.0,11.98,30300.0,363151.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:49:00',11.99,11.98,12.0,11.98,90000.0,1079313.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:50:00',12.0,12.0,12.0,11.99,118883.0,1426176.1700000167,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:51:00',12.0,12.0,12.0,11.99,133000.0,1595759.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:52:00',12.0,11.99,12.0,11.98,122217.0,1465572.8299999833,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:53:00',11.99,11.99,12.0,11.98,210200.0,2520826.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:54:00',12.0,12.01,12.01,12.0,127800.0,1533947.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:55:00',12.02,12.05,12.05,12.01,123300.0,1482774.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:56:00',12.04,12.07,12.08,12.04,154483.0,1863283.3199999928,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 14:57:00',12.07,12.08,12.09,12.07,232000.0,2802836.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-08 
15:00:00',12.09,12.09,12.09,12.09,293500.0,3548415.0,11.77)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:31:00',11.8,11.96,11.96,11.79,1181452.0,13952825.2,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:32:00',11.93,11.87,11.94,11.84,348500.0,4140560.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:33:00',11.87,11.89,11.92,11.87,277900.0,3304930.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:34:00',11.89,11.84,11.89,11.8,336600.0,3985417.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:35:00',11.85,11.84,11.87,11.81,303800.0,3595813.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:36:00',11.84,11.83,11.84,11.82,180000.0,2129381.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:37:00',11.84,11.84,11.85,11.83,117800.0,1394355.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:38:00',11.83,11.74,11.83,11.74,1094600.0,12901723.000000004,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:39:00',11.73,11.68,11.75,11.66,564800.0,6611564.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:40:00',11.69,11.76,11.8,11.69,269000.0,3151298.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:41:00',11.76,11.78,11.78,11.74,213500.0,2509286.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:42:00',11.78,11.8,11.8,11.77,155500.0,1832592.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:43:00',11.79,11.76,11.79,11.76,252800.0,2975994.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:44:00',11.77,11.77,11.79,11.75,269400.0,3170422.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:45:00',11.77,11.79,11.8,11.77,140400.0,1654537.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:46:00',11.79,11.8,11.8,11.78,110900.0,1307492.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:47:00',11.8,11.83,11.83,11.8,148400.0,1752863.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:48:00',11.83,11.82,11.83,11.8,169200.0,2000161.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:49:00',11.8,11.82,11.83,11.8,91600.0,1082658.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:50:00',11.83,11.82,11.84,11.81,126000.0,1489754.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:51:00',11.82,11.79,11.82,11.79,111300.0,1313866.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:52:00',11.79,11.76,11.79,11.76,282300.0,3323827.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:53:00',11.76,11.76,11.77,11.75,170500.0,2005499.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:54:00',11.76,11.79,11.79,11.75,130100.0,1531191.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:55:00',11.79,11.8,11.8,11.78,122400.0,1443704.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:56:00',11.8,11.81,11.82,11.79,86300.0,1019182.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:57:00',11.81,11.82,11.83,11.81,57800.0,683367.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:58:00',11.83,11.83,11.83,11.81,84100.0,994313.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 09:59:00',11.83,11.81,11.83,11.81,96600.0,1141702.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:00:00',11.81,11.84,11.84,11.81,71600.0,846272.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 
10:01:00',11.82,11.82,11.83,11.81,65800.0,777777.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:02:00',11.81,11.83,11.83,11.81,71500.0,845057.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:03:00',11.83,11.86,11.86,11.83,63496.0,752347.6799999923,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:04:00',11.86,11.87,11.89,11.86,122300.0,1451942.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:05:00',11.86,11.87,11.88,11.86,79700.0,945965.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:06:00',11.88,11.88,11.88,11.86,65200.0,774223.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:07:00',11.87,11.89,11.9,11.86,114800.0,1364045.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:08:00',11.88,11.86,11.9,11.86,64400.0,764960.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:09:00',11.87,11.86,11.88,11.84,119900.0,1421962.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:10:00',11.84,11.85,11.86,11.82,73300.0,867638.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:11:00',11.83,11.82,11.84,11.82,53200.0,629552.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:12:00',11.82,11.83,11.85,11.82,133900.0,1584264.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:13:00',11.83,11.85,11.85,11.83,54500.0,645483.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:14:00',11.85,11.88,11.88,11.84,183000.0,2170850.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:15:00',11.88,11.89,11.9,11.86,74100.0,880572.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:16:00',11.9,11.9,11.92,11.89,78200.0,930818.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:17:00',11.92,11.89,11.92,11.88,125600.0,1494664.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:18:00',11.88,11.88,11.9,11.88,64100.0,761948.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:19:00',11.87,11.88,11.91,11.87,54500.0,647863.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:20:00',11.89,11.9,11.92,11.89,77000.0,916637.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:21:00',11.93,11.9,11.93,11.88,206800.0,2458801.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:22:00',11.88,11.85,11.88,11.84,106000.0,1256595.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:23:00',11.85,11.83,11.86,11.83,159200.0,1885375.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:24:00',11.83,11.84,11.86,11.83,148800.0,1761022.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:25:00',11.83,11.82,11.83,11.81,215200.0,2543641.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:26:00',11.82,11.82,11.84,11.81,225200.0,2662189.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:27:00',11.82,11.82,11.83,11.81,129000.0,1524894.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:28:00',11.82,11.82,11.83,11.82,114600.0,1354650.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:29:00',11.83,11.84,11.87,11.83,112600.0,1332867.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:30:00',11.84,11.85,11.85,11.82,110800.0,1311207.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:31:00',11.83,11.82,11.85,11.82,81500.0,964412.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 
10:32:00',11.84,11.84,11.84,11.82,76700.0,907196.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:33:00',11.84,11.85,11.85,11.82,61500.0,728081.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:34:00',11.85,11.85,11.86,11.84,27500.0,325729.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:35:00',11.85,11.87,11.87,11.85,89300.0,1059118.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:36:00',11.85,11.87,11.88,11.82,92400.0,1096264.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:37:00',11.87,11.86,11.87,11.84,58700.0,696401.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:38:00',11.85,11.86,11.88,11.85,68100.0,808058.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:39:00',11.86,11.88,11.89,11.86,50500.0,599803.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:40:00',11.88,11.88,11.89,11.86,34200.0,406103.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:41:00',11.88,11.88,11.89,11.87,13200.0,156802.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:42:00',11.88,11.91,11.93,11.88,144500.0,1719635.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:43:00',11.91,11.94,11.95,11.91,158800.0,1894595.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:44:00',11.94,11.96,11.96,11.91,115300.0,1377235.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:45:00',11.95,11.91,11.95,11.91,66100.0,788861.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:46:00',11.92,11.93,11.94,11.92,75500.0,900353.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:47:00',11.94,11.95,11.96,11.93,89500.0,1069684.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:48:00',11.95,11.96,11.97,11.95,72000.0,861053.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:49:00',11.95,11.97,11.98,11.95,147200.0,1762197.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:50:00',11.97,11.99,11.99,11.97,161500.0,1935869.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:51:00',11.99,12.0,12.01,11.99,76500.0,917906.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:52:00',12.0,12.0,12.0,11.99,78300.0,939684.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:53:00',12.0,11.95,12.01,11.95,106000.0,1269869.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:54:00',11.95,12.0,12.0,11.95,89700.0,1074745.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:55:00',12.0,11.98,12.0,11.96,44900.0,538166.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:56:00',11.96,11.95,11.97,11.95,15200.0,181801.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:57:00',11.95,11.94,11.95,11.91,86600.0,1033371.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:58:00',11.94,11.94,11.95,11.94,12100.0,144526.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 10:59:00',11.94,11.95,11.96,11.94,35000.0,418281.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:00:00',11.96,11.97,11.97,11.96,21700.0,259540.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:01:00',11.96,11.97,11.97,11.96,8700.0,104090.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:02:00',11.97,11.95,11.97,11.95,36700.0,439063.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 
11:03:00',11.96,11.96,11.96,11.95,22000.0,263109.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:04:00',11.96,11.96,11.97,11.96,22300.0,266865.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:05:00',11.96,11.95,11.96,11.95,13200.0,157878.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:06:00',11.95,11.99,11.99,11.95,20000.0,239539.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:07:00',11.99,11.99,12.0,11.99,31886.0,382583.1400000155,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:08:00',12.0,11.99,12.0,11.98,31500.0,377722.1399999857,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:09:00',11.99,11.99,11.99,11.98,27900.0,334285.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:10:00',11.99,11.97,11.99,11.97,11414.0,136729.7199999988,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:11:00',11.97,11.98,11.98,11.97,26100.0,312661.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:12:00',11.98,11.99,11.99,11.97,36500.0,437536.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:13:00',12.0,12.0,12.0,11.99,39400.0,472703.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:14:00',12.0,11.99,12.02,11.99,27400.0,329000.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:15:00',11.99,11.99,12.0,11.99,2000.0,23981.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:16:00',11.99,12.0,12.0,11.99,67000.0,803868.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:17:00',11.99,12.0,12.01,11.99,26000.0,312084.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:18:00',12.0,11.97,12.0,11.97,58600.0,702840.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:19:00',11.97,11.98,11.99,11.97,35100.0,420603.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:20:00',11.97,11.96,11.98,11.96,36200.0,433314.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:21:00',11.96,11.97,11.97,11.96,25200.0,301540.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:22:00',11.98,11.98,11.99,11.97,74700.0,895397.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:23:00',11.98,11.99,11.99,11.96,74100.0,888127.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:24:00',11.99,11.96,11.99,11.96,33800.0,404807.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:25:00',11.96,11.95,11.97,11.95,24500.0,292918.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:26:00',11.95,11.95,11.95,11.94,24500.0,292679.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:27:00',11.94,11.96,11.96,11.94,40900.0,488751.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:28:00',11.95,11.95,11.96,11.95,12400.0,148204.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:29:00',11.95,11.95,11.96,11.94,64457.0,770570.150000006,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 11:30:00',11.95,11.96,11.96,11.93,42743.0,510538.84999999404,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:01:00',11.97,11.96,11.98,11.96,70600.0,844816.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:02:00',11.96,11.98,11.98,11.96,18500.0,221566.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:03:00',11.97,11.98,11.98,11.97,29500.0,353483.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 
13:04:00',11.98,11.98,12.0,11.98,54600.0,655162.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:05:00',11.98,12.0,12.01,11.98,88200.0,1057428.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:06:00',11.99,12.0,12.0,11.96,23700.0,284382.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:07:00',12.0,12.02,12.02,11.96,53800.0,646060.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:08:00',12.02,11.98,12.02,11.98,20800.0,249202.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:09:00',11.98,12.0,12.0,11.98,27700.0,332378.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:10:00',12.0,12.0,12.0,11.98,12200.0,146372.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:11:00',12.0,11.98,12.0,11.98,4400.0,52724.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:12:00',11.98,11.93,11.98,11.93,108700.0,1299979.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:13:00',11.93,11.91,11.95,11.9,82900.0,987624.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:14:00',11.93,11.93,11.98,11.92,103700.0,1240948.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:15:00',11.93,11.96,11.97,11.93,3900.0,46668.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:16:00',11.96,11.95,11.98,11.95,62600.0,749326.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:17:00',11.95,11.96,11.98,11.95,10700.0,128102.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:18:00',11.96,12.02,12.02,11.96,118400.0,1420798.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:19:00',12.01,12.01,12.02,12.0,35700.0,429016.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:20:00',12.01,12.02,12.02,12.01,19200.0,230699.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:21:00',12.02,12.01,12.02,12.01,14600.0,175379.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:22:00',12.01,12.0,12.04,12.0,103600.0,1245267.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:23:00',12.0,11.97,12.01,11.97,9600.0,115115.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:24:00',11.97,11.96,11.98,11.96,9100.0,108942.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:25:00',11.97,11.99,11.99,11.96,18800.0,225218.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:26:00',11.99,11.99,12.0,11.99,9000.0,107915.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:27:00',11.99,12.01,12.01,11.99,31500.0,378045.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:28:00',12.01,12.03,12.03,12.01,31100.0,373794.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:29:00',12.03,12.02,12.04,12.02,21300.0,256204.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:30:00',12.02,12.02,12.03,12.0,90000.0,1080841.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:31:00',12.02,12.02,12.03,12.02,24100.0,289965.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:32:00',12.02,12.03,12.03,12.02,25700.0,309124.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:33:00',12.03,12.02,12.04,12.02,23200.0,279098.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:34:00',12.02,12.06,12.06,12.02,79100.0,953326.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:35:00',12.06,12.11,12.12,12.06,298800.0,3612764.0,12.09)") + 
tdSql.execute(f"insert into tb values ('2020-01-09 13:36:00',12.12,12.12,12.13,12.11,105900.0,1283501.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:37:00',12.11,12.13,12.13,12.11,92900.0,1126259.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:38:00',12.13,12.13,12.15,12.13,96400.0,1169894.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:39:00',12.12,12.11,12.13,12.1,42300.0,512549.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:40:00',12.1,12.1,12.1,12.09,22700.0,274581.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:41:00',12.1,12.11,12.13,12.1,46800.0,567083.099999994,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:42:00',12.11,12.13,12.14,12.11,104390.0,1265925.7000000179,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:43:00',12.13,12.1,12.13,12.09,129600.0,1569111.2999999821,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:44:00',12.1,12.1,12.12,12.1,35300.0,427724.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:45:00',12.11,12.1,12.13,12.1,27200.0,329237.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:46:00',12.1,12.11,12.12,12.1,25501.0,308947.10000002384,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:47:00',12.12,12.13,12.13,12.11,70300.0,852342.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:48:00',12.13,12.11,12.13,12.1,11400.0,138103.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:49:00',12.11,12.1,12.12,12.1,13300.0,161070.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:50:00',12.1,12.11,12.12,12.1,46438.0,562392.1799999774,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:51:00',12.11,12.11,12.13,12.1,83862.0,1015883.0600000024,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:52:00',12.11,12.13,12.13,12.11,59838.0,725375.9399999976,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:53:00',12.14,12.15,12.15,12.13,86000.0,1044086.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:54:00',12.14,12.15,12.15,12.14,42000.0,510247.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:55:00',12.13,12.13,12.14,12.13,82600.0,1002290.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:56:00',12.13,12.12,12.14,12.11,71162.0,863430.6299999952,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:57:00',12.12,12.12,12.15,12.11,45200.0,548489.8000000119,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:58:00',12.15,12.12,12.15,12.12,13400.0,162586.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 13:59:00',12.12,12.12,12.14,12.12,38900.0,471704.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:00:00',12.14,12.12,12.14,12.12,26300.0,318796.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:01:00',12.12,12.13,12.13,12.11,66328.0,804196.3600000143,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:02:00',12.13,12.11,12.14,12.11,33672.0,408110.0799999833,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:03:00',12.11,12.1,12.13,12.1,47100.0,570431.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:04:00',12.1,12.11,12.11,12.1,19369.0,234477.59000000358,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:05:00',12.12,12.12,12.12,12.11,15400.0,186643.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 
14:06:00',12.12,12.11,12.13,12.11,15400.0,186660.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:07:00',12.11,12.12,12.12,12.11,20600.0,249528.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:08:00',12.12,12.1,12.12,12.1,31531.0,381865.7199999988,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:09:00',12.11,12.1,12.11,12.1,23900.0,289493.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:10:00',12.1,12.1,12.11,12.09,53000.0,641330.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:11:00',12.1,12.09,12.1,12.08,49500.0,598315.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:12:00',12.07,12.04,12.07,12.04,61300.0,739610.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:13:00',12.04,12.01,12.04,12.01,164900.0,1981477.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:14:00',12.01,12.05,12.05,12.01,22100.0,265748.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:15:00',12.05,11.97,12.05,11.97,67800.0,813874.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:16:00',11.97,11.95,11.97,11.94,112800.0,1348763.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:17:00',11.95,11.95,11.96,11.95,26900.0,321502.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:18:00',11.96,11.97,11.99,11.96,33100.0,396230.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:19:00',11.98,11.96,11.98,11.96,27100.0,324405.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:20:00',11.96,11.99,11.99,11.96,17200.0,205815.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:21:00',11.98,12.0,12.0,11.98,19500.0,233949.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:22:00',12.0,11.98,12.0,11.98,5900.0,70682.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:23:00',11.98,11.98,11.98,11.97,23100.0,276745.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:24:00',11.98,11.97,11.98,11.95,59500.0,711852.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:25:00',11.97,11.96,11.97,11.95,36300.0,434076.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:26:00',11.95,11.95,11.95,11.94,51700.0,617761.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:27:00',11.96,11.97,11.97,11.95,102800.0,1229707.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:28:00',11.97,11.97,11.99,11.97,44100.0,528334.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:29:00',11.97,11.99,11.99,11.97,28800.0,345015.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:30:00',11.96,11.99,11.99,11.96,7500.0,89804.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:31:00',11.99,12.0,12.0,11.98,55100.0,661147.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:32:00',12.0,12.0,12.05,12.0,24500.0,294675.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:33:00',12.01,12.02,12.02,12.0,49100.0,589928.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:34:00',12.02,12.02,12.03,12.02,8200.0,98583.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:35:00',12.03,12.05,12.05,12.03,58200.0,700387.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:36:00',12.05,12.06,12.06,12.05,31500.0,379668.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:37:00',12.07,12.08,12.08,12.05,32900.0,396865.0,12.09)") 
+ tdSql.execute(f"insert into tb values ('2020-01-09 14:38:00',12.08,12.07,12.09,12.07,124200.0,1499433.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:39:00',12.06,12.05,12.06,12.04,29100.0,350621.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:40:00',12.05,12.02,12.05,12.02,35700.0,429752.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:41:00',12.03,12.03,12.04,12.02,107600.0,1294053.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:42:00',12.04,12.06,12.08,12.04,62900.0,757692.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:43:00',12.08,12.08,12.09,12.06,33400.0,403474.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:44:00',12.08,12.08,12.09,12.08,36700.0,443528.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:45:00',12.08,12.08,12.09,12.07,30900.0,373291.56999999285,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:46:00',12.08,12.06,12.08,12.06,57900.0,698897.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:47:00',12.06,12.05,12.07,12.05,11400.0,137457.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:48:00',12.06,12.07,12.08,12.05,112900.0,1362505.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:49:00',12.08,12.06,12.08,12.06,33669.0,406603.1400000155,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:50:00',12.07,12.08,12.09,12.07,97100.0,1172659.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:51:00',12.08,12.06,12.09,12.06,77131.0,931442.8599999845,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:52:00',12.06,12.08,12.09,12.06,47400.0,572668.25,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:53:00',12.07,12.06,12.08,12.05,44900.0,541687.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:54:00',12.05,12.08,12.08,12.05,117400.0,1417946.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:55:00',12.08,12.08,12.09,12.08,76200.0,920693.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:56:00',12.08,12.07,12.09,12.07,49800.0,601680.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 14:57:00',12.08,12.08,12.08,12.06,62000.0,748534.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-09 15:00:00',12.08,12.08,12.08,12.08,130900.0,1581272.0,12.09)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:31:00',12.06,12.1,12.1,12.02,316526.0,3816146.3,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:32:00',12.08,12.08,12.15,12.07,131200.0,1589154.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:33:00',12.06,12.05,12.08,12.05,49400.0,596016.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:34:00',12.05,12.08,12.08,12.05,75200.0,907711.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:35:00',12.07,12.12,12.12,12.07,86500.0,1045399.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:36:00',12.12,12.08,12.14,12.06,88100.0,1065477.000000001,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:37:00',12.08,12.09,12.09,12.06,22400.0,270362.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:38:00',12.07,12.05,12.07,12.0,113400.0,1364445.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:39:00',12.05,12.01,12.05,12.01,30400.0,365468.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 
09:40:00',12.01,11.95,12.01,11.95,136200.0,1630108.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:41:00',11.95,12.0,12.04,11.95,169752.0,2035382.039999999,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:42:00',12.02,12.01,12.04,12.01,68700.0,825444.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:43:00',12.01,11.98,12.01,11.98,44900.0,538684.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:44:00',11.98,11.97,11.98,11.96,78000.0,933675.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:45:00',11.97,11.96,11.97,11.96,66000.0,789435.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:46:00',11.96,11.95,11.97,11.95,81400.0,973556.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:47:00',11.95,11.95,11.96,11.94,108600.0,1297634.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:48:00',11.95,11.95,11.96,11.94,46600.0,556907.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:49:00',11.95,11.95,11.96,11.95,51400.0,614276.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:50:00',11.95,11.98,11.98,11.95,104200.0,1246640.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:51:00',12.0,11.99,12.0,11.97,147700.0,1769823.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:52:00',11.98,11.99,11.99,11.96,102000.0,1221748.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:53:00',11.99,11.97,12.0,11.97,11600.0,138885.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:54:00',11.97,11.97,11.99,11.96,31600.0,378161.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:55:00',11.97,11.96,11.99,11.96,54300.0,649568.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:56:00',11.96,11.95,11.97,11.95,47200.0,564310.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:57:00',11.96,11.93,11.96,11.93,116800.0,1395667.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:58:00',11.93,11.91,11.93,11.91,89600.0,1067691.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 09:59:00',11.9,11.92,11.94,11.9,88398.0,1053290.1999999993,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:00:00',11.94,11.92,11.94,11.91,80500.0,960517.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:01:00',11.92,11.9,11.92,11.9,78400.0,933755.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:02:00',11.9,11.93,11.93,11.9,187100.0,2230130.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:03:00',11.93,11.95,11.95,11.91,172600.0,2055217.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:04:00',11.93,11.91,11.93,11.91,36300.0,432817.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:05:00',11.91,11.92,11.92,11.91,42600.0,507551.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:06:00',11.92,11.91,11.93,11.9,151400.0,1802617.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:07:00',11.9,11.9,11.91,11.9,74200.0,883104.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:08:00',11.9,11.9,11.91,11.9,112500.0,1339359.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:09:00',11.9,11.91,11.92,11.9,28500.0,339459.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:10:00',11.91,11.91,11.92,11.91,49700.0,591928.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 
10:11:00',11.91,11.91,11.92,11.9,51100.0,608953.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:12:00',11.91,11.92,11.95,11.91,64600.0,770182.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:13:00',11.92,11.93,11.95,11.92,64400.0,767805.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:14:00',11.93,11.93,11.93,11.92,33100.0,394730.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:15:00',11.93,11.92,11.94,11.91,38000.0,453054.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:16:00',11.92,11.91,11.92,11.91,65200.0,776615.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:17:00',11.91,11.9,11.92,11.9,87300.0,1039071.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:18:00',11.9,11.88,11.91,11.88,369000.0,4390167.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:19:00',11.9,11.89,11.91,11.88,69500.0,826864.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:20:00',11.89,11.89,11.9,11.89,63000.0,749152.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:21:00',11.89,11.88,11.89,11.88,95890.0,1140070.200000003,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:22:00',11.88,11.88,11.89,11.87,26100.0,310042.1000000015,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:23:00',11.88,11.87,11.89,11.86,70900.0,841972.099999994,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:24:00',11.87,11.88,11.88,11.86,45100.0,535246.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:25:00',11.86,11.86,11.88,11.86,120900.0,1434415.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:26:00',11.86,11.87,11.87,11.85,62700.0,743781.1000000015,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:27:00',11.87,11.86,11.87,11.86,91400.0,1084428.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:28:00',11.87,11.88,11.89,11.87,79000.0,938308.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:29:00',11.88,11.88,11.89,11.88,20400.0,242424.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:30:00',11.88,11.9,11.9,11.88,57000.0,677808.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:31:00',11.9,11.89,11.91,11.88,37100.0,441204.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:32:00',11.9,11.91,11.91,11.89,23800.0,283159.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:33:00',11.91,11.91,11.91,11.9,22600.0,269057.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:34:00',11.91,11.92,11.92,11.9,12000.0,142938.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:35:00',11.92,11.9,11.93,11.9,29500.0,351508.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:36:00',11.9,11.9,11.9,11.9,16700.0,198730.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:37:00',11.9,11.91,11.93,11.89,9000.0,107172.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:38:00',11.91,11.93,11.93,11.91,9600.0,114429.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:39:00',11.93,11.93,11.94,11.92,10800.0,128874.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:40:00',11.92,11.94,11.94,11.92,17100.0,204038.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:41:00',11.94,11.97,11.97,11.93,52700.0,629486.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 
10:42:00',11.97,11.94,11.97,11.93,36500.0,436116.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:43:00',11.94,11.94,11.94,11.94,13100.0,156482.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:44:00',11.94,11.94,11.96,11.94,8900.0,106304.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:45:00',11.94,11.96,11.96,11.92,24700.0,294770.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:46:00',11.93,11.93,11.94,11.93,20300.0,242182.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:47:00',11.93,11.95,11.96,11.93,20800.0,248404.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:48:00',11.94,11.94,11.95,11.94,38800.0,463298.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:49:00',11.94,11.94,11.94,11.93,41400.0,494259.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:50:00',11.94,11.95,11.96,11.94,83000.0,991746.0000000075,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:51:00',11.96,11.96,11.96,11.95,12100.0,144703.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:52:00',11.96,11.95,11.96,11.95,5100.0,60978.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:53:00',11.95,11.97,11.97,11.95,9900.0,118432.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:54:00',11.97,11.96,11.97,11.95,28700.0,343045.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:55:00',11.96,11.95,11.96,11.95,24600.0,294016.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:56:00',11.95,11.94,11.95,11.94,23700.0,283225.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:57:00',11.94,11.93,11.95,11.93,31000.0,370132.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:58:00',11.93,11.95,11.95,11.93,17500.0,208955.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 10:59:00',11.95,11.93,11.95,11.92,32900.0,392361.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:00:00',11.93,11.94,11.94,11.92,13200.0,157433.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:01:00',11.94,11.95,11.95,11.93,13500.0,161150.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:02:00',11.93,11.95,11.95,11.93,11900.0,142081.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:03:00',11.95,11.95,11.95,11.94,7700.0,92001.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:04:00',11.95,11.95,11.95,11.95,8700.0,103965.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:05:00',11.95,11.94,11.95,11.94,5900.0,70494.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:06:00',11.95,11.95,11.96,11.94,10700.0,127840.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:07:00',11.93,11.96,11.96,11.93,6400.0,76470.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:08:00',11.95,11.97,11.97,11.94,15300.0,183070.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:09:00',11.97,11.96,11.97,11.95,19000.0,227279.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:10:00',11.96,11.96,11.97,11.95,4900.0,58625.25,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:11:00',11.96,11.96,11.97,11.96,12300.0,147188.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:12:00',11.97,11.97,11.98,11.95,5900.0,70631.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 
11:13:00',11.97,11.97,11.97,11.96,9600.0,114856.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:14:00',11.97,11.96,11.97,11.96,14900.0,178237.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:15:00',11.97,11.97,11.97,11.97,3500.0,41895.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:16:00',11.97,11.97,11.97,11.97,6500.0,77777.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:17:00',11.97,11.97,11.97,11.97,15600.0,186732.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:18:00',11.97,11.98,11.98,11.97,38900.0,465951.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:19:00',11.98,11.99,11.99,11.98,3500.0,41934.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:20:00',11.99,11.99,12.0,11.98,55500.0,665383.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:21:00',11.99,11.99,11.99,11.97,35200.0,421620.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:22:00',11.99,12.0,12.0,11.97,41000.0,491893.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:23:00',11.99,11.99,12.0,11.99,4700.0,56370.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:24:00',12.0,11.99,12.0,11.99,18200.0,218234.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:25:00',11.98,11.99,11.99,11.98,7800.0,93504.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:26:00',11.99,12.0,12.0,11.99,7400.0,88708.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:27:00',12.0,11.98,12.0,11.98,17100.0,204902.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:28:00',11.98,11.97,11.98,11.97,17500.0,209492.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:29:00',11.97,11.97,11.98,11.97,28200.0,337559.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 11:30:00',11.97,11.98,11.98,11.97,1800.0,21554.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:01:00',11.98,11.98,11.98,11.97,72300.0,865768.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:02:00',11.98,11.97,11.98,11.96,32400.0,387625.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:03:00',11.96,11.95,11.97,11.94,21600.0,258199.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:04:00',11.96,11.95,11.96,11.95,32600.0,389777.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:05:00',11.96,11.96,11.97,11.95,7900.0,94441.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:06:00',11.96,11.96,11.97,11.96,2400.0,28708.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:07:00',11.96,11.95,11.96,11.95,3100.0,37050.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:08:00',11.96,11.96,11.96,11.95,3500.0,41832.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:09:00',11.96,11.96,11.97,11.95,12500.0,149502.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:10:00',11.96,11.97,11.97,11.95,1600.0,19136.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:11:00',11.96,11.96,11.96,11.95,17300.0,206748.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:12:00',11.96,11.97,11.97,11.95,47600.0,568842.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:13:00',11.97,11.95,11.97,11.94,19100.0,228067.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:14:00',11.95,11.93,11.96,11.93,10700.0,127777.0,12.08)") + tdSql.execute(f"insert 
into tb values ('2020-01-10 13:15:00',11.95,11.95,11.96,11.93,42300.0,505124.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:16:00',11.96,11.95,11.96,11.94,8800.0,105194.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:17:00',11.96,11.95,11.96,11.93,34000.0,405679.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:18:00',11.95,11.93,11.95,11.93,24800.0,295882.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:19:00',11.94,11.94,11.94,11.93,20900.0,249431.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:20:00',11.93,11.94,11.94,11.93,1200.0,14324.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:21:00',11.94,11.95,11.95,11.93,41100.0,490779.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:22:00',11.94,11.94,11.96,11.94,10000.0,119503.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:23:00',11.94,11.94,11.96,11.94,8800.0,105179.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:24:00',11.95,11.95,11.95,11.94,4500.0,53747.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:25:00',11.95,11.93,11.95,11.93,7700.0,91918.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:26:00',11.93,11.94,11.94,11.92,8600.0,102590.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:27:00',11.94,11.93,11.94,11.92,11600.0,138370.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:28:00',11.93,11.95,11.95,11.92,37200.0,443933.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:29:00',11.94,11.95,11.96,11.94,31301.0,374046.9499999881,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:30:00',11.95,11.96,11.96,11.94,41700.0,498496.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:31:00',11.96,11.99,11.99,11.96,127800.0,1530479.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:32:00',11.99,11.97,11.99,11.97,15900.0,190415.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:33:00',11.98,11.98,11.98,11.96,41400.0,495799.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:34:00',11.98,12.01,12.01,11.98,104700.0,1255640.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:35:00',12.01,11.98,12.03,11.98,141400.0,1695886.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:36:00',11.98,11.99,12.02,11.97,117300.0,1405537.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:37:00',12.0,12.01,12.02,12.0,37700.0,453031.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:38:00',12.01,12.06,12.07,12.0,237200.0,2857519.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:39:00',12.07,12.06,12.08,12.05,128500.0,1550267.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:40:00',12.06,12.04,12.07,12.04,36100.0,435110.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:41:00',12.04,12.04,12.05,12.02,18000.0,216603.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:42:00',12.04,12.03,12.04,12.02,36900.0,443792.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:43:00',12.04,12.04,12.04,12.03,29900.0,359889.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:44:00',12.04,12.04,12.04,12.03,53800.0,647334.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:45:00',12.04,12.03,12.04,12.02,17000.0,204427.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 
13:46:00',12.03,12.03,12.03,12.02,22700.0,272989.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:47:00',12.03,12.03,12.03,12.02,70200.0,844460.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:48:00',12.03,12.01,12.03,12.01,34300.0,412246.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:49:00',12.03,12.02,12.03,12.01,34100.0,409855.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:50:00',12.02,12.01,12.03,12.01,21200.0,254718.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:51:00',12.02,12.02,12.03,12.01,48500.0,582911.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:52:00',12.02,12.01,12.03,12.01,21300.0,255961.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:53:00',12.02,12.0,12.02,12.0,37987.0,456280.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:54:00',12.0,11.99,12.01,11.99,34913.0,418955.13000001013,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:55:00',11.99,12.01,12.01,11.99,32600.0,391159.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:56:00',12.0,12.0,12.01,12.0,14000.0,168017.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:57:00',12.0,12.0,12.01,12.0,20900.0,250812.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:58:00',12.01,12.01,12.01,12.0,24300.0,291781.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 13:59:00',12.0,12.0,12.02,11.99,18487.0,221915.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:00:00',12.01,12.02,12.02,12.0,15300.0,183768.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:01:00',12.02,12.02,12.02,12.01,2300.0,27645.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:02:00',12.02,12.02,12.02,12.02,1900.0,22837.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:03:00',12.02,12.01,12.02,12.01,14500.0,174166.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:04:00',12.01,12.0,12.02,12.0,16700.0,200511.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:05:00',12.0,12.01,12.02,12.0,14100.0,169334.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:06:00',12.01,12.01,12.01,12.0,21100.0,253376.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:07:00',12.02,12.01,12.02,12.01,4400.0,52854.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:08:00',12.0,12.0,12.01,12.0,26513.0,318184.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:09:00',12.0,12.01,12.01,12.0,25687.0,308279.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:10:00',12.0,12.0,12.01,11.99,20200.0,242234.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:11:00',12.0,11.99,12.0,11.99,15500.0,185871.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:12:00',11.99,12.0,12.0,11.99,28100.0,336946.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:13:00',12.0,11.99,12.0,11.98,28800.0,345236.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:14:00',11.99,11.98,11.99,11.98,17400.0,208541.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:15:00',11.99,12.0,12.0,11.99,33300.0,399287.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:16:00',12.0,12.0,12.0,11.99,32200.0,386236.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:17:00',12.0,12.0,12.0,11.99,21159.0,253877.0,12.08)") + tdSql.execute(f"insert 
into tb values ('2020-01-10 14:18:00',12.0,12.01,12.01,12.0,4200.0,50427.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:19:00',12.01,12.01,12.01,12.0,4100.0,49227.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:20:00',12.01,12.01,12.01,11.98,47200.0,566171.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:21:00',12.0,12.0,12.01,12.0,24800.0,297639.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:22:00',12.0,12.0,12.02,12.0,64500.0,774303.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:23:00',12.02,12.02,12.03,12.0,17100.0,205518.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:24:00',12.02,12.02,12.03,12.01,33200.0,399297.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:25:00',12.02,12.02,12.03,12.02,17600.0,211601.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:26:00',12.02,12.01,12.02,12.01,16200.0,194668.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:27:00',12.01,12.01,12.02,12.0,83700.0,1005081.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:28:00',12.01,12.01,12.01,11.99,49300.0,591313.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:29:00',12.01,12.01,12.02,12.0,14200.0,170574.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:30:00',12.01,12.02,12.02,12.0,65800.0,789746.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:31:00',12.0,11.98,12.02,11.98,42900.0,514641.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:32:00',12.0,12.0,12.01,11.99,20100.0,241232.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:33:00',12.0,12.0,12.0,11.99,64400.0,772180.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:34:00',12.0,12.0,12.01,11.98,77800.0,932959.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:35:00',12.0,11.99,12.01,11.99,19600.0,235230.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:36:00',11.99,12.0,12.01,11.98,35200.0,422031.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:37:00',12.0,12.01,12.01,12.0,20000.0,240035.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:38:00',12.0,11.99,12.01,11.99,38900.0,466642.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:39:00',12.0,11.99,12.01,11.99,48500.0,581947.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:40:00',12.0,12.0,12.0,11.97,76600.0,918304.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:41:00',12.0,12.0,12.0,11.97,38500.0,461387.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:42:00',12.0,11.98,12.0,11.96,65200.0,781070.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:43:00',11.98,11.95,11.98,11.95,185896.0,2222256.199999988,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:44:00',11.99,11.99,12.0,11.95,82800.0,991636.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:45:00',11.98,11.98,11.99,11.94,45600.0,545571.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:46:00',11.94,11.95,11.99,11.94,75200.0,899330.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:47:00',11.95,11.97,11.98,11.94,93700.0,1119454.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:48:00',11.97,11.93,11.97,11.93,45200.0,539657.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 
14:49:00',11.93,11.92,11.94,11.9,245600.0,2926711.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:50:00',11.91,11.9,11.91,11.89,162800.0,1937561.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:51:00',11.9,11.9,11.91,11.88,168604.0,2004458.600000009,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:52:00',11.9,11.89,11.91,11.88,169296.0,2010980.399999991,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:53:00',11.89,11.88,11.9,11.86,179600.0,2131752.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:54:00',11.86,11.84,11.88,11.84,366700.0,4348833.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:55:00',11.85,11.85,11.86,11.84,169100.0,2002880.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:56:00',11.86,11.9,11.98,11.86,164200.0,1952380.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 14:57:00',11.9,11.89,11.94,11.88,59500.0,708274.0,12.08)") + tdSql.execute(f"insert into tb values ('2020-01-10 15:00:00',11.89,11.89,11.89,11.89,137000.0,1628930.0,12.08)") + + def check_max_min_results(self): + max_results = [11.48, 11.54, 11.82, 12.14, 12.19, 12.15, 12.15] + min_results = [10.90, 11.20, 11.41, 11.61, 11.82, 11.75, 11.86] + + for i in range(len(max_results)): + tdSql.checkData(i, 1, max_results[i]) + tdSql.checkData(i, 2, min_results[i]) + + def basic_query(self): + tdSql.query(f"select first(ts), max(high), min(high) from tb interval(1d)") + self.check_max_min_results() + + tdSql.query(f"select last(ts), max(high), min(high) from tb interval(1d)") + self.check_max_min_results() + + + def run(self): + dbname = "db" + tdSql.prepare() + self.prepare_data() + self.basic_query() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/percentile.py b/tests/system-test/2-query/percentile.py index 935f55a8c2..e01aae97c0 100644 --- a/tests/system-test/2-query/percentile.py +++ b/tests/system-test/2-query/percentile.py @@ -50,7 +50,7 @@ class TDTestCase: 'col12': f'binary({self.binary_length})', 'col13': f'nchar({self.nchar_length})' } - + self.tag_dict = { 'ts_tag' : 'timestamp', 't1': 'tinyint', @@ -85,19 +85,19 @@ class TDTestCase: self.tag_values = [ f'{self.tag_ts},{self.tag_tinyint},{self.tag_smallint},{self.tag_int},{self.tag_bigint},\ {self.tag_utint},{self.tag_usint},{self.tag_uint},{self.tag_ubint},{self.tag_float},{self.tag_double},{self.tag_bool},"{self.binary_str}","{self.nchar_str}"' - + ] - + self.param = [1,50,100] - + def insert_data(self,column_dict,tbname,row_num): - intData = [] + intData = [] floatData = [] insert_sql = self.setsql.set_insertsql(column_dict,tbname,self.binary_str,self.nchar_str) for i in range(row_num): insert_list = [] self.setsql.insert_values(column_dict,i,insert_sql,insert_list,self.ts) - intData.append(i) + intData.append(i) floatData.append(i + 0.1) return intData,floatData def check_tags(self,tags,param,num,value): @@ -117,6 +117,20 @@ class TDTestCase: else: tdSql.query(f'select percentile({k}, {param}) from {self.ntbname}') tdSql.checkData(0, 0, np.percentile(floatData, param)) + + tdSql.query(f'select percentile(col1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100) from {self.ntbname}') + tdSql.checkData(0, 0, '[0.900000, 1.800000, 2.700000, 3.600000, 4.500000, 5.400000, 6.300000, 7.200000, 8.100000, 9.000000]') + + tdSql.query(f'select percentile(col1, 9.9, 19.9, 
29.9, 39.9, 49.9, 59.9, 69.9, 79.9, 89.9, 99.9) from {self.ntbname}') + tdSql.checkData(0, 0, '[0.891000, 1.791000, 2.691000, 3.591000, 4.491000, 5.391000, 6.291000, 7.191000, 8.091000, 8.991000]') + + tdSql.error(f'select percentile(col1) from {self.ntbname}') + tdSql.error(f'select percentile(col1, -1) from {self.ntbname}') + tdSql.error(f'select percentile(col1, 101) from {self.ntbname}') + tdSql.error(f'select percentile(col1, col2) from {self.ntbname}') + tdSql.error(f'select percentile(1, col1) from {self.ntbname}') + tdSql.error(f'select percentile(col1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 101) from {self.ntbname}') + tdSql.execute(f'drop database {self.dbname}') def function_check_ctb(self): tdSql.execute(f'create database {self.dbname}') @@ -135,7 +149,7 @@ class TDTestCase: else: tdSql.query(f'select percentile({k}, {param}) from {self.stbname}_{i}') tdSql.checkData(0, 0, np.percentile(floatData, param)) - + for k,v in self.tag_dict.items(): for param in self.param: if v.lower() in ['timestamp','bool'] or 'binary' in v.lower() or 'nchar' in v.lower(): @@ -145,11 +159,25 @@ class TDTestCase: data_num = tdSql.queryResult[0][0] tdSql.query(f'select percentile({k},{param}) from {self.stbname}_{i}') tdSql.checkData(0,0,data_num) - tdSql.execute(f'drop database {self.dbname}') + + tdSql.query(f'select percentile(col1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100) from {self.stbname}_0') + tdSql.checkData(0, 0, '[0.900000, 1.800000, 2.700000, 3.600000, 4.500000, 5.400000, 6.300000, 7.200000, 8.100000, 9.000000]') + + tdSql.query(f'select percentile(col1, 9.9, 19.9, 29.9, 39.9, 49.9, 59.9, 69.9, 79.9, 89.9, 99.9) from {self.stbname}_0') + tdSql.checkData(0, 0, '[0.891000, 1.791000, 2.691000, 3.591000, 4.491000, 5.391000, 6.291000, 7.191000, 8.091000, 8.991000]') + + tdSql.error(f'select percentile(col1) from {self.stbname}_0') + tdSql.error(f'select percentile(col1, -1) from {self.stbname}_0') + tdSql.error(f'select percentile(col1, 101) from {self.stbname}_0') + tdSql.error(f'select percentile(col1, col2) from {self.stbname}_0') + tdSql.error(f'select percentile(1, col1) from {self.stbname}_0') + tdSql.error(f'select percentile(col1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 101) from {self.stbname}_0') + + tdSql.execute(f'drop database {self.dbname}') def run(self): self.function_check_ntb() self.function_check_ctb() - + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/2-query/unique.py b/tests/system-test/2-query/unique.py index 6af9b130ef..9b5da50e1f 100644 --- a/tests/system-test/2-query/unique.py +++ b/tests/system-test/2-query/unique.py @@ -433,7 +433,7 @@ class TDTestCase: tdSql.checkRows(11) tdSql.checkData(1,0,0) tdSql.checkData(10,0,9) - tdSql.query(f"select unique(t1) from (select _rowts , t1 , tbname from {dbname}.stb1 )") + tdSql.query(f"select unique(t1) v from (select _rowts , t1 , tbname from {dbname}.stb1 ) order by v desc") tdSql.checkRows(2) tdSql.checkData(0,0,4) tdSql.checkData(1,0,1) diff --git a/tools/shell/inc/shellAuto.h b/tools/shell/inc/shellAuto.h index f3ea87e4a5..6a317fe5c9 100644 --- a/tools/shell/inc/shellAuto.h +++ b/tools/shell/inc/shellAuto.h @@ -16,6 +16,8 @@ #ifndef __SHELL_AUTO__ #define __SHELL_AUTO__ +#include "shellInt.h" + #define TAB_KEY 0x09 // press tab key @@ -28,7 +30,7 @@ void pressOtherKey(char c); bool shellAutoInit(); // set conn -void shellSetConn(TAOS* conn); +void shellSetConn(TAOS* conn, bool runOnce); // exit shell auto function, shell exit call once void 
shellAutoExit(); @@ -39,7 +41,7 @@ void callbackAutoTab(char* sqlstr, TAOS* pSql, bool usedb); // introduction void printfIntroduction(); -// show all commands help +// show all commands help void showHelp(); #endif diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 81af5d7fe8..72386ba688 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -15,8 +15,8 @@ #define __USE_XOPEN -#include "shellInt.h" #include "shellAuto.h" +#include "shellInt.h" #include "shellTire.h" #include "tthread.h" @@ -26,12 +26,14 @@ #define UNION_ALL " union all " // extern function -void shellClearScreen(int32_t ecmd_pos, int32_t cursor_pos); -void shellGetPrevCharSize(const char* str, int32_t pos, int32_t* size, int32_t* width); -void shellShowOnScreen(SShellCmd* cmd); -void shellInsertChar(SShellCmd* cmd, char* c, int size); -void shellInsertStr(SShellCmd* cmd, char* str, int size); -bool appendAfterSelect(TAOS* con, SShellCmd* cmd, char* p, int32_t len); +void shellClearScreen(int32_t ecmd_pos, int32_t cursor_pos); +void shellGetPrevCharSize(const char* str, int32_t pos, int32_t* size, int32_t* width); +void shellShowOnScreen(SShellCmd* cmd); +void shellInsertChar(SShellCmd* cmd, char* c, int size); +void shellInsertStr(SShellCmd* cmd, char* str, int size); +bool appendAfterSelect(TAOS* con, SShellCmd* cmd, char* p, int32_t len); +char* tireSearchWord(int type, char* pre); +bool updateTireValue(int type, bool autoFill); typedef struct SAutoPtr { STire* p; @@ -60,23 +62,22 @@ SWords shellCommands[] = { {"alter database " " ;", 0, 0, NULL}, - {"alter dnode balance ", 0, 0, NULL}, - {"alter dnode resetlog;", 0, 0, NULL}, - {"alter dnode debugFlag 141;", 0, 0, NULL}, - {"alter dnode monitor 1;", 0, 0, NULL}, - {"alter all dnodes monitor ", 0, 0, NULL}, - {"alter alldnodes balance ", 0, 0, NULL}, - {"alter alldnodes resetlog;", 0, 0, NULL}, - {"alter alldnodes debugFlag 141;", 0, 0, NULL}, - {"alter alldnodes monitor 1;", 0, 0, NULL}, + {"alter dnode \"resetlog\";", 0, 0, NULL}, + {"alter dnode \"debugFlag\" \"141\";", 0, 0, NULL}, + {"alter dnode \"monitor\" \"0\";", 0, 0, NULL}, + {"alter dnode \"monitor\" \"1\";", 0, 0, NULL}, + {"alter all dnodes \"resetlog\";", 0, 0, NULL}, + {"alter all dnodes \"debugFlag\" \"141\";", 0, 0, NULL}, + {"alter all dnodes \"monitor\" \"0\";", 0, 0, NULL}, + {"alter all dnodes \"monitor\" \"1\";", 0, 0, NULL}, {"alter table ;", 0, 0, NULL}, {"alter table modify column", 0, 0, NULL}, - {"alter local resetlog;", 0, 0, NULL}, - {"alter local DebugFlag 143;", 0, 0, NULL}, - {"alter local cDebugFlag 143;", 0, 0, NULL}, - {"alter local uDebugFlag 143;", 0, 0, NULL}, - {"alter local rpcDebugFlag 143;", 0, 0, NULL}, - {"alter local tmrDebugFlag 143;", 0, 0, NULL}, + {"alter local \"resetlog\";", 0, 0, NULL}, + {"alter local \"DebugFlag\" \"143\";", 0, 0, NULL}, + {"alter local \"cDebugFlag\" \"143\";", 0, 0, NULL}, + {"alter local \"uDebugFlag\" \"143\";", 0, 0, NULL}, + {"alter local \"rpcDebugFlag\" \"143\";", 0, 0, NULL}, + {"alter local \"tmrDebugFlag\" \"143\";", 0, 0, NULL}, {"alter topic", 0, 0, NULL}, {"alter user ;", 0, 0, NULL}, // 20 @@ -108,6 +109,7 @@ SWords shellCommands[] = { {"drop topic ;", 0, 0, NULL}, {"drop stream ;", 0, 0, NULL}, {"explain select", 0, 0, NULL}, // 44 append sub sql + {"flush database ;", 0, 0, NULL}, {"help;", 0, 0, NULL}, {"grant all on to ;", 0, 0, NULL}, {"grant read on to ;", 0, 0, NULL}, @@ -121,7 +123,6 @@ SWords shellCommands[] = { {"revoke read on from ;", 0, 0, NULL}, {"revoke write on from ;", 
0, 0, NULL}, {"select * from ", 0, 0, NULL}, - {"select _block_dist() from \\G;", 0, 0, NULL}, {"select client_version();", 0, 0, NULL}, // 60 {"select current_user();", 0, 0, NULL}, @@ -248,7 +249,7 @@ char* db_options[] = {"keep ", "wal_retention_size ", "wal_segment_size "}; -char* alter_db_options[] = {"keep ", "cachemodel ", "cachesize ", "wal_fsync_period ", "wal_level "}; +char* alter_db_options[] = {"cachemodel ", "replica ", "keep ", "cachesize ", "wal_fsync_period ", "wal_level "}; char* data_types[] = {"timestamp", "int", "int unsigned", "varchar(16)", @@ -263,6 +264,13 @@ char* key_tags[] = {"tags("}; char* key_select[] = {"select "}; +char* key_systable[] = { + "ins_dnodes", "ins_mnodes", "ins_modules", "ins_qnodes", "ins_snodes", "ins_cluster", + "ins_databases", "ins_functions", "ins_indexes", "ins_stables", "ins_tables", "ins_tags", + "ins_users", "ins_grants", "ins_vgroups", "ins_configs", "ins_dnode_variables", "ins_topics", + "ins_subscriptions", "ins_streams", "ins_stream_tasks", "ins_vnodes", "ins_user_privileges", "perf_connections", + "perf_queries", "perf_consumers", "perf_trans", "perf_apps"}; + // // ------- global variant define --------- // @@ -294,8 +302,9 @@ bool waitAutoFill = false; #define WT_VAR_TBOPTION 16 #define WT_VAR_USERACTION 17 #define WT_VAR_KEYSELECT 18 +#define WT_VAR_SYSTABLE 19 -#define WT_VAR_CNT 19 +#define WT_VAR_CNT 20 #define WT_FROM_DB_MAX 6 // max get content from db #define WT_FROM_DB_CNT (WT_FROM_DB_MAX + 1) @@ -323,6 +332,7 @@ bool varMode = false; // enter var names list mode TAOS* varCon = NULL; SShellCmd* varCmd = NULL; +bool varRunOnce = false; SMatch* lastMatch = NULL; // save last match result int cntDel = 0; // delete byte count after next press tab @@ -330,25 +340,24 @@ int cntDel = 0; // delete byte count after next press tab void printfIntroduction() { printf(" ****************************** Tab Completion **********************************\n"); char secondLine[160] = "\0"; - sprintf(secondLine, " * The %s CLI supports tab completion for a variety of items, ", - shell.info.cusName); + sprintf(secondLine, " * The %s CLI supports tab completion for a variety of items, ", shell.info.cusName); printf("%s", secondLine); int secondLineLen = strlen(secondLine); - while (84-(secondLineLen++) > 0) { + while (84 - (secondLineLen++) > 0) { printf(" "); } printf("*\n"); - printf(" * including database names, table names, function names and keywords. *\n"); - printf(" * The full list of shortcut keys is as follows: *\n"); - printf(" * [ TAB ] ...... complete the current word *\n"); - printf(" * ...... if used on a blank line, display all valid commands *\n"); - printf(" * [ Ctrl + A ] ...... move cursor to the st[A]rt of the line *\n"); - printf(" * [ Ctrl + E ] ...... move cursor to the [E]nd of the line *\n"); - printf(" * [ Ctrl + W ] ...... move cursor to the middle of the line *\n"); - printf(" * [ Ctrl + L ] ...... clear the entire screen *\n"); - printf(" * [ Ctrl + K ] ...... clear the screen after the cursor *\n"); - printf(" * [ Ctrl + U ] ...... clear the screen before the cursor *\n"); - printf(" **********************************************************************************\n\n"); + printf(" * including database names, table names, function names and keywords. *\n"); + printf(" * The full list of shortcut keys is as follows: *\n"); + printf(" * [ TAB ] ...... complete the current word *\n"); + printf(" * ...... if used on a blank line, display all supported commands *\n"); + printf(" * [ Ctrl + A ] ...... 
move cursor to the st[A]rt of the line *\n"); + printf(" * [ Ctrl + E ] ...... move cursor to the [E]nd of the line *\n"); + printf(" * [ Ctrl + W ] ...... move cursor to the middle of the line *\n"); + printf(" * [ Ctrl + L ] ...... clear the entire screen *\n"); + printf(" * [ Ctrl + K ] ...... clear the screen after the cursor *\n"); + printf(" * [ Ctrl + U ] ...... clear the screen before the cursor *\n"); + printf(" **************************************************************************************\n\n"); } void showHelp() { @@ -357,23 +366,24 @@ void showHelp() { "\n\ ----- A ----- \n\ alter database \n\ - alter dnode balance \n\ - alter dnode resetlog;\n\ - alter all dnodes monitor \n\ - alter alldnodes balance \n\ - alter alldnodes resetlog;\n\ - alter alldnodes debugFlag \n\ - alter alldnodes monitor \n\ + alter dnode 'resetlog';\n\ + alter dnode 'monitor' '0';\n\ + alter dnode 'monitor' \"1\";\n\ + alter dnode \"debugflag\" \"143\";\n\ + alter all dnodes \"monitor\" \"0\";\n\ + alter all dnodes \"monitor\" \"1\";\n\ + alter all dnodes \"resetlog\";\n\ + alter all dnodes \"debugFlag\" \n\ alter table ;\n\ alter table modify column\n\ - alter local resetlog;\n\ - alter local DebugFlag 143;\n\ + alter local \"resetlog\";\n\ + alter local \"DebugFlag\" \"143\";\n\ alter topic\n\ alter user ...\n\ ----- C ----- \n\ create table using tags ...\n\ create database ...\n\ - create dnode ...\n\ + create dnode \"fqdn:port\" ...\n\ create index ...\n\ create mnode on dnode ;\n\ create qnode on dnode ;\n\ @@ -396,6 +406,8 @@ void showHelp() { drop stream ;\n\ ----- E ----- \n\ explain select clause ...\n\ + ----- F ----- \n\ + flush database ;\n\ ----- H ----- \n\ help;\n\ ----- I ----- \n\ @@ -418,7 +430,6 @@ void showHelp() { revoke write on from ;\n\ ----- S ----- \n\ select * from where ... 
\n\ - select _block_dist() from ;\n\ select client_version();\n\ select current_user();\n\ select database();\n\ @@ -629,12 +640,18 @@ bool shellAutoInit() { GenerateVarType(WT_VAR_TBOPTION, tb_options, sizeof(tb_options) / sizeof(char*)); GenerateVarType(WT_VAR_USERACTION, user_actions, sizeof(user_actions) / sizeof(char*)); GenerateVarType(WT_VAR_KEYSELECT, key_select, sizeof(key_select) / sizeof(char*)); + GenerateVarType(WT_VAR_SYSTABLE, key_systable, sizeof(key_systable) / sizeof(char*)); return true; } // set conn -void shellSetConn(TAOS* conn) { varCon = conn; } +void shellSetConn(TAOS* conn, bool runOnce) { + varCon = conn; + varRunOnce = runOnce; + // init database and stable + if (!runOnce) updateTireValue(WT_VAR_DBNAME, false); +} // exit shell auto function, shell exit call once void shellAutoExit() { @@ -777,6 +794,15 @@ int writeVarNames(int type, TAOS_RES* tres) { return numOfRows; } +void setThreadNull(int type) { + taosThreadMutexLock(&tiresMutex); + if (threads[type]) { + taosMemoryFree(threads[type]); + } + threads[type] = NULL; + taosThreadMutexUnlock(&tiresMutex); +} + bool firstMatchCommand(TAOS* con, SShellCmd* cmd); // // thread obtain var thread from db server @@ -792,6 +818,7 @@ void* varObtainThread(void* param) { TAOS_RES* pSql = taos_query(varCon, varSqls[type]); if (taos_errno(pSql)) { taos_free_result(pSql); + setThreadNull(type); return NULL; } @@ -807,12 +834,46 @@ void* varObtainThread(void* param) { firstMatchCommand(varCon, varCmd); } + setThreadNull(type); return NULL; } +// return true is need update value by async +bool updateTireValue(int type, bool autoFill) { + // TYPE CONTEXT GET FROM DB + taosThreadMutexLock(&tiresMutex); + + // check need obtain from server + if (tires[type] == NULL) { + waitAutoFill = autoFill; + // need async obtain var names from db sever + if (threads[type] != NULL) { + if (taosThreadRunning(threads[type])) { + // thread running , need not obtain again, return + taosThreadMutexUnlock(&tiresMutex); + return NULL; + } + // destroy previous thread handle for new create thread handle + taosDestroyThread(threads[type]); + threads[type] = NULL; + } + + // create new + void* param = taosMemoryMalloc(sizeof(int)); + *((int*)param) = type; + threads[type] = taosCreateThread(varObtainThread, param); + taosThreadMutexUnlock(&tiresMutex); + return true; + } + taosThreadMutexUnlock(&tiresMutex); + + return false; +} + // only match next one word from all match words, return valuue must free by caller char* matchNextPrefix(STire* tire, char* pre) { SMatch* match = NULL; + if (tire == NULL) return NULL; // re-use last result if (lastMatch) { @@ -898,32 +959,9 @@ char* tireSearchWord(int type, char* pre) { return matchNextPrefix(tire, pre); } - // TYPE CONTEXT GET FROM DB - taosThreadMutexLock(&tiresMutex); - - // check need obtain from server - if (tires[type] == NULL) { - waitAutoFill = true; - // need async obtain var names from db sever - if (threads[type] != NULL) { - if (taosThreadRunning(threads[type])) { - // thread running , need not obtain again, return - taosThreadMutexUnlock(&tiresMutex); - return NULL; - } - // destroy previous thread handle for new create thread handle - taosDestroyThread(threads[type]); - threads[type] = NULL; - } - - // create new - void* param = taosMemoryMalloc(sizeof(int)); - *((int*)param) = type; - threads[type] = taosCreateThread(varObtainThread, param); - taosThreadMutexUnlock(&tiresMutex); + if (updateTireValue(type, true)) { return NULL; } - taosThreadMutexUnlock(&tiresMutex); // can obtain var 
@@ -1126,6 +1164,7 @@ void printScreen(TAOS* con, SShellCmd* cmd, SWords* match) {
 
 // main key press tab , matched return true else false
 bool firstMatchCommand(TAOS* con, SShellCmd* cmd) {
+  if (con == NULL || cmd == NULL) return false;
   // parse command
   SWords* input = (SWords*)taosMemoryMalloc(sizeof(SWords));
   memset(input, 0, sizeof(SWords));
@@ -1496,9 +1535,9 @@ bool matchSelectQuery(TAOS* con, SShellCmd* cmd) {
 
 // if is input create fields or tags area, return true
 bool isCreateFieldsArea(char* p) {
-  // put to while, support like create table st(ts timestamp, bin1 binary(16), bin2 + blank + TAB
+  // put to while, support like create table st(ts timestamp, bin1 binary(16), bin2 + blank + TAB
   char* p1 = strdup(p);
-  bool ret = false;
+  bool ret = false;
   while (1) {
     char* left = strrchr(p1, '(');
     if (left == NULL) {
@@ -1519,7 +1558,7 @@ bool isCreateFieldsArea(char* p) {
       ret = true;
       break;
     }
-
+
     // set string end by small for next strrchr search
     *left = 0;
   }
@@ -1670,12 +1709,49 @@ bool matchOther(TAOS* con, SShellCmd* cmd) {
   return false;
 }
 
+// last-chance match when nothing else matched
+bool matchEnd(TAOS* con, SShellCmd* cmd) {
+  // duplicate the command string
+  bool ret = false;
+  char* ps = strndup(cmd->command, cmd->commandSize);
+  char* last = lastWord(ps);
+  char* elast = strrchr(last, '.');  // find the last '.'
+  if (elast) {
+    last = elast + 1;
+  }
+
+  // need at least one character to match
+  if (strlen(last) == 0) {
+    goto _return;
+  }
+
+  // match database
+  if (elast == NULL) {
+    // no dot, so the word may be completed with a database name
+    if (fillWithType(con, cmd, last, WT_VAR_DBNAME)) {
+      ret = true;
+      goto _return;
+    }
+  }
+
+  if (fillWithType(con, cmd, last, WT_VAR_SYSTABLE)) {
+    ret = true;
+    goto _return;
+  }
+
+_return:
+  taosMemoryFree(ps);
+  return ret;
+}
+
 // main key press tab
 void pressTabKey(SShellCmd* cmd) {
-  // check
+  // check for tab on an empty command
   if (cmd->commandSize == 0) {
-    // empty
-    showHelp();
+    // show help only when not inside a multi-line input
+    if (cmd->bufferSize == 0) {
+      showHelp();
+    }
     shellShowOnScreen(cmd);
     return;
   }
@@ -1705,6 +1781,9 @@ void pressTabKey(SShellCmd* cmd) {
   matched = matchSelectQuery(varCon, cmd);
   if (matched) return;
 
+  // last-chance match
+  matched = matchEnd(varCon, cmd);
+
   return;
 }
 
@@ -1921,6 +2000,7 @@ void callbackAutoTab(char* sqlstr, TAOS* pSql, bool usedb) {
 
   if (dealUseDB(sql)) {
     // change to new db
+    if (!varRunOnce) updateTireValue(WT_VAR_STABLE, false);
     return;
   }
 
diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c
index 1f79cfcc04..54d31cdb74 100644
--- a/tools/shell/src/shellEngine.c
+++ b/tools/shell/src/shellEngine.c
@@ -1098,10 +1098,11 @@ int32_t shellExecute() {
   }
 #endif
 
-  shellSetConn(shell.conn);
+  bool runOnce = pArgs->commands != NULL || pArgs->file[0] != 0;
+  shellSetConn(shell.conn, runOnce);
   shellReadHistory();
 
-  if (pArgs->commands != NULL || pArgs->file[0] != 0) {
+  if (runOnce) {
     if (pArgs->commands != NULL) {
       printf("%s%s\r\n", shell.info.promptHeader, pArgs->commands);
       char *cmd = strdup(pArgs->commands);
@@ -1161,5 +1162,8 @@ int32_t shellExecute() {
   taosThreadJoin(spid, NULL);
 
   shellCleanupHistory();
+  taos_kill_query(shell.conn);
+  taos_close(shell.conn);
+
   return 0;
 }
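The shellExecute() change above collapses the one-shot checks into a single runOnce flag (a command string or a script file was supplied on the command line) and passes it to shellSetConn(), so interactive-only work such as warming the tab-completion cache can be skipped. A minimal sketch of that decision follows, using hypothetical argument and function names rather than the real shell structures.

#include <stdbool.h>
#include <stdio.h>

// Hypothetical stand-ins for the shell's parsed arguments and completion warm-up.
typedef struct {
  const char* commands;   // non-NULL when a command string was given on the command line
  char        file[256];  // non-empty when a script file was given on the command line
} ShellArgs;

static void warmUpCompletionCache(void) {
  // In the real shell this is where database names would be fetched asynchronously.
  printf("warming up tab-completion cache\n");
}

// Mirrors the shape of the shellExecute() change: run-once sessions skip interactive setup.
static void setupSession(const ShellArgs* args) {
  bool runOnce = args->commands != NULL || args->file[0] != 0;
  if (!runOnce) {
    warmUpCompletionCache();  // only an interactive session benefits from completion data
  }
  printf("session mode: %s\n", runOnce ? "run once" : "interactive");
}

int main(void) {
  ShellArgs interactive = {0};
  ShellArgs oneShot = {.commands = "show databases;"};
  setupSession(&interactive);
  setupSession(&oneShot);
  return 0;
}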
diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c
index 4ac4cfb119..81c23035e2 100644
--- a/tools/shell/src/shellWebsocket.c
+++ b/tools/shell/src/shellWebsocket.c
@@ -224,8 +224,8 @@ void shellRunSingleCommandWebsocketImp(char *command) {
   res = ws_query_timeout(shell.ws_conn, command, shell.args.timeout);
   int code = ws_errno(res);
   if (code != 0 && !shell.stop_query) {
-    // websocket interface masked off first bit from standard error number.
-    if (TSDB_CODE_PAR_SYNTAX_ERROR == (code|0x80000000)) {
+    // if it's not a ws connection error
+    if (TSDB_CODE_WS_DSN_ERROR != (code&TSDB_CODE_WS_DSN_ERROR)) {
       et = taosGetTimestampUs();
       fprintf(stderr, "\nDB: error: %s (%.6fs)\n", ws_errstr(res), (et - st)/1E6);
       ws_free_result(res);
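The reworked websocket check above separates connection (DSN) errors from ordinary query errors by testing whether the error code carries the complete TSDB_CODE_WS_DSN_ERROR bit pattern. The sketch below shows only that membership test; WS_DSN_ERR_PATTERN is a hypothetical placeholder value, since the real constant is defined in TDengine's headers.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WS_DSN_ERR_PATTERN UINT32_C(0x80001000) /* hypothetical value, for illustration only */

// True when every bit of the pattern is present in the error code,
// i.e. (code & pattern) == pattern, the same shape as the new condition in the diff.
static bool isWsConnError(uint32_t code) {
  return (code & WS_DSN_ERR_PATTERN) == WS_DSN_ERR_PATTERN;
}

int main(void) {
  uint32_t codes[] = {UINT32_C(0x80001000), UINT32_C(0x80001001), UINT32_C(0x80000200)};
  for (size_t i = 0; i < sizeof(codes) / sizeof(codes[0]); i++) {
    printf("0x%08" PRIX32 " -> %s\n", codes[i],
           isWsConnError(codes[i]) ? "connection (DSN) error" : "query error");
  }
  return 0;
}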