Merge branch '3.0' of github.com:taosdata/TDengine into szhou/python-udf

This commit is contained in:
slzhou 2023-02-23 14:44:14 +08:00
commit 7ec5a5df3a
177 changed files with 10265 additions and 5194 deletions

View File

@ -387,7 +387,7 @@ pipeline {
}
steps {
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 55, unit: 'MINUTES'){
timeout(time: 75, unit: 'MINUTES'){
pre_test_win()
pre_test_build_win()
run_win_ctest()

View File

@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 634399d
GIT_TAG 61cbfd2
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -796,19 +796,23 @@ HISTOGRAM(exprbin_type, bin_description, normalized)
### PERCENTILE
```sql
PERCENTILE(expr, p)
PERCENTILE(expr, p [, p1] ...)
```
**Description**: The value whose rank in a specific column matches the specified percentage. If such a value matching the specified percentage doesn't exist in the column, an interpolation value will be returned.
**Return value type**: DOUBLE
**Return value type**: This function takes 2 minumum and 11 maximum parameters, and it can simultaneously return 10 percentiles at most. If 2 parameters are given, a single percentile is returned and the value type is DOUBLE.
If more than 2 parameters are given, the return value type is a VARCHAR string, the format of which is a JSON ARRAY containing all return values.
**Applicable column types**: Numeric
**Applicable table types**: table only
**More explanations**: _p_ is in range [0,100], when _p_ is 0, the result is same as using function MIN; when _p_ is 100, the result is same as function MAX.
**More explanations**:
- _p_ is in range [0,100], when _p_ is 0, the result is same as using function MIN; when _p_ is 100, the result is same as function MAX.
- When calculating multiple percentiles of a specific column, a single PERCENTILE function with multiple parameters is adviced, as this can largely reduce the query response time.
For example, using SELECT percentile(col, 90, 95, 99) FROM table will perform better than SELECT percentile(col, 90), percentile(col, 95), percentile(col, 99) from table.
## Selection Functions

View File

@ -273,49 +273,48 @@ password: taosdata
## Start the TDengine cluster with docker-compose
1. The following docker-compose file starts a TDengine cluster with two replicas, two management nodes, two data nodes, and one arbitrator.
1. The following docker-compose file starts a TDengine cluster with three nodes.
```docker
version: "3"
services:
arbitrator:
image: tdengine/tdengine:$VERSION
command: tarbitrator
td-1:
image: tdengine/tdengine:$VERSION
environment:
TAOS_FQDN: "td-1"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td1:/var/lib/taos/
- taoslog-td1:/var/log/taos/
td-2:
image: tdengine/tdengine:$VERSION
environment:
TAOS_FQDN: "td-2"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td2:/var/lib/taos/
- taoslog-td2:/var/log/taos/
volumes:
taosdata-td1:
taoslog-td1:
taosdata-td2:
taoslog-td2:
```
```yml
version: "3"
services:
td-1:
image: tdengine/tdengine:$VERSION
environment:
TAOS_FQDN: "td-1"
TAOS_FIRST_EP: "td-1"
volumes:
- taosdata-td1:/var/lib/taos/
- taoslog-td1:/var/log/taos/
td-2:
image: tdengine/tdengine:$VERSION
environment:
TAOS_FQDN: "td-2"
TAOS_FIRST_EP: "td-1"
volumes:
- taosdata-td2:/var/lib/taos/
- taoslog-td2:/var/log/taos/
td-3:
image: tdengine/tdengine:$VERSION
environment:
TAOS_FQDN: "td-3"
TAOS_FIRST_EP: "td-1"
volumes:
- taosdata-td3:/var/lib/taos/
- taoslog-td3:/var/log/taos/
volumes:
taosdata-td1:
taoslog-td1:
taosdata-td2:
taoslog-td2:
taosdata-td3:
taoslog-td3:
```
:::note
- The `VERSION` environment variable is used to set the tdengine image tag
- `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time
- `TAOS_REPLICA` is used to set the default number of database replicas. Its value range is [1,3]
We recommend setting it with `TAOS_ARBITRATOR` to use arbitrator in a two-nodes environment.
:::
2. Start the cluster
@ -345,17 +344,18 @@ password: taosdata
4. Show dnodes via TDengine CLI
```shell
$ docker-compose exec td-1 taos -s "show dnodes"
```shell
$ docker-compose exec td-1 taos -s "show dnodes"
taos> show dnodes
id | end_point | vnodes | cores | status | role | create_time | offline reason |
======================================================================================================================================
1 | td-1:6030 | 1 | 8 | ready | any | 2022-01-18 02:47:42.871 | |
2 | td-2:6030 | 0 | 8 | ready | any | 2022-01-18 02:47:43.518 | |
0 | arbitrator:6042 | 0 | 0 | ready | arb | 2022-01-18 02:47:43.633 | - |
Query OK, 3 row(s) in set (0.000811s)
```
taos> show dnodes
id | endpoint | vnodes | support_vnodes | status | create_time | note |
======================================================================================================================================
1 | td-1:6030 | 0 | 32 | ready | 2022-08-19 07:57:29.971 | |
2 | td-2:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.415 | |
3 | td-3:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.417 | |
Query OK, 3 rows in database (0.021262s)
```
## taosAdapter
@ -373,83 +373,70 @@ password: taosdata
Suppose you want to deploy multiple taosAdapters to improve throughput and provide high availability. In that case, the recommended configuration method uses a reverse proxy such as Nginx to offer a unified access entry. For specific configuration methods, please refer to the official documentation of Nginx. Here is an example:
```docker
version: "3"
```yml
version: "3"
networks:
inter:
api:
networks:
inter:
services:
arbitrator:
image: tdengine/tdengine:$VERSION
command: tarbitrator
networks:
- inter
td-1:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-1"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td1:/var/lib/taos/
- taoslog-td1:/var/log/taos/
td-2:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-2"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td2:/var/lib/taos/
- taoslog-td2:/var/log/taos/
adapter:
image: tdengine/tdengine:$VERSION
command: taosadapter
networks:
- inter
environment:
TAOS_FIRST_EP: "td-1"
TAOS_SECOND_EP: "td-2"
deploy:
replicas: 4
nginx:
image: nginx
depends_on:
- adapter
networks:
- inter
- api
ports:
- 6041:6041
- 6044:6044/udp
command: [
"sh",
"-c",
"while true;
do curl -s http://adapter:6041/-/ping >/dev/null && break;
done;
printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
> /etc/nginx/conf.d/rest.conf;
printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
nginx -g 'daemon off;'",
]
volumes:
taosdata-td1:
taoslog-td1:
taosdata-td2:
taoslog-td2:
```
services:
td-1:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-1"
TAOS_FIRST_EP: "td-1"
volumes:
- taosdata-td1:/var/lib/taos/
- taoslog-td1:/var/log/taos/
td-2:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-2"
TAOS_FIRST_EP: "td-1"
volumes:
- taosdata-td2:/var/lib/taos/
- taoslog-td2:/var/log/taos/
adapter:
image: tdengine/tdengine:$VERSION
entrypoint: "taosadapter"
networks:
- inter
environment:
TAOS_FIRST_EP: "td-1"
TAOS_SECOND_EP: "td-2"
deploy:
replicas: 4
nginx:
image: nginx
depends_on:
- adapter
networks:
- inter
ports:
- 6041:6041
- 6044:6044/udp
command: [
"sh",
"-c",
"while true;
do curl -s http://adapter:6041/-/ping >/dev/null && break;
done;
printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
> /etc/nginx/conf.d/rest.conf;
printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
nginx -g 'daemon off;'",
]
volumes:
taosdata-td1:
taoslog-td1:
taosdata-td2:
taoslog-td2:
```
## Deploy with docker swarm

View File

@ -184,7 +184,7 @@ TDengine supports the standard JDBC 3.0 interface for manipulating databases, bu
To facilitate historical data migration, we provide a plug-in for the data synchronization tool DataX, which can automatically write data into TDengine.The automatic data migration of DataX can only support the data migration process of a single value model.
For the specific usage of DataX and how to use DataX to write data to TDengine, please refer to [DataX-based TDengine Data Migration Tool](https://www.taosdata.com/blog/2021/10/26/3156.html).
For the specific usage of DataX and how to use DataX to write data to TDengine, please refer to [DataX-based TDengine Data Migration Tool](https://www.taosdata.com/engineering/16401.html).
After migrating via DataX, we found that we can significantly improve the efficiency of migrating historical data by starting multiple processes and migrating numerous metrics simultaneously. The following are some records of the migration process. We provide these as a reference for application migration.

View File

@ -798,18 +798,22 @@ HISTOGRAM(exprbin_type, bin_description, normalized)
### PERCENTILE
```sql
PERCENTILE(expr, p)
PERCENTILE(expr, p [, p1] ... )
```
**功能说明**:统计表中某列的值百分比分位数。
**返回数据类型** DOUBLE。
**返回数据类型** 该函数最小参数个数为 2 个,最大参数个数为 11 个。可以最多同时返回 10 个百分比分位数。当参数个数为 2 时, 返回一个分位数, 类型为DOUBLE,当参数个数大于 2 时返回类型为VARCHAR, 格式为包含多个返回值的JSON数组
**应用字段**:数值类型。
**适用于**:表。
**使用说明***P*值取值范围 0≤*P*≤100为 0 的时候等同于 MIN为 100 的时候等同于 MAX。
**使用说明**
- *P*值取值范围 0≤*P*≤100为 0 的时候等同于 MIN为 100 的时候等同于 MAX;
- 同时计算针对同一列的多个分位数时建议使用一个PERCENTILE函数和多个参数的方式能很大程度上降低查询的响应时间。
比如使用查询SELECT percentile(col, 90, 95, 99) FROM table, 性能会优于SELECT percentile(col, 90), percentile(col, 95), percentile(col, 99) from table。
## 选择函数

View File

@ -197,7 +197,7 @@ Total_Size. : 表 d0 所有 block 在文件中占用的大小为 93.65 KB
Average_size: 平均每个 block 在文件中占用的空间大小为 18.73 KB
Compression_Ratio: 数据压缩率 23.98%
Compression_Ratio: 数据压缩率 23.98%
*************************** 2.row ***************************
@ -212,16 +212,18 @@ MinRows BLOCK 中最小的行数,为 3616 行
MaxRows BLOCK 中最大的行数,为 4096行
Average_Rows BLOCK 中的平均行数为4000 行
Average_Rows 每个 BLOCK 中的平均行数为4000 行
*************************** 3.row ***************************
_block_dist: Total_Tables=[1] Total_Files=[2]
_block_dist: Total_Tables=[1] Total_Files=[2] Total_Vgroups=[1]
Total_Tables: 表示子表的个数,这里为1
Total_Tables: 子表的个数,这里为 1
Total_Files 表数据保存在几个文件中,这里保存在 2 个文件中
Total_Files 表数据被分别保存的数据文件数量,这里是 2 个文件
Total_Vgroups 表数据分布的虚拟节点vnode数量
*************************** 5.row ***************************

View File

@ -309,7 +309,7 @@ services:
TAOS_FIRST_EP: "td-1"
volumes:
- taosdata-td3:/var/lib/taos/
- taoslog-td3:/var/log/taos/
- taoslog-td3:/var/log/taos/
volumes:
taosdata-td1:
taoslog-td1:
@ -473,18 +473,18 @@ Creating service taos_adapter
```shell
$ docker stack ps taos
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
7m3sbf532bqp taos_adapter.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
pj403n6ofmmh taos_adapter.2 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
rxqfwsyk5q1h taos_adapter.3 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
qj40lpxr40oc taos_adapter.4 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
oe3455ulxpze taos_nginx.1 nginx:latest vm98 Running Running about a minute ago
o0tsg70nrrc6 taos_td-1.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
7m3sbf532bqp taos_adapter.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
pj403n6ofmmh taos_adapter.2 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
rxqfwsyk5q1h taos_adapter.3 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
qj40lpxr40oc taos_adapter.4 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
oe3455ulxpze taos_nginx.1 nginx:latest vm98 Running Running about a minute ago
o0tsg70nrrc6 taos_td-1.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
q5m1oxs589cp taos_td-2.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
$ docker service ls
ID NAME MODE REPLICAS IMAGE PORTS
ozuklorgl8bs taos_adapter replicated 4/4 tdengine/tdengine:3.0.0.0
ozuklorgl8bs taos_adapter replicated 4/4 tdengine/tdengine:3.0.0.0
crmhdjw6vxw0 taos_nginx replicated 1/1 nginx:latest *:6041->6041/tcp, *:6044->6044/udp
o86ngy7csv5n taos_td-1 replicated 1/1 tdengine/tdengine:3.0.0.0
o86ngy7csv5n taos_td-1 replicated 1/1 tdengine/tdengine:3.0.0.0
rma040ny4tb0 taos_td-2 replicated 1/1 tdengine/tdengine:3.0.0.0
```
@ -495,11 +495,11 @@ rma040ny4tb0 taos_td-2 replicated 1/1 tdengine/tdengine:3.0.0.0
```shell
$ docker service scale taos_adapter=1
taos_adapter scaled to 1
overall progress: 1 out of 1 tasks
1/1: running [==================================================>]
overall progress: 1 out of 1 tasks
1/1: running [==================================================>]
verify: Service converged
$ docker service ls -f name=taos_adapter
ID NAME MODE REPLICAS IMAGE PORTS
ozuklorgl8bs taos_adapter replicated 1/1 tdengine/tdengine:3.0.0.0
ozuklorgl8bs taos_adapter replicated 1/1 tdengine/tdengine:3.0.0.0
```

View File

@ -18,82 +18,15 @@ title: OpenTSDB 应用迁移到 TDengine 的最佳实践
如果我们将原本运行在 OpenTSDB 上的应用迁移到 TDengine 上,不仅可以有效地降低计算和存储资源的占用、减少部署服务器的规模,还能够极大减少运行维护的成本的输出,让运维管理工作更简单、更轻松,大幅降低总拥有成本。与 OpenTSDB 一样TDengine 也已经进行了开源,不同的是,除了单机版,后者还实现了集群版开源,被厂商绑定的顾虑一扫而空。
在下文中我们将就“使用最典型并广泛应用的运维监控DevOps场景”来说明如何在不编码的情况下将 OpenTSDB 的应用快速、安全、可靠地迁移到 TDengine 之上。后续的章节会做更深度的介绍,以便于进行非 DevOps 场景的迁移。
在下文中我们将说明如何在不编码的情况下将 OpenTSDB 的应用快速、安全、可靠地迁移到 TDengine 之上。
## DevOps 应用快速迁移
## TDengine 与 OpenTSDB 的差异
### 1、典型应用场景
一个典型的 DevOps 应用场景的系统整体的架构如下图(图 1 所示。
**图 1. DevOps 场景中典型架构**
![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Arch.webp "图1. DevOps 场景中典型架构")
在该应用场景中包含了部署在应用环境中负责收集机器度量Metrics、网络度量Metrics以及应用度量Metrics的 Agent 工具、汇聚 Agent 收集信息的数据收集器数据持久化存储和管理的系统以及监控数据可视化工具例如Grafana 等)。
其中,部署在应用节点的 Agents 负责向 collectd/Statsd 提供不同来源的运行指标collectd/StatsD 则负责将汇聚的数据推送到 OpenTSDB 集群系统,然后使用可视化看板 Grafana 将数据可视化呈现出来。
### 2、迁移服务
- **TDengine 安装部署**
首先是 TDengine 的安装,从官网上下载 TDengine 最新稳定版进行安装。各种安装包的使用帮助请参见博客[《TDengine 多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)。
注意,安装完成以后,不要立即启动 `taosd` 服务,在正确配置完成参数以后再启动。
- **调整数据收集器配置**
在 TDengine 2.4 版本中,包含一个组件 taosAdapter。taosAdapter 是一个无状态、可快速弹性伸缩的组件,它可以兼容 Influxdb 的 Line Protocol 和 OpenTSDB 的 telnet/JSON 写入协议规范,提供了丰富的数据接入能力,有效的节省用户迁移成本,降低用户应用迁移的难度。
用户可以根据需求弹性部署 taosAdapter 实例,结合场景的需要,快速提升数据写入的吞吐量,为不同应用场景下的数据写入提供保障。
通过 taosAdapter用户可以将 collectd 或 StatsD 收集的数据直接推送到 TDengine 实现应用场景的无缝迁移非常的轻松便捷。taosAdapter 还支持 Telegraf、Icinga、TCollector 、node_exporter 的数据接入,使用详情参考[taosAdapter](/reference/taosadapter/)。
如果使用 collectd修改其默认位置 `/etc/collectd/collectd.conf` 的配置文件为指向 taosAdapter 部署的节点 IP 地址和端口。假设 taosAdapter 的 IP 地址为 192.168.1.130,端口为 6046配置如下
```html
LoadPlugin write_tsdb
<Plugin write_tsdb>
<Node>
Host "192.168.1.130" Port "6046" HostTags "status=production" StoreRates
false AlwaysAppendDS false
</Node>
</Plugin>
```
即可让 collectd 将数据使用推送到 OpenTSDB 的插件方式推送到 taosAdapter taosAdapter 将调用 API 将数据写入到 TDengine 中,从而完成数据的写入工作。如果你使用的是 StatsD 相应地调整配置文件信息。
- **调整看板Dashboard系统**
在数据能够正常写入 TDengine 后,可以调整适配 Grafana 将写入 TDengine 的数据可视化呈现出来。获取和使用 TDengine 提供的 Grafana 插件请参考[与其他工具的连接](/third-party/grafana)。
TDengine 提供了默认的两套 Dashboard 模板,用户只需要将 Grafana 目录下的模板导入到 Grafana 中即可激活使用。
**图 2. 导入 Grafana 模板**
![TDengine Database IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard](./IT-DevOps-Solutions-Immigrate-OpenTSDB-Dashboard.webp "图2. 导入 Grafana 模板")
操作完以上步骤后,就完成了将 OpenTSDB 替换成为 TDengine 的迁移工作。可以看到整个流程非常简单,不需要写代码,只需要对某些配置文件进行调整即可完成全部的迁移工作。
### 3、迁移后架构
完成迁移以后,此时的系统整体的架构如下图(图 3所示而整个过程中采集端、数据写入端、以及监控呈现端均保持了稳定除了极少的配置调整外不涉及任何重要的更改和变动。OpenTSDB 大量的应用场景均为 DevOps ,这种场景下,简单的参数设置即可完成 OpenTSDB 到 TDengine 迁移动作,使用上 TDengine 更加强大的处理能力和查询性能。
在绝大多数的 DevOps 场景中,如果你拥有一个小规模的 OpenTSDB 集群3 台及以下的节点)作为 DevOps 的存储端,依赖于 OpenTSDB 为系统持久化层提供数据存储和查询功能,那么你可以安全地将其替换为 TDengine并节约更多的计算和存储资源。在同等计算资源配置情况下单台 TDengine 即可满足 3 ~ 5 台 OpenTSDB 节点提供的服务能力。如果规模比较大,那便需要采用 TDengine 集群。
如果你的应用特别复杂,或者应用领域并不是 DevOps 场景,你可以继续阅读后续的章节,更加全面深入地了解将 OpenTSDB 的应用迁移到 TDengine 的高级话题。
**图 3. 迁移完成后的系统架构**
![TDengine Database IT-DevOps-Solutions-Immigrate-TDengine-Arch](./IT-DevOps-Solutions-Immigrate-TDengine-Arch.webp "图 3. 迁移完成后的系统架构")
## 其他场景的迁移评估与策略
### 1、TDengine 与 OpenTSDB 的差异
本章将详细介绍 OpenTSDB 与 TDengine 在系统功能层面上存在的差异。阅读完本章的内容,你可以全面地评估是否能够将某些基于 OpenTSDB 的复杂应用迁移到 TDengine 上,以及迁移之后应该注意的问题。
本节将详细介绍 OpenTSDB 与 TDengine 在系统功能层面上存在的差异。阅读完本节的内容,你可以全面地评估是否能够将某些基于 OpenTSDB 的复杂应用迁移到 TDengine 上,以及迁移之后应该注意的问题。
TDengine 当前只支持 Grafana 的可视化看板呈现,所以如果你的应用中使用了 Grafana 以外的前端看板(例如[TSDash](https://github.com/facebook/tsdash)、[Status Wolf](https://github.com/box/StatusWolf)等),那么前端看板将无法直接迁移到 TDengine需要将前端看板重新适配到 Grafana 才可以正常运行。
在 2.3.0.x 版本中TDengine 只能够支持 collectd 和 StatsD 作为数据收集汇聚软件,当然后面会陆续提供更多的数据收集聚合软件的接入支持。如果您的收集端使用了其他类型的数据汇聚器,您的应用需要适配到这两个数据汇聚端系统,才能够将数据正常写入。除了上述两个数据汇聚端软件协议以外,TDengine 还支持通过 InfluxDB 的行协议和 OpenTSDB 的数据写入协议、JSON 格式将数据直接写入,您可以重写数据推送端的逻辑,使用 TDengine 支持的行协议来写入数据。
如果您的收集端使用了像 collectd 和 StatsD 这样的数据采集工具,要重新配置这些数据采集工具将数据写入到 TDengine。TDengine 还支持通过 InfluxDB 的行协议和 OpenTSDB 的数据写入协议、JSON 格式将数据直接写入,您可以重写数据推送端的逻辑,使用 TDengine 支持的行协议来写入数据。
此外,如果你的应用中使用了 OpenTSDB 以下特性,在将应用迁移到 TDengine 之前你还需要了解以下注意事项:
@ -104,11 +37,11 @@ TDengine 当前只支持 Grafana 的可视化看板呈现,所以如果你的
通过上面的介绍,相信你应该能够了解 OpenTSDB 迁移到 TDengine 带来的变化,这些信息也有助于你正确地判断是否可以接受将应用 迁移到 TDengine 之上,体验 TDengine 提供的强大的时序数据处理能力和便捷的使用体验。
### 2、迁移策略
## 迁移策略
首先将基于 OpenTSDB 的系统进行迁移涉及到的数据模式设计、系统规模估算、数据写入端改造,进行数据分流、应用适配工作;之后将两个系统并行运行一段时间,再将历史数据迁移到 TDengine 中。当然如果你的应用中有部分功能强依赖于上述 OpenTSDB 特性,同时又不希望停止使用,可以考虑保持原有的 OpenTSDB 系统运行,同时启动 TDengine 来提供主要的服务。
## 数据模型设计
### 数据模型设计
一方面TDengine 要求其入库的数据具有严格的模式定义。另一方面TDengine 的数据模型相对于 OpenTSDB 来说又更加丰富,多值模型能够兼容全部的单值模型的建立需求。
@ -150,7 +83,7 @@ insert into memory_vm130_memory_buffered_collectd using memory tags(vm130
如果你想要利用 TDengine 的多值模型能力,需要首先满足以下要求:不同的采集量具有相同的采集频率,且能够通过消息队列**同时到达**数据写入端,从而确保使用 SQL 语句将多个指标一次性写入。将度量的名称作为超级表的名称,建立具有相同采集频率且能够同时到达的数据多列模型。子表的表名采用具有固定规则的方式进行命名。上述每个度量均只包含一个测量值,因此无法将其转化为多值模型。
## 数据分流与应用适配
### 数据分流与应用适配
从消息队列中订阅数据,并启动调整后的写入程序写入数据。
@ -166,15 +99,128 @@ TDengine 不支持采用 OpenTSDB 的查询语法进行查询或数据获取处
TDengine 支持标准的 JDBC 3.0 接口操纵数据库,你也可以使用其他类型的高级语言的连接器来查询读取数据,以适配你的应用。具体的操作和使用帮助也请参阅用户手册。
## 历史数据迁移
### 1、使用工具自动迁移数据
## 使用 DataX 迁移数据
为了方便历史数据的迁移工作,我们为数据同步工具 DataX 提供了插件,能够将数据自动写入到 TDengine 中,需要注意的是 DataX 的自动化数据迁移只能够支持单值模型的数据迁移过程。
为了方便历史数据的迁移工作,我们为数据同步工具 DataX 提供了适配 TDengine 3.0 的插件,能够将数据自动写入到 TDengine 中,需要注意的是 DataX 的自动化数据迁移只能够支持单值模型的数据迁移过程。
DataX 具体的使用方式及如何使用 DataX 将数据写入 TDengine 请参见[基于 DataX 的 TDengine 数据迁移工具](https://www.taosdata.com/blog/2021/10/26/3156.html)。
### 安装和部署 TDengine
在对 DataX 进行迁移实践后,我们发现通过启动多个进程,同时迁移多个 metric 的方式,可以大幅度的提高迁移历史数据的效率,下面是迁移过程中的部分记录,希望这些能为应用迁移工作带来参考。
在进行数据迁移之前,要有一个正确运行的 TDengine 集群。首先是 TDengine 的安装,从官网上下载 TDengine 最新稳定版进行安装。各种安装包的使用帮助请参考 [安装指南](../../get-started/package)
安装完成后,请根据 [部署指南](../../deployment/deploy) 配置集群。
### 插件功能介绍
1. TDengine30Reader 提供的功能:
1. 支持通过 SQL 进行数据筛选;
2. 根据时间间隔进行任务切分;
3. 支持 TDengine 的全部数据类型;
4. 支持批量读取,通过 batchSize 参数控制批量拉取结果集的大小,提高读取性能。
2. TDengine30Writer 支持的功能:
1. 支持 OpenTSDB 的 json 格式的行协议,使用 TDengine 的 schemaless 方式写入 TDengine。
2. 支持批量写入,通过 batchSize 参数控制批量写入的数量,提高写入性能。
### DataX 安装环境准备
1. 需要安装 TDengine 客户端
2. 需要安装 JDK 1.8 环境(运行 DataX
3. 需要安装 Python 环境(运行 DataX
4. 需要 maven 编译环境(如果不编译 DataX 则可以不安装 maven
### 安装
1. 下载源码
~~~
git clone https://github.com/taosdata/DataX.git
~~~
2. 编译打包
~~~
cd DataX
mvn -U clean package assembly:assembly -Dmaven.test.skip=true
~~~
3. 安装
~~~
cp target/datax.tar.gz your_install_dir
cd your_install_dir
tar -zxvf dataX.tar.gz
~~~
### 数据迁移 Job 的配置
以一个从 OpenTSDB 到 TDengine 3.0 版本的数据迁移任务为例,配置文件 opentsdb2tdengine.json 如下:
~~~
{
"job":{
"content":[{
"reader": {
"name": "opentsdbreader",
"parameter": {
"endpoint": "http://192.168.1.180:4242",
"column": ["weather_temperature"],
"beginDateTime": "2021-01-01 00:00:00",
"endDateTime": "2021-01-01 01:00:00"
}
},
"writer": {
"name": "tdengine30writer",
"parameter": {
"username": "root",
"password": "taosdata",
"connection": [
{
"table": [
"matric1"
],
"jdbcUrl": "jdbc:TAOS://192.168.1.101:6030/test?timestampFormat=TIMESTAMP"
}
],
"batchSize": 1000,
"ignoreTagsUnmatched": true
}
}
}],
"setting": {
"speed": {
"channel": 1
}
}
}
}
~~~
配置说明:
1. 上面的配置表示,从 192.168.1.180 的 OpenTSDB到 192.168.1.101 的 TDengine 的迁移。迁移 metric 为 weather_temperature时间从 2021-01-01 00:00:00 开始,到 2021-01-01 01:00:00 结束的数据。
2. reader 使用 datax 的 opentsdbreaderparameter 的配置请参考:[opentsdbreader.md#配置参数](https://github.com/taosdata/DataX/blob/master/opentsdbreader/doc/opentsdbreader.md)
3. tdengine30writer 的 parameter 中userpassword 为必须项没有默认值。batchSize 不是必须项,默认值为 1。详细参考[tdengine30writer.md#配置参数](https://github.com/taosdata/DataX/blob/master/tdengine30writer/doc/tdengine30writer-CN.md)
4. TDengine 中,如果 dbname 指定的 database 不存在,则需要在迁移前创建数据库。
### 执行迁移任务
~~~
python bin/datax.py job/opentsdb2tdengine.json
~~~
### 限制条件
1. 目前DataX 自带的 opentsdbreader 仅支持 OpenTSDB-2.3.X 版本。详细参考:[opentsdbreader#约束限制](https://github.com/alibaba/DataX/blob/master/opentsdbreader/doc/opentsdbreader.md#5-%E7%BA%A6%E6%9D%9F%E9%99%90%E5%88%B6)
2. 数据迁移工具依赖 TDengine 客户端中的 `libtaos.so/taos.dll/libtaos.dylib`,需要与服务端对应版本的 TDengine-client。
### FAQ
1. 如何估算一个数据迁移任务所需要的资源
DataX 的每个 reader 按照自己的 task 切分策略进行任务划分,具体请参考 DataX 的任务调度规则。在估算资源是,需要按照数据迁移的数据量,任务切分规则和网络带宽限制等综合考虑,最好以实际数据迁移测试结果为准。
2. TDengine30Writer 的 batchSize 设置多大效率最高?
batchSize 是控制批量写入的参数,在获取 batchSize 行纪录后TDengineWriter 会向 TDengine 发送一次写入请求,这减少了与 TDengine 交互次数从而提高了性能。从测试结果来看batchSize 在 500-1000 范围内效率最高。
3. job 的配置中 channel 数为多少合适?
job 中的 channel 数为流量控制的参数,每个 channel 都需要开辟一块内存,用来缓存数据。如果 channel 设置过大,会引起 OOM所以 channel 数并不是越大越好。增加 channel 数后,需要提高 JVM 内存大小。从测试结果来看channel 在 16 的范围内都是合适,能够保证 DataX 的流量最大化即可。
4. java.sql.SQLException: TDengine ERROR (8000060b): Invalid client value
配置文件中 column 中没有配置 tbname此时会触发行协议数据写入行协议写入只会自动创建子表名但需要提前创建好超级表行协议写入的情况下不支持 TAG 数据类型为非 NCHAR所以此种情况有两种解决方案1.将 TAG 全部修改为 NCHAR 类型2.在 Column 中配置好表名称这样不会触发行协议写入。
5. java.sql.SQLException: TDengine ERROR (8000060b): Timestamp data out of range
配置文件中 column 中没有配置 tbname此时会触发行协议数据写入且 TAG 全部为 NCHAR 类型,此时需要保证时间戳的一列名称为 _ts而不能是其他名称行协议写入下默认将最后的时间戳写入到 _ts 一列,且不能随意命名)。若想避免请使用 tbname 指定表名以避免触发行协议写入。
### 提升性能
在对 DataX 进行迁移实践后,我们发现通过启动多个进程,同时迁移多个 metric 的方式,可以大幅度的提高迁移历史数据的效率,下面是迁移过程中的部分记录,希望这些能为应用迁移工作带来参考。
| DataX 实例个数 (并发进程个数) | 迁移记录速度 (条/秒) |
| ----------------------------- | --------------------- |
@ -184,9 +230,9 @@ DataX 具体的使用方式及如何使用 DataX 将数据写入 TDengine 请参
| 5 | 约 29.5 万 |
| 10 | 约 33 万 |
<br/>(注:测试数据源自 单节点 Intel(R) Core(TM) i7-10700 CPU@2.90GHz 16 核 64G 硬件设备channel 和 batchSize 分别为 8 和 1000每条记录包含 10 个 tag)
(注:测试数据源自 单节点 Intel(R) Core(TM) i7-10700 CPU@2.90GHz 16 核 64G 硬件设备channel 和 batchSize 分别为 8 和 1000每条记录包含 10 个 tag)
### 2、手动迁移数据
## 手动迁移数据
如果你需要使用多值模型进行数据写入,就需要自行开发一个将数据从 OpenTSDB 导出的工具,然后确认哪些时间线能够合并导入到同一个时间线,再将可以同时导入的时间通过 SQL 语句的写入到数据库中。
@ -393,31 +439,11 @@ WHERE ts>=1510560000 AND ts<=1515000009
综上所述,可使用单台 16 核 32GB 的机器,或者使用 2 台 8 核 16GB 机器构成的集群。
## 附录 3: 集群部署及启动
TDengine 提供了丰富的帮助文档说明集群安装、部署的诸多方面的内容,这里提供相应的文档列表,供你参考。
### 集群部署
首先是安装 TDengine从官网上下载 TDengine 最新稳定版,解压缩后运行 install.sh 进行安装。各种安装包的使用帮助请参见博客[《TDengine 多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)。
注意安装完成以后,不要立即启动 `taosd` 服务,在正确配置完成参数以后才启动 `taosd` 服务。
### 设置运行参数并启动服务
为确保系统能够正常获取运行的必要信息。请在服务端正确设置以下关键参数:
FQDN、firstEp、secondEP、dataDir、logDir、tmpDir、serverPort。各参数的具体含义及设置的要求可参见文档《[TDengine 集群安装、管理](/cluster/)》
按照相同的步骤,在需要运行的节点上设置参数,并启动 `taosd` 服务,然后添加 Dnode 到集群中。
最后启动 `taos` 命令行程序,执行命令 `show dnodes`,如果能看到所有的加入集群的节点,那么集群顺利搭建完成。具体的操作流程及注意事项,请参阅文档《[TDengine 集群安装、管理](/cluster/)》
## 附录 4: 超级表名称
## 附录 3: 超级表名称
由于 OpenTSDB 的 metric 名称中带有点号(“.”例如“cpu.usage_user”这种名称的 metric。但是点号在 TDengine 中具有特殊含义是用来分隔数据库和表名称的分隔符。TDengine 也提供转义符,以允许用户在(超级)表名称中使用关键词或特殊分隔符(如:点号)。为了使用特殊字符,需要采用转义字符将表的名称括起来,例如:`cpu.usage_user`这样就是合法的(超级)表名称。
## 附录 5:参考文章
## 附录 4参考文章
1. [使用 TDengine + collectd/StatsD + Grafana 快速搭建 IT 运维监控系统](/application/collectd/)
2. [通过 collectd 将采集数据直接写入 TDengine](/third-party/collectd/)
1. [使用 TDengine + collectd/StatsD + Grafana 快速搭建 IT 运维监控系统](../collectd/)
2. [通过 collectd 将采集数据直接写入 TDengine](../../third-party/collectd/)

View File

@ -92,7 +92,7 @@ int main(int argc, char *argv[])
}
// a simple way to parse input parameters
if (argc >= 3) strcpy(db, argv[2]);
if (argc >= 3) strncpy(db, argv[2], sizeof(db) - 1);
if (argc >= 4) points = atoi(argv[3]);
if (argc >= 5) numOfTables = atoi(argv[4]);

View File

@ -368,6 +368,12 @@ typedef struct SSortExecInfo {
int32_t readBytes; // read io bytes
} SSortExecInfo;
typedef struct STUidTagInfo {
char* name;
uint64_t uid;
void* pTagVal;
} STUidTagInfo;
// stream special block column
#define START_TS_COLUMN_INDEX 0

View File

@ -106,7 +106,7 @@ int32_t tBufferReserve(SBuffer *pBuffer, int64_t nData, void **ppData);
// SRow ================================
int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow);
void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal);
int32_t tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal);
void tRowDestroy(SRow *pRow);
void tRowSort(SArray *aRowP);
int32_t tRowMerge(SArray *aRowP, STSchema *pTSchema, int8_t flag);

View File

@ -49,6 +49,7 @@ extern int32_t tsTagFilterResCacheSize;
// queue & threads
extern int32_t tsNumOfRpcThreads;
extern int32_t tsNumOfRpcSessions;
extern int32_t tsNumOfCommitThreads;
extern int32_t tsNumOfTaskQueueThreads;
extern int32_t tsNumOfMnodeQueryThreads;
@ -86,9 +87,9 @@ extern int32_t tsTelemInterval;
extern char tsTelemServer[];
extern uint16_t tsTelemPort;
extern bool tsEnableCrashReport;
extern char* tsTelemUri;
extern char* tsClientCrashReportUri;
extern char* tsSvrCrashReportUri;
extern char *tsTelemUri;
extern char *tsClientCrashReportUri;
extern char *tsSvrCrashReportUri;
// query buffer management
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
@ -159,6 +160,8 @@ extern int32_t tsUptimeInterval;
extern int32_t tsRpcRetryLimit;
extern int32_t tsRpcRetryInterval;
extern bool tsDisableStream;
// #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDir, const char **envCmd,

View File

@ -1846,6 +1846,7 @@ typedef struct {
int8_t createStb;
uint64_t targetStbUid;
SArray* fillNullCols; // array of SColLocation
int64_t deleteMark;
int8_t igUpdate;
} SCMCreateStreamReq;

View File

@ -172,8 +172,8 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_SERVER_VERSION, "server-version", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_UPTIME_TIMER, "uptime-timer", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, "lost-consumer-clear", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_TIMER, "stream-checkpoint-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL)
// TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_TIMER, "stream-checkpoint-tmr", NULL, NULL)
// TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_VND_MSG)

View File

@ -76,7 +76,7 @@ enum {
enum {
MAIN_SCAN = 0x0u,
REVERSE_SCAN = 0x1u, // todo remove it
REPEAT_SCAN = 0x2u, // repeat scan belongs to the master scan
PRE_SCAN = 0x2u, // pre-scan belongs to the main scan and occurs before main scan
};
typedef struct SPoint1 {
@ -132,14 +132,16 @@ typedef struct SqlFunctionCtx {
SInputColumnInfoData input;
SResultDataInfo resDataInfo;
uint32_t order; // data block scanner order: asc|desc
uint8_t isPseudoFunc;// denote current function is pseudo function or not [added for perf reason]
uint8_t isNotNullFunc;// not return null value.
uint8_t scanFlag; // record current running step, default: 0
int16_t functionId; // function id
char *pOutput; // final result output buffer, point to sdata->data
int32_t numOfParams;
// input parameter, e.g., top(k, 20), the number of results of top query is kept in param
SFunctParam *param;
// corresponding output buffer for timestamp of each result, e.g., diff/csum
SColumnInfoData *pTsOutput;
int32_t numOfParams;
int32_t offset;
SResultRowEntryInfo *resultInfo;
SSubsidiaryResInfo subsidiaries;
@ -152,7 +154,7 @@ typedef struct SqlFunctionCtx {
struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity
SSerializeDataHandle saveHandle;
int32_t exprIdx;
char udfName[TSDB_FUNC_NAME_LEN];
char *udfName;
} SqlFunctionCtx;
typedef struct tExprNode {

View File

@ -114,6 +114,7 @@ int32_t streamStateGetParTag(SStreamState* pState, int64_t groupId, void** tagVa
#if 0
char* streamStateSessionDump(SStreamState* pState);
char* streamStateIntervalDump(SStreamState* pState);
#endif
#ifdef __cplusplus

View File

@ -175,20 +175,24 @@ typedef struct {
void streamFreeQitem(SStreamQueueItem* data);
#if 0
bool streamQueueResEmpty(const SStreamQueueRes* pRes);
int64_t streamQueueResSize(const SStreamQueueRes* pRes);
SStreamQueueNode* streamQueueResFront(SStreamQueueRes* pRes);
SStreamQueueNode* streamQueueResPop(SStreamQueueRes* pRes);
void streamQueueResClear(SStreamQueueRes* pRes);
SStreamQueueRes streamQueueBuildRes(SStreamQueueNode* pNode);
#endif
typedef struct {
SStreamQueueNode* pHead;
} SStreamQueue1;
#if 0
bool streamQueueHasTask(const SStreamQueue1* pQueue);
int32_t streamQueuePush(SStreamQueue1* pQueue, SStreamQueueItem* pItem);
SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue);
#endif
typedef struct {
STaosQueue* queue;
@ -633,9 +637,10 @@ typedef struct SStreamMeta {
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId);
void streamMetaClose(SStreamMeta* streamMeta);
int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask);
int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask);
int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* msg, int32_t msgLen);
SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId);
// SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId);
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
@ -644,7 +649,7 @@ void streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);
int32_t streamMetaBegin(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaRollBack(SStreamMeta* pMeta);
int32_t streamLoadTasks(SStreamMeta* pMeta);
int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver);
// checkpoint
int32_t streamProcessCheckpointSourceReq(SStreamMeta* pMeta, SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);

View File

@ -36,7 +36,7 @@ extern "C" {
#define SYNC_DEL_WAL_MS (1000 * 60)
#define SYNC_ADD_QUORUM_COUNT 3
#define SYNC_MNODE_LOG_RETENTION 10000
#define SYNC_VNODE_LOG_RETENTION 20
#define SYNC_VNODE_LOG_RETENTION (TSDB_SYNC_LOG_BUFFER_RETENTION + 1)
#define SNAPSHOT_MAX_CLOCK_SKEW_MS 1000 * 10
#define SNAPSHOT_WAIT_MS 1000 * 30

View File

@ -112,7 +112,12 @@ typedef struct SRpcInit {
// fail fast fp
RpcFFfp ffp;
void *parent;
int32_t connLimitNum;
int32_t connLimitLock;
int8_t supportBatch; // 0: no batch, 1. batch
int32_t batchSize;
void *parent;
} SRpcInit;
typedef struct {

View File

@ -41,6 +41,7 @@ extern char tsSSE42Enable;
extern char tsAVXEnable;
extern char tsAVX2Enable;
extern char tsFMAEnable;
extern char tsTagFilterCache;
extern char configDir[];
extern char tsDataDir[];

View File

@ -67,6 +67,10 @@ int32_t* taosGetErrno();
#define TSDB_CODE_RPC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x0019) //
#define TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED TAOS_DEF_ERROR_CODE(0, 0x0020) // "Vgroup could not be connected"
#define TSDB_CODE_RPC_SOMENODE_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0021) //
#define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 0x0022) //
//common & util
#define TSDB_CODE_OPS_NOT_SUPPORT TAOS_DEF_ERROR_CODE(0, 0x0100) //
@ -115,6 +119,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_APP_IS_STARTING TAOS_DEF_ERROR_CODE(0, 0x0130) //
#define TSDB_CODE_APP_IS_STOPPING TAOS_DEF_ERROR_CODE(0, 0x0131) //
#define TSDB_CODE_IVLD_DATA_FMT TAOS_DEF_ERROR_CODE(0, 0x0132) //
//client
#define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200)

View File

@ -43,6 +43,7 @@ typedef struct SArray {
* @return
*/
SArray* taosArrayInit(size_t size, size_t elemSize);
SArray* taosArrayInit_s(size_t size, size_t elemSize, size_t initialSize);
/**
*
@ -139,14 +140,6 @@ void* taosArrayGetLast(const SArray* pArray);
*/
size_t taosArrayGetSize(const SArray* pArray);
/**
* set the size of array
* @param pArray
* @param size size of the array
* @return
*/
void taosArraySetSize(SArray* pArray, size_t size);
/**
* insert data into array
* @param pArray

View File

@ -283,8 +283,9 @@ typedef enum ELogicConditionType {
#define TSDB_DNODE_ROLE_MGMT 1
#define TSDB_DNODE_ROLE_VNODE 2
#define TSDB_MAX_REPLICA 5
#define TSDB_SYNC_LOG_BUFFER_SIZE 4096
#define TSDB_MAX_REPLICA 5
#define TSDB_SYNC_LOG_BUFFER_SIZE 4096
#define TSDB_SYNC_LOG_BUFFER_RETENTION (TSDB_SYNC_LOG_BUFFER_SIZE >> 4)
#define TSDB_TBNAME_COLUMN_INDEX (-1)
#define TSDB_MULTI_TABLEMETA_MAX_NUM 100000 // maximum batch size allowed to load table meta
@ -414,7 +415,7 @@ typedef enum ELogicConditionType {
#ifdef WINDOWS
#define TSDB_MAX_RPC_THREADS 4 // windows pipe only support 4 connections.
#else
#define TSDB_MAX_RPC_THREADS 20
#define TSDB_MAX_RPC_THREADS 10
#endif
#define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type

View File

@ -89,7 +89,7 @@ bool taosAssertRelease(bool condition);
// Disable all asserts that may compromise the performance.
#if defined DISABLE_ASSERT
#define ASSERT(condition)
#define ASSERTS(condition, ...)
#define ASSERTS(condition, ...) (0)
#else
#define ASSERTS(condition, ...) taosAssertDebug(condition, __FILE__, __LINE__, __VA_ARGS__)
#ifdef NDEBUG

View File

@ -116,6 +116,7 @@ typedef struct SHNode {
struct SHNode *next;
uint32_t keyLen : 20;
uint32_t dataLen : 12;
uint32_t hashVal;
char data[];
} SHNode;
#pragma pack(pop)

View File

@ -45,11 +45,25 @@ typedef struct STraceId {
#define TRACE_GET_MSGID(traceId) (traceId)->msgId
#define TRACE_TO_STR(traceId, buf) \
do { \
int64_t rootId = (traceId) != NULL ? (traceId)->rootId : 0; \
int64_t msgId = (traceId) != NULL ? (traceId)->msgId : 0; \
sprintf(buf, "0x%" PRIx64 ":0x%" PRIx64 "", rootId, msgId); \
//#define TRACE_TO_STR(traceId, buf) \
// do { \
// int64_t rootId = (traceId) != NULL ? (traceId)->rootId : 0; \
// int64_t msgId = (traceId) != NULL ? (traceId)->msgId : 0; \
// sprintf(buf, "0x%" PRIx64 ":0x%" PRIx64 "", rootId, msgId); \
// } while (0)
#define TRACE_TO_STR(_traceId, _buf) \
do { \
int64_t rootId = (_traceId) != NULL ? (_traceId)->rootId : 0; \
int64_t msgId = (_traceId) != NULL ? (_traceId)->msgId : 0; \
char* _t = _buf; \
_t[0] = '0'; \
_t[1] = 'x'; \
_t += titoa(rootId, 16, &_t[2]); \
_t[0] = ':'; \
_t[1] = '0'; \
_t[2] = 'x'; \
_t += titoa(msgId, 16, &_t[3]); \
} while (0)
#ifdef __cplusplus

View File

@ -46,6 +46,9 @@ char *paGetToken(char *src, char **token, int32_t *tokenLen);
int32_t taosByteArrayToHexStr(char bytes[], int32_t len, char hexstr[]);
int32_t taosHexStrToByteArray(char hexstr[], char bytes[]);
int32_t tintToHex(uint64_t val, char hex[]);
int32_t titoa(uint64_t val, size_t radix, char str[]);
char *taosIpStr(uint32_t ipInt);
uint32_t ip2uint(const char *const ip_addr);
void taosIp2String(uint32_t ip, char *str);

328
include/util/xxhash.h Normal file
View File

@ -0,0 +1,328 @@
/*
xxHash - Extremely Fast Hash algorithm
Header File
Copyright (C) 2012-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- xxHash source repository : https://github.com/Cyan4973/xxHash
*/
/* Notice extracted from xxHash homepage :
xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.
Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)
Name Speed Q.Score Author
xxHash 5.4 GB/s 10
CrapWow 3.2 GB/s 2 Andrew
MumurHash 3a 2.7 GB/s 10 Austin Appleby
SpookyHash 2.0 GB/s 10 Bob Jenkins
SBox 1.4 GB/s 9 Bret Mulvey
Lookup3 1.2 GB/s 9 Bob Jenkins
SuperFastHash 1.2 GB/s 1 Paul Hsieh
CityHash64 1.05 GB/s 10 Pike & Alakuijala
FNV 0.55 GB/s 5 Fowler, Noll, Vo
CRC32 0.43 GB/s 9
MD5-32 0.33 GB/s 10 Ronald L. Rivest
SHA1-32 0.28 GB/s 10
Q.Score is a measure of quality of the hash function.
It depends on successfully passing SMHasher test set.
10 is a perfect score.
A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.
Name Speed on 64 bits Speed on 32 bits
XXH64 13.8 GB/s 1.9 GB/s
XXH32 6.8 GB/s 6.0 GB/s
*/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1
#if defined (__cplusplus)
extern "C" {
#endif
/* ****************************
* Definitions
******************************/
#include <stddef.h> /* size_t */
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
/* ****************************
* API modifier
******************************/
/** XXH_INLINE_ALL (and XXH_PRIVATE_API)
* This is useful to include xxhash functions in `static` mode
* in order to inline them, and remove their symbol from the public list.
* Inlining can offer dramatic performance improvement on small keys.
* Methodology :
* #define XXH_INLINE_ALL
* #include "xxhash.h"
* `xxhash.c` is automatically included.
* It's not useful to compile and link it as a separate module.
*/
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
# ifndef XXH_STATIC_LINKING_ONLY
# define XXH_STATIC_LINKING_ONLY
# endif
# if defined(__GNUC__)
# define XXH_PUBLIC_API static __inline __attribute__((unused))
# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define XXH_PUBLIC_API static inline
# elif defined(_MSC_VER)
# define XXH_PUBLIC_API static __inline
# else
/* this version may generate warnings for unused static functions */
# define XXH_PUBLIC_API static
# endif
#else
# define XXH_PUBLIC_API /* do nothing */
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
/*! XXH_NAMESPACE, aka Namespace Emulation :
*
* If you want to include _and expose_ xxHash functions from within your own library,
* but also want to avoid symbol collisions with other libraries which may also include xxHash,
*
* you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library
* with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values).
*
* Note that no change is required within the calling program as long as it includes `xxhash.h` :
* regular symbol name will be automatically translated by this header.
*/
#ifdef XXH_NAMESPACE
# define XXH_CAT(A,B) A##B
# define XXH_NAME2(A,B) XXH_CAT(A,B)
# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
#endif
/* *************************************
* Version
***************************************/
#define XXH_VERSION_MAJOR 0
#define XXH_VERSION_MINOR 6
#define XXH_VERSION_RELEASE 5
#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
XXH_PUBLIC_API unsigned XXH_versionNumber (void);
/*-**********************************************************************
* 32-bit hash
************************************************************************/
typedef unsigned int XXH32_hash_t;
/*! XXH32() :
Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input".
The memory between input & input+length must be valid (allocated and read-accessible).
"seed" can be used to alter the result predictably.
Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);
/*====== Streaming ======*/
typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed);
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
/*
* Streaming functions generate the xxHash of an input provided in multiple segments.
* Note that, for small input, they are slower than single-call functions, due to state management.
* For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
*
* XXH state must first be allocated, using XXH*_createState() .
*
* Start a new hash by initializing state with a seed, using XXH*_reset().
*
* Then, feed the hash state by calling XXH*_update() as many times as necessary.
* The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
*
* Finally, a hash value can be produced anytime, by using XXH*_digest().
* This function returns the nn-bits hash as an int or long long.
*
* It's still possible to continue inserting input into the hash state after a digest,
* and generate some new hashes later on, by calling again XXH*_digest().
*
* When done, free XXH state space if it was allocated dynamically.
*/
/*====== Canonical representation ======*/
typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
/* Default result type for XXH functions are primitive unsigned 32 and 64 bits.
* The canonical representation uses human-readable write convention, aka big-endian (large digits first).
* These functions allow transformation of hash result into and from its canonical format.
* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.
*/
#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
* 64-bit hash
************************************************************************/
typedef unsigned long long XXH64_hash_t;
/*! XXH64() :
Calculate the 64-bit hash of sequence of length "len" stored at memory address "input".
"seed" can be used to alter the result predictably.
This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
*/
XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);
/*====== Streaming ======*/
typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);
XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr);
/*====== Canonical representation ======*/
typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
#endif /* XXH_NO_LONG_LONG */
#ifdef XXH_STATIC_LINKING_ONLY
/* ================================================================================================
This section contains declarations which are not guaranteed to remain stable.
They may change in future versions, becoming incompatible with a different version of the library.
These declarations should only be used with static linking.
Never use them in association with dynamic linking !
=================================================================================================== */
/* These definitions are only present to allow
* static allocation of XXH state, on stack or in a struct for example.
* Never **ever** use members directly. */
#if !defined (__VMS) \
&& (defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
struct XXH32_state_s {
uint32_t total_len_32;
uint32_t large_len;
uint32_t v1;
uint32_t v2;
uint32_t v3;
uint32_t v4;
uint32_t mem32[4];
uint32_t memsize;
uint32_t reserved; /* never read nor write, might be removed in a future version */
}; /* typedef'd to XXH32_state_t */
struct XXH64_state_s {
uint64_t total_len;
uint64_t v1;
uint64_t v2;
uint64_t v3;
uint64_t v4;
uint64_t mem64[4];
uint32_t memsize;
uint32_t reserved[2]; /* never read nor write, might be removed in a future version */
}; /* typedef'd to XXH64_state_t */
# else
struct XXH32_state_s {
unsigned total_len_32;
unsigned large_len;
unsigned v1;
unsigned v2;
unsigned v3;
unsigned v4;
unsigned mem32[4];
unsigned memsize;
unsigned reserved; /* never read nor write, might be removed in a future version */
}; /* typedef'd to XXH32_state_t */
# ifndef XXH_NO_LONG_LONG /* remove 64-bit support */
struct XXH64_state_s {
unsigned long long total_len;
unsigned long long v1;
unsigned long long v2;
unsigned long long v3;
unsigned long long v4;
unsigned long long mem64[4];
unsigned memsize;
unsigned reserved[2]; /* never read nor write, might be removed in a future version */
}; /* typedef'd to XXH64_state_t */
# endif
# endif
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
# include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */
#endif
#endif /* XXH_STATIC_LINKING_ONLY */
#if defined (__cplusplus)
}
#endif
#endif /* XXHASH_H_5627135585666179 */

View File

@ -18,65 +18,58 @@ TDengine is an open-sourced big data platform under [GNU AGPL v3.0](http://www.g
## How to use this image
### Start a TDengine instance with RESTful API exposed
### Starting TDengine
Simply, you can use `docker run` to start a TDengine instance and connect it with restful connectors(eg. [JDBC-RESTful](https://www.taosdata.com/cn/documentation/connector/java)).
The TDengine image starts with the HTTP service activated by default, using the following command:
```bash
```shell
docker run -d --name tdengine -p 6041:6041 tdengine/tdengine
```
This command starts a docker container by name `tdengine` with TDengine server running, and maps the container's HTTP port 6041 to the host's port 6041. If you have `curl` in your host, you can list the databases by the command:
The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command.
```bash
```shell
curl -u root:taosdata -d "show databases" localhost:6041/rest/sql
```
You can execute the `taos` shell command in the container:
The TDengine client taos can be executed in this container to access TDengine using the following command.
```bash
```shell
$ docker exec -it tdengine taos
Welcome to the TDengine shell from Linux, Client Version:2.4.0.0
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos> show databases;
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
====================================================================================================================================================================================================================================================================================
log | 2022-01-17 13:57:22.270 | 10 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready |
Query OK, 1 row(s) in set (0.002843s)
name |
=================================
information_schema |
performance_schema |
Query OK, 2 row(s) in set (0.002843s)
```
Since TDengine use container hostname to establish connections, it's a bit more complex to use TDengine CLI and native connectors(such as JDBC-JNI) with TDengine container instance. This is the recommended way to expose ports and use TDengine with docker in simple cases. If you want to use TDengine CLI or taosc/connectors smoothly outside the `tdengine` container, see next use cases that match you need.
The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from outside the container using TDengine CLI or various connectors for complex scenarios.
### Start with host network
### Start TDengine on the host network
```bash
```shell
docker run -d --name tdengine --network host tdengine/tdengine
```
Starts container with `host` network will use host's hostname as fqdn instead of container id. It's much like starting natively with `systemd` in host. After installing the client, you can use `taos` shell as normal in host path.
The above command starts TDengine on the host network and uses the host's FQDN to establish a connection instead of the container's hostname. It is the equivalent of using `systemctl` to start TDengine on the host. If the TDengine client is already installed on the host, you can access it directly with the following command.
```bash
```shell
$ taos
Welcome to the TDengine shell from Linux, Client Version:2.4.0.0
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos> show dnodes;
id | end_point | vnodes | cores | status | role | create_time | offline reason |
======================================================================================================================================
1 | host:6030 | 1 | 8 | ready | any | 2022-01-17 22:10:32.619 | |
1 | myhost:6030 | 1 | 8 | ready | any | 2022-01-17 22:10:32.619 | |
Query OK, 1 row(s) in set (0.003233s)
```
### Start with exposed ports and specified hostname
### Start TDengine with the specified hostname and port
Set the fqdn explicitly will help you to use in other environment or applications. We provide environment variable `TAOS_FQDN` or `fqdn` config option to explicitly set the hostname used by TDengine container instance(s).
The `TAOS_FQDN` environment variable or the `fqdn` configuration item in `taos.cfg` allows TDengine to establish a connection at the specified hostname. This approach provides greater flexibility for deployment.
Use `TAOS_FQDN` variable within `docker run` command:
```bash
```shell
docker run -d \
--name tdengine \
-e TAOS_FQDN=tdengine \
@ -85,79 +78,58 @@ docker run -d \
tdengine/tdengine
```
This command starts a docker container with TDengine server running and maps the container's TCP ports from 6030 to 6049 to the host's ports from 6030 to 6049 with TCP protocol and UDP ports range 6030-6039 to the host's UDP ports 6030-6039. If the host is already running TDengine server and occupying the same port(s), you need to map the container's port to a different unused port segment. (Please see TDengine 2.0 Port Description for details). In order to support TDengine clients accessing TDengine server services, both TCP and UDP ports need to be exposed by default(unless `rpcForceTcp` is set to `1`).
The above command starts a TDengine service in the container, which listens to the hostname tdengine, and maps the container's port segment 6030 to 6049 to the host's port segment 6030 to 6049 (both TCP and UDP ports need to be mapped). If the port segment is already occupied on the host, you can modify the above command to specify a free port segment on the host. If `rpcForceTcp` is set to `1`, you can map only the TCP protocol.
If you want to use TDengine CLI or native connectors([JDBC-JNI](https://www.taosdata.com/cn/documentation/connector/java), or [driver-go](https://github.com/taosdata/driver-go)), you need to make sure the `TAOS_FQDN` is resolvable at `/etc/hosts` or with custom DNS service.
Next, ensure the hostname "tdengine" is resolvable in `/etc/hosts`.
If you set the `TAOS_FQDN` to host's hostname, it will works as using `hosts` network like previous use case. Otherwise, like in `-e TAOS_FQDN=tdengine`, you can add the hostname record `tdengine` into `/etc/hosts` (use `127.0.0.1` here in host path, if use TDengine client/application in other hosts, you should set the right ip to the host eg. `192.168.10.1`(check the real ip in host with `hostname -i` or `ip route list default`) to make the TDengine endpoint resolvable):
```bash
```shell
echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts
```
Then you can use `taos` with the host `tdengine`:
Finally, the TDengine service can be accessed from the TDengine CLI or any connector with "tdengine" as the server address.
```bash
taos -h tdengine
```shell
taos -h tdengine -P 6030
```
Or develop/test applications with native connectors. As in python:
If set `TAOS_FQDN` to the same hostname, the effect is the same as "Start TDengine on host network".
```python
import taos;
conn = taos.connect(host = "tdengine")
res = conn.query("show databases")
for row in res.fetch_all_into_dict():
print(row)
```
### Start TDengine on the specified network
See the results:
You can also start TDengine on a specific network. Perform the following steps:
```bash
Python 3.8.10 (default, Nov 26 2021, 20:14:08)
[GCC 9.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import taos;
>>> conn = taos.connect(host = "tdengine")
>>> res = conn.query("show databases")
>>> for row in res.fetch_all_into_dict():
... print(row)
...
{'name': 'log', 'created_time': datetime.datetime(2022, 1, 17, 22, 56, 2, 490000), 'ntables': 11, 'vgroups': 1, 'replica': 1, 'quorum': 1, 'days': 10, 'keep': '30', 'cache(MB)': 1, 'blocks': 3, 'minrows': 100, 'maxrows': 4096, 'wallevel': 1, 'fsync': 3000, 'comp': 2, 'cachelast': 0, 'precision': 'us', 'update': 0, 'status': 'ready'}
```
1. First, create a docker network named `td-net`
### Start with specific network
```shell
docker network create td-net
```
Alternatively, you can use TDengine natively by using specific network.
2. Start TDengine
First, create network for TDengine server and client/application.
Start the TDengine service on the `td-net` network with the following command:
```bash
docker network create td-net
```
```shell
docker run -d --name tdengine --network td-net \
-e TAOS_FQDN=tdengine \
tdengine/tdengine
```
Start TDengine instance with service name as fqdn (explicitly set with `TAOS_FQDN`):
3. Start the TDengine client in another container on the same network
```bash
docker run -d --name tdengine --network td-net \
-e TAOS_FQDN=tdengine \
tdengine/tdengine
```
```shell
docker run --rm -it --network td-net -e TAOS_FIRST_EP=tdengine tdengine/tdengine taos
# or
#docker run --rm -it --network td-net -e tdengine/tdengine taos -h tdengine
```
Start TDengine client in another container with the specific network:
### Launching a client application in a container
```bash
docker run --rm -it --network td-net -e TAOS_FIRST_EP=tdengine tdengine/tdengine taos
# or
docker run --rm -it --network td-net -e tdengine/tdengine taos -h tdengine
```
If you want to start your application in a container, you need to add the corresponding dependencies on TDengine to the image as well, e.g.
When you build your application with docker, you should add the TDengine client in the dockerfile, as based on `ubuntu:20.04` image, install the client like this:
```dockerfile
```docker
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=2.4.0.0
ENV TDENGINE_VERSION=3.0.0.0
RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
@ -169,10 +141,7 @@ RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENG
#CMD ["app"]
```
Here is an Go example app:
<!-- code-spell-checker:disable -->
<!-- markdownlint-disable MD010 -->
Here is an example GO program:
```go
/*
@ -181,19 +150,19 @@ Here is an Go example app:
package main
import (
"database/sql"
"flag"
"fmt"
"time"
"database/sql"
"flag"
"fmt"
"time"
_ "github.com/taosdata/driver-go/v2/taosSql"
_ "github.com/taosdata/driver-go/v3/taosSql"
)
type config struct {
hostName string
serverPort string
user string
password string
hostName string
serverPort string
user string
password string
}
var configPara config
@ -201,70 +170,67 @@ var taosDriverName = "taosSql"
var url string
func init() {
flag.StringVar(&configPara.hostName, "h", "", "The host to connect to TDengine server.")
flag.StringVar(&configPara.serverPort, "p", "", "The TCP/IP port number to use for the connection to TDengine server.")
flag.StringVar(&configPara.user, "u", "root", "The TDengine user name to use when connecting to the server.")
flag.StringVar(&configPara.password, "P", "taosdata", "The password to use when connecting to the server.")
flag.Parse()
flag.StringVar(&configPara.hostName, "h", "", "The host to connect to TDengine server.")
flag.StringVar(&configPara.serverPort, "p", "", "The TCP/IP port number to use for the connection to TDengine server.")
flag.StringVar(&configPara.user, "u", "root", "The TDengine user name to use when connecting to the server.")
flag.StringVar(&configPara.password, "P", "taosdata", "The password to use when connecting to the server.")
flag.Parse()
}
func printAllArgs() {
fmt.Printf("============= args parse result: =============\n")
fmt.Printf("hostName: %v\n", configPara.hostName)
fmt.Printf("serverPort: %v\n", configPara.serverPort)
fmt.Printf("usr: %v\n", configPara.user)
fmt.Printf("password: %v\n", configPara.password)
fmt.Printf("================================================\n")
fmt.Printf("============= args parse result: =============\n")
fmt.Printf("hostName: %v\n", configPara.hostName)
fmt.Printf("serverPort: %v\n", configPara.serverPort)
fmt.Printf("usr: %v\n", configPara.user)
fmt.Printf("password: %v\n", configPara.password)
fmt.Printf("================================================\n")
}
func main() {
printAllArgs()
printAllArgs()
url = "root:taosdata@/tcp(" + configPara.hostName + ":" + configPara.serverPort + ")/"
url = "root:taosdata@/tcp(" + configPara.hostName + ":" + configPara.serverPort + ")/"
taos, err := sql.Open(taosDriverName, url)
checkErr(err, "open database error")
defer taos.Close()
taos, err := sql.Open(taosDriverName, url)
checkErr(err, "open database error")
defer taos.Close()
taos.Exec("create database if not exists test")
taos.Exec("use test")
taos.Exec("create table if not exists tb1 (ts timestamp, a int)")
_, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)")
checkErr(err, "failed to insert")
rows, err := taos.Query("select * from tb1")
checkErr(err, "failed to select")
taos.Exec("create database if not exists test")
taos.Exec("use test")
taos.Exec("create table if not exists tb1 (ts timestamp, a int)")
_, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)")
checkErr(err, "failed to insert")
rows, err := taos.Query("select * from tb1")
checkErr(err, "failed to select")
defer rows.Close()
for rows.Next() {
var r struct {
ts time.Time
a int
}
err := rows.Scan(&r.ts, &r.a)
if err != nil {
fmt.Println("scan error:\n", err)
return
}
fmt.Println(r.ts, r.a)
}
defer rows.Close()
for rows.Next() {
var r struct {
ts time.Time
a int
}
err := rows.Scan(&r.ts, &r.a)
if err != nil {
fmt.Println("scan error:\n", err)
return
}
fmt.Println(r.ts, r.a)
}
}
func checkErr(err error, prompt string) {
if err != nil {
fmt.Println("ERROR: %s\n", prompt)
panic(err)
}
if err != nil {
fmt.Println("ERROR: %s\n", prompt)
panic(err)
}
}
```
<!-- markdownlint-enable MD010 -->
<!-- code-spell-checker:enable -->
Here is the full Dockerfile:
Full version of dockerfile could be:
```dockerfile
```docker
FROM golang:1.17.6-buster as builder
ENV TDENGINE_VERSION=2.4.0.0
ENV TDENGINE_VERSION=3.0.0.0
RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
@ -274,11 +240,13 @@ RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENG
WORKDIR /usr/src/app/
ENV GOPROXY="https://goproxy.io,direct"
COPY ./main.go ./go.mod ./go.sum /usr/src/app/
RUN go env && go mod tidy && go build
RUN go env
RUN go mod tidy
RUN go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=2.4.0.0
ENV TDENGINE_VERSION=3.0.0.0
RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
@ -291,9 +259,9 @@ COPY --from=builder /usr/src/app/app /usr/bin/
CMD ["app"]
```
Suppose you have `main.go`, `go.mod` `go.sum`, `app.dockerfile`, build the app and run it with network `td-net`:
Now that we have `main.go`, `go.mod`, `go.sum`, `app.dockerfile`, we can build the application and start it on the `td-net` network.
```bash
```shell
$ docker build -t app -f app.dockerfile
$ docker run --rm --network td-net app -h tdengine -p 6030
============= args parse result: =============
@ -316,26 +284,18 @@ password: taosdata
2022-01-18 01:43:51.029 +0000 UTC 3
```
Now you must be much familiar with developing and testing with TDengine, let's see some more complex cases.
### Start the TDengine cluster with docker-compose
### Start with docker-compose with multiple nodes(instances)
1. The following docker-compose file starts a TDengine cluster with three nodes.
Start a 2-replicas-2-mnodes-2-dnodes-1-arbitrator TDengine cluster with `docker-compose` is quite simple. Save the file as `docker-compose.yml`:
```yaml
```yml
version: "3"
services:
arbitrator:
image: tdengine/tdengine:$VERSION
command: tarbitrator
td-1:
image: tdengine/tdengine:$VERSION
environment:
TAOS_FQDN: "td-1"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td1:/var/lib/taos/
- taoslog-td1:/var/log/taos/
@ -344,101 +304,95 @@ services:
environment:
TAOS_FQDN: "td-2"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td2:/var/lib/taos/
- taoslog-td2:/var/log/taos/
td-3:
image: tdengine/tdengine:$VERSION
environment:
TAOS_FQDN: "td-3"
TAOS_FIRST_EP: "td-1"
volumes:
- taosdata-td3:/var/lib/taos/
- taoslog-td3:/var/log/taos/
volumes:
taosdata-td1:
taoslog-td1:
taosdata-td2:
taoslog-td2:
taosdata-td3:
taoslog-td3:
```
You may notice that:
:::note
- We use `VERSION` environment variable to set `tdengine` image tag version once.
- **`TAOS_FIRST_EP`** **MUST** be set to join the newly created instances into an existing TDengine cluster. If you want more instances, use `TAOS_SECOND_EP` in case of HA(High Availability) concerns.
- `TAOS_NUM_OF_MNODES` is for setting number of mnodes for the cluster.
- `TAOS_REPLICA` set the default database replicas, `2` means there're one master and one slave copy of data. The `replica` option should be `1 <= replica <= 3`, and not greater than dnodes number.
- `TAOS_ARBITRATOR` set the arbitrator entrypoint of the cluster for failover/election stuff. It's better to use arbitrator in a two nodes cluster.
- The way to start an arbitrator service is as easy as abc: just add command name `tarbitrator`(which is the binary name of arbitrator daemon) in docker-compose service option: `command: tarbitrator`, and everything is ok now.
- The `VERSION` environment variable is used to set the tdengine image tag
- `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time
:::
Now run `docker-compose up -d` with version specified:
2. Start the cluster
```bash
$ VERSION=2.4.0.0 docker-compose up -d
Creating network "test_default" with the default driver
Creating volume "test_taosdata-td1" with default driver
Creating volume "test_taoslog-td1" with default driver
Creating volume "test_taosdata-td2" with default driver
Creating volume "test_taoslog-td2" with default driver
Creating test_td-1_1 ... done
Creating test_arbitrator_1 ... done
Creating test_td-2_1 ... done
```
```shell
$ VERSION=3.0.0.0 docker-compose up -d
Creating network "test_default" with the default driver
Creating volume "test_taosdata-td1" with default driver
Creating volume "test_taoslog-td1" with default driver
Creating volume "test_taosdata-td2" with default driver
Creating volume "test_taoslog-td2" with default driver
Creating test_td-1_1 ... done
Creating test_arbitrator_1 ... done
Creating test_td-2_1 ... done
```
Check the status:
3. Check the status of each node
```bash
$ docker-compose ps
Name Command State Ports
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
test_arbitrator_1 /usr/bin/entrypoint.sh tar ... Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
test_td-1_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
test_td-2_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
```
```shell
$ docker-compose ps
Name Command State Ports
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
test_arbitrator_1 /usr/bin/entrypoint.sh tar ... Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
test_td-1_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
test_td-2_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
```
Check dnodes with TDengine CLI:
4. Show dnodes via TDengine CLI
```bash
```shell
$ docker-compose exec td-1 taos -s "show dnodes"
Welcome to the TDengine shell from Linux, Client Version:2.4.0.0
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos> show dnodes
id | end_point | vnodes | cores | status | role | create_time | offline reason |
id | endpoint | vnodes | support_vnodes | status | create_time | note |
======================================================================================================================================
1 | td-1:6030 | 1 | 8 | ready | any | 2022-01-18 02:47:42.871 | |
2 | td-2:6030 | 0 | 8 | ready | any | 2022-01-18 02:47:43.518 | |
0 | arbitrator:6042 | 0 | 0 | ready | arb | 2022-01-18 02:47:43.633 | - |
Query OK, 3 row(s) in set (0.000811s)
1 | td-1:6030 | 0 | 32 | ready | 2022-08-19 07:57:29.971 | |
2 | td-2:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.415 | |
3 | td-3:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.417 | |
Query OK, 3 rows in database (0.021262s)
```
### Start a TDengine cluster with scaled taosadapter service
## taosAdapter
In previous use case, you could see the way to start other services built with TDengine(`taosd` as the default command). There's another important service you should know:
1. taosAdapter is enabled by default in the TDengine container. If you want to disable it, specify the environment variable `TAOS_DISABLE_ADAPTER=true` at startup
> **taosAdapter** is a TDengines companion tool and is a bridge/adapter between TDengine cluster and application. It provides an easy-to-use and efficient way to ingest data from data collections agents(like Telegraf, StatsD, CollectD) directly. It also provides InfluxDB/OpenTSDB compatible data ingestion interface to allow InfluxDB/OpenTSDB applications to immigrate to TDengine seamlessly.
2. At the same time, for flexible deployment, taosAdapter can be started in a separate container
`taosadapter` is running inside `tdengine` image by default, you can disable it by `TAOS_DISABLE_ADAPTER=true`. Running `taosadapter` in a separate container is like how `arbitrator` does:
```docker
services:
# ...
adapter:
image: tdengine/tdengine:$VERSION
command: taosadapter
```
```yaml
services:
# ...
adapter:
image: tdengine/tdengine:$VERSION
command: taosadapter
```
Suppose you want to deploy multiple taosAdapters to improve throughput and provide high availability. In that case, the recommended configuration method uses a reverse proxy such as Nginx to offer a unified access entry. For specific configuration methods, please refer to the official documentation of Nginx. Here is an example:
`taosadapter` could be scaled with docker-compose, so that you can manage the `taosadapter` nodes easily. Here is an example shows 4-`taosadapter` instances in a TDengine cluster(much like previous use cases):
```yaml
```yml
version: "3"
networks:
inter:
api:
services:
arbitrator:
image: tdengine/tdengine:$VERSION
command: tarbitrator
networks:
- inter
td-1:
image: tdengine/tdengine:$VERSION
networks:
@ -446,9 +400,6 @@ services:
environment:
TAOS_FQDN: "td-1"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td1:/var/lib/taos/
- taoslog-td1:/var/log/taos/
@ -459,15 +410,12 @@ services:
environment:
TAOS_FQDN: "td-2"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td2:/var/lib/taos/
- taoslog-td2:/var/log/taos/
adapter:
image: tdengine/tdengine:$VERSION
command: taosadapter
entrypoint: "taosadapter"
networks:
- inter
environment:
@ -481,7 +429,6 @@ services:
- adapter
networks:
- inter
- api
ports:
- 6041:6041
- 6044:6044/udp
@ -504,100 +451,14 @@ volumes:
taoslog-td2:
```
Start the cluster:
## Deploy with docker swarm
```bash
$ VERSION=2.4.0.0 docker-compose up -d
Creating network "docker_inter" with the default driver
Creating network "docker_api" with the default driver
Creating volume "docker_taosdata-td1" with default driver
Creating volume "docker_taoslog-td1" with default driver
Creating volume "docker_taosdata-td2" with default driver
Creating volume "docker_taoslog-td2" with default driver
Creating docker_td-2_1 ... done
Creating docker_arbitrator_1 ... done
Creating docker_td-1_1 ... done
Creating docker_adapter_1 ... done
Creating docker_adapter_2 ... done
Creating docker_adapter_3 ... done
```
If you want to deploy a container-based TDengine cluster on multiple hosts, you can use docker swarm. First, to establish a docker swarm cluster on these hosts, please refer to the official docker documentation.
It will start a TDengine cluster with two dnodes and four taosadapter instances, expose ports 6041/tcp and 6044/udp to host.
The docker-compose file can refer to the previous section. Here is the command to start TDengine with docker swarm:
`6041` is the RESTful API endpoint port, you can verify that the RESTful interface taosAdapter provides working using the `curl` command.
```bash
$ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2022-01-18 04:37:42.902",16,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1}
```
If you run curl in batch(here we use [hyperfine](https://github.com/sharkdp/hyperfine) - a command-line benchmarking tool), the requests are balanced into 4 adapter instances.
```bash
hyperfine -m10 'curl -u root:taosdata localhost:6041/rest/sql -d "describe log.log"'
```
View the logs with `docker-compose logs`:
```bash
$ docker-compose logs adapter
# some logs skipped
adapter_2 | 01/18 04:57:44.616529 00000039 TAOS_ADAPTER info "| 200 | 162.185µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18
adapter_1 | 01/18 04:57:44.627695 00000039 TAOS_ADAPTER info "| 200 | 145.485µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=17
adapter_3 | 01/18 04:57:44.639165 00000040 TAOS_ADAPTER info "| 200 | 146.913µs | 172.21.0.9 | POST | /rest/sql " sessionID=17 model=web
adapter_4 | 01/18 04:57:44.650829 00000039 TAOS_ADAPTER info "| 200 | 153.201µs | 172.21.0.9 | POST | /rest/sql " sessionID=17 model=web
adapter_2 | 01/18 04:57:44.662422 00000039 TAOS_ADAPTER info "| 200 | 211.393µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=19
adapter_1 | 01/18 04:57:44.673426 00000039 TAOS_ADAPTER info "| 200 | 154.714µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18
adapter_3 | 01/18 04:57:44.684788 00000040 TAOS_ADAPTER info "| 200 | 131.876µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18
adapter_4 | 01/18 04:57:44.696261 00000039 TAOS_ADAPTER info "| 200 | 162.173µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18
adapter_2 | 01/18 04:57:44.707414 00000039 TAOS_ADAPTER info "| 200 | 164.419µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20
adapter_1 | 01/18 04:57:44.720842 00000039 TAOS_ADAPTER info "| 200 | 179.374µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=19
adapter_3 | 01/18 04:57:44.732184 00000040 TAOS_ADAPTER info "| 200 | 141.174µs | 172.21.0.9 | POST | /rest/sql " sessionID=19 model=web
adapter_4 | 01/18 04:57:44.744024 00000039 TAOS_ADAPTER info "| 200 | 159.774µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=19
adapter_2 | 01/18 04:57:44.773732 00000039 TAOS_ADAPTER info "| 200 | 178.993µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=21
adapter_1 | 01/18 04:57:44.796518 00000039 TAOS_ADAPTER info "| 200 | 238.24µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20
adapter_3 | 01/18 04:57:44.810744 00000040 TAOS_ADAPTER info "| 200 | 176.133µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20
adapter_4 | 01/18 04:57:44.826395 00000039 TAOS_ADAPTER info "| 200 | 149.215µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20
```
`6044/udp` is the [StatsD](https://github.com/statsd/statsd)-compatible port, you can verify this feature with `nc` command(usually provided by `netcat` package).
```bash
echo "foo:1|c" | nc -u -w0 127.0.0.1 6044
```
Check the result in `taos` shell with `docker-compose exec`:
```bash
$ dc exec td-1 taos
Welcome to the TDengine shell from Linux, Client Version:2.4.0.0
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos> show databases;
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
====================================================================================================================================================================================================================================================================================
log | 2022-01-18 04:37:42.902 | 17 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready |
statsd | 2022-01-18 04:45:02.563 | 1 | 1 | 2 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
Query OK, 2 row(s) in set (0.001838s)
taos> select * from statsd.foo;
ts | value | metric_type |
=======================================================================================
2022-01-18 04:45:02.563422822 | 1 | counter |
Query OK, 1 row(s) in set (0.003854s)
```
Use `docker-compose up -d adapter=1 to reduce the instances to 1
### Deploy TDengine cluster in Docker Swarm with `docker-compose.yml`
If you use docker swarm mode, it will schedule arbitrator/taosd/taosadapter services into different hosts automatically. If you've no experience with k8s/kubernetes, this is the most convenient way to scale out the TDengine cluster with multiple hosts/servers.
Use the `docker-compose.yml` file in previous use case, and deploy with `docker stack` or `docker deploy`:
```bash
$ VERSION=2.4.0 docker stack deploy -c docker-compose.yml taos
```shell
$ VERSION=3.0.0.0 docker stack deploy -c docker-compose.yml taos
Creating network taos_inter
Creating network taos_api
Creating service taos_arbitrator
@ -607,58 +468,40 @@ Creating service taos_adapter
Creating service taos_nginx
```
Now you've created a TDengine cluster with multiple host servers.
Checking status:
Use `docker service` or `docker stack` to manage the cluster:
<!-- code-spell-checker:disable -->
```bash
```shell
$ docker stack ps taos
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
79ni8temw59n taos_nginx.1 nginx:latest TM1701 Running Running about a minute ago
3e94u72msiyg taos_adapter.1 tdengine/tdengine:2.4.0 TM1702 Running Running 56 seconds ago
100amjkwzsc6 taos_td-2.1 tdengine/tdengine:2.4.0 TM1703 Running Running about a minute ago
pkjehr2vvaaa taos_td-1.1 tdengine/tdengine:2.4.0 TM1704 Running Running 2 minutes ago
tpzvgpsr1qkt taos_arbitrator.1 tdengine/tdengine:2.4.0 TM1705 Running Running 2 minutes ago
rvss3g5yg6fa taos_adapter.2 tdengine/tdengine:2.4.0 TM1706 Running Running 56 seconds ago
i2augxamfllf taos_adapter.3 tdengine/tdengine:2.4.0 TM1707 Running Running 56 seconds ago
lmjyhzccpvpg taos_adapter.4 tdengine/tdengine:2.4.0 TM1708 Running Running 56 seconds ago
79ni8temw59n taos_nginx.1 nginx:latest TM1701 Running Running about a minute ago
3e94u72msiyg taos_adapter.1 tdengine/tdengine:3.0.0.0 TM1702 Running Running 56 seconds ago
100amjkwzsc6 taos_td-2.1 tdengine/tdengine:3.0.0.0 TM1703 Running Running about a minute ago
pkjehr2vvaaa taos_td-1.1 tdengine/tdengine:3.0.0.0 TM1704 Running Running 2 minutes ago
tpzvgpsr1qkt taos_arbitrator.1 tdengine/tdengine:3.0.0.0 TM1705 Running Running 2 minutes ago
rvss3g5yg6fa taos_adapter.2 tdengine/tdengine:3.0.0.0 TM1706 Running Running 56 seconds ago
i2augxamfllf taos_adapter.3 tdengine/tdengine:3.0.0.0 TM1707 Running Running 56 seconds ago
lmjyhzccpvpg taos_adapter.4 tdengine/tdengine:3.0.0.0 TM1708 Running Running 56 seconds ago
$ docker service ls
ID NAME MODE REPLICAS IMAGE PORTS
561t4lu6nfw6 taos_adapter replicated 4/4 tdengine/tdengine:2.4.0
3hk5ct3q90sm taos_arbitrator replicated 1/1 tdengine/tdengine:2.4.0
561t4lu6nfw6 taos_adapter replicated 4/4 tdengine/tdengine:3.0.0.0
3hk5ct3q90sm taos_arbitrator replicated 1/1 tdengine/tdengine:3.0.0.0
d8qr52envqzu taos_nginx replicated 1/1 nginx:latest *:6041->6041/tcp, *:6044->6044/udp
2isssfvjk747 taos_td-1 replicated 1/1 tdengine/tdengine:2.4.0
9pzw7u02ichv taos_td-2 replicated 1/1 tdengine/tdengine:2.4.0
2isssfvjk747 taos_td-1 replicated 1/1 tdengine/tdengine:3.0.0.0
9pzw7u02ichv taos_td-2 replicated 1/1 tdengine/tdengine:3.0.0.0
```
<!-- code-spell-checker:enable -->
From the above output, you can see two dnodes, two taosAdapters, and one Nginx reverse proxy service.
It shows that there are two dnodes, one arbitrator, four taosadapter and one nginx reverse-forward service in this cluster.
Next, we can reduce the number of taosAdapter services.
You can scale down the taosadapter replicas to `1` by `docker service`:
```bash
```shell
$ docker service scale taos_adapter=1
taos_adapter scaled to 1
overall progress: 1 out of 1 tasks
1/1: running [==================================================>]
overall progress: 1 out of 1 tasks
1/1: running [==================================================>]
verify: Service converged
$ docker service ls -f name=taos_adapter
ID NAME MODE REPLICAS IMAGE PORTS
561t4lu6nfw6 taos_adapter replicated 1/1 tdengine/tdengine:2.4.0
561t4lu6nfw6 taos_adapter replicated 1/1 tdengine/tdengine:3.0.0.0
```
Now it remains only 1 taosadapter instance in the cluster.
When you want to remove the cluster, just type:
```bash
docker stack rm taos
```
### Environment Variables
When you start `tdengine` image, you can adjust the configuration of TDengine by passing environment variables on the `docker run` command line or in the docker compose file. You can use all of the environment variables that passed to taosd or taosadapter.

View File

@ -1,77 +0,0 @@
version: "3"
networks:
inter:
api:
services:
arbitrator:
image: tdengine/tdengine:$VERSION
command: tarbitrator
networks:
- inter
td-1:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-1"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td1:/var/lib/taos/
- taoslog-td1:/var/log/taos/
td-2:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-2"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td2:/var/lib/taos/
- taoslog-td2:/var/log/taos/
adapter:
image: tdengine/tdengine:$VERSION
command: taosadapter
networks:
- inter
environment:
TAOS_FIRST_EP: "td-1"
TOAS_SECOND_EP: "td-2"
deploy:
replicas: 4
update_config:
parallelism: 4
nginx:
image: nginx
depends_on:
- adapter
networks:
- inter
- api
ports:
- 6041:6041
- 6044:6044/udp
command: [
"sh",
"-c",
"while true;
do curl -s http://adapter:6041/-/ping >/dev/null && break;
done;
printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
> /etc/nginx/conf.d/rest.conf;
printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
nginx -g 'daemon off;'",
]
volumes:
taosdata-td1:
taoslog-td1:
taosdata-td2:
taoslog-td2:

View File

@ -1,9 +1,9 @@
#!/bin/bash
set -e
#set -x
set -v
set -v
# dockerbuild.sh
# dockerbuild.sh
# -n [version number]
# -p [xxxx]
# -V [stable | beta]
@ -28,7 +28,7 @@ do
V)
#echo "verType=$OPTARG"
verType=$(echo $OPTARG)
;;
;;
h)
echo "Usage: `basename $0` -n [version number] "
echo " -p [password for docker hub] "
@ -39,8 +39,8 @@ do
a)
#echo "dockerLatest=$OPTARG"
dockerLatest=$(echo $OPTARG)
;;
?) #unknow option
;;
?) #unknow option
echo "unkonw argument"
exit 1
;;
@ -60,7 +60,7 @@ if [ "$verType" == "stable" ]; then
elif [ "$verType" == "beta" ];then
verType=beta
tagVal=ver-${version}-beta
dockerinput=TDengine-server-${version}-${verType}-Linux-$cpuType.tar.gz
dockerinput=TDengine-server-${version}-${verType}-Linux-$cpuType.tar.gz
dockerinput_x64=TDengine-server-${version}-${verType}-Linux-amd64.tar.gz
dockerim=tdengine/tdengine-beta
dockeramd64=tdengine/tdengine-amd64-beta
@ -73,30 +73,30 @@ fi
username="tdengine"
# generate docker verison
# generate docker version
echo "generate ${dockerim}:${version}"
docker manifest create -a ${dockerim}:${version} ${dockeramd64}:${version} ${dockeraarch64}:${version}
docker manifest inspect ${dockerim}:${version}
docker manifest rm ${dockerim}:${version}
docker manifest create -a ${dockerim}:${version} ${dockeramd64}:${version} ${dockeraarch64}:${version}
docker manifest inspect ${dockerim}:${version}
docker login -u ${username} -p ${passWord}
docker login -u ${username} -p ${passWord}
docker manifest push ${dockerim}:${version}
# generate docker latest
# generate docker latest
echo "generate ${dockerim}:latest "
if [ ${dockerLatest} == 'y' ] ;then
echo "docker manifest create -a ${dockerim}:latest ${dockeramd64}:latest ${dockeraarch64}:latest"
docker manifest create -a ${dockerim}:latest ${dockeramd64}:latest ${dockeraarch64}:latest
docker manifest inspect ${dockerim}:latest
docker manifest rm ${dockerim}:latest
docker manifest inspect ${dockerim}:latest
docker manifest rm ${dockerim}:latest
docker manifest create -a ${dockerim}:latest ${dockeramd64}:latest ${dockeraarch64}:latest
docker manifest inspect ${dockerim}:latest
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker manifest push ${dockerim}:latest
docker pull tdengine/tdengine:latest
docker pull tdengine/tdengine:latest
fi

View File

@ -74,7 +74,7 @@ do
done
# Check_verison()
# Check_version()
# {
# }
@ -102,14 +102,14 @@ scriptDir=$(dirname $(readlink -f $0))
communityDir=${scriptDir}/../../../community
DockerfilePath=${communityDir}/packaging/docker/
if [ "$cloudBuild" == "y" ]; then
comunityArchiveDir=/nas/TDengine/v$version/cloud
communityArchiveDir=/nas/TDengine/v$version/cloud
Dockerfile=${communityDir}/packaging/docker/DockerfileCloud
else
comunityArchiveDir=/nas/TDengine/v$version/community
communityArchiveDir=/nas/TDengine/v$version/community
Dockerfile=${communityDir}/packaging/docker/Dockerfile
fi
cd ${scriptDir}
cp -f ${comunityArchiveDir}/${pkgFile} .
cp -f ${communityArchiveDir}/${pkgFile} .
echo "dirName=${dirName}"

View File

@ -627,9 +627,16 @@ function install_app() {
fi
}
function install_TDengine() {
echo -e "${GREEN}Start to install TDengine...${NC}"
log_print "start to install TDengine"
function checkDirectory() {
if [ ! -d "${bin_link_dir}" ]; then
${csudo}mkdir -p ${bin_link_dir}
log_print "${bin_link_dir} directory created"
fi
if [ ! -d "${lib_link_dir}" ]; then
${csudo}mkdir -p ${lib_link_dir}
log_print "${lib_link_dir} directory created"
fi
#install log and data dir , then ln to /usr/local/taos
${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
@ -640,6 +647,13 @@ function install_TDengine() {
${csudo}ln -s ${log_dir} ${log_link_dir} || :
${csudo}ln -s ${data_dir} ${data_link_dir} || :
}
function install_TDengine() {
echo -e "${GREEN}Start to install TDengine...${NC}"
log_print "start to install TDengine"
checkDirectory
# Install include, lib, binary and service
install_include &&

View File

@ -97,16 +97,14 @@ typedef struct {
typedef struct SQueryExecMetric {
int64_t start; // start timestamp, us
int64_t syntaxStart; // start to parse, us
int64_t syntaxEnd; // end to parse, us
int64_t ctgStart; // start to parse, us
int64_t ctgEnd; // end to parse, us
int64_t semanticEnd;
int64_t planEnd;
int64_t resultReady;
int64_t execEnd;
int64_t send; // start to send to server, us
int64_t rsp; // receive response from server, us
int64_t execStart; // start to parse, us
int64_t parseCostUs;
int64_t ctgCostUs;
int64_t analyseCostUs;
int64_t planCostUs;
int64_t execCostUs;
} SQueryExecMetric;
struct SAppInstInfo {

View File

@ -83,28 +83,22 @@ static void deregisterRequest(SRequestObj *pRequest) {
"current:%d, app current:%d",
pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000.0, num, currentInst);
tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
"us, exec:%" PRId64 "us, stmtType:%d",
duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
pRequest->metric.execEnd - pRequest->metric.semanticEnd, pRequest->stmtType);
if (pRequest->pQuery && pRequest->pQuery->pRoot) {
if (QUERY_NODE_VNODE_MODIFY_STMT == pRequest->pQuery->pRoot->type &&
(0 == ((SVnodeModifyOpStmt *)pRequest->pQuery->pRoot)->sqlNodeType)) {
tscDebug("insert duration %" PRId64 "us: parseCost:%" PRId64 "us, ctgCost:%" PRId64 "us, analyseCost:%" PRId64
"us, planCost:%" PRId64 "us, exec:%" PRId64 "us",
duration, pRequest->metric.parseCostUs, pRequest->metric.ctgCostUs, pRequest->metric.analyseCostUs,
pRequest->metric.planCostUs, pRequest->metric.execCostUs);
atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
tscDebug("query duration %" PRId64 "us: parseCost:%" PRId64 "us, ctgCost:%" PRId64 "us, analyseCost:%" PRId64
"us, planCost:%" PRId64 "us, exec:%" PRId64 "us",
duration, pRequest->metric.parseCostUs, pRequest->metric.ctgCostUs, pRequest->metric.analyseCostUs,
pRequest->metric.planCostUs, pRequest->metric.execCostUs);
if (QUERY_NODE_VNODE_MODIFY_STMT == pRequest->stmtType) {
// tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
// "us, exec:%" PRId64 "us",
// duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
// pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd -
// pRequest->metric.ctgEnd, pRequest->metric.execEnd - pRequest->metric.semanticEnd);
// atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
// tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
// "us, planner:%" PRId64 "us, exec:%" PRId64 "us, reqId:0x%" PRIx64,
// duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
// pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd -
// pRequest->metric.ctgEnd, pRequest->metric.planEnd - pRequest->metric.semanticEnd,
// pRequest->metric.resultReady - pRequest->metric.planEnd, pRequest->requestId);
atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration);
atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration);
}
}
if (duration >= SLOW_QUERY_INTERVAL) {
@ -371,8 +365,6 @@ void doDestroyRequest(void *p) {
taosArrayDestroy(pRequest->tableList);
taosArrayDestroy(pRequest->dbList);
taosArrayDestroy(pRequest->targetTableList);
qDestroyQuery(pRequest->pQuery);
nodesDestroyAllocator(pRequest->allocatorRefId);
destroyQueryExecRes(&pRequest->body.resInfo.execRes);
@ -387,6 +379,9 @@ void doDestroyRequest(void *p) {
taosMemoryFree(pRequest->body.param);
}
qDestroyQuery(pRequest->pQuery);
nodesDestroyAllocator(pRequest->allocatorRefId);
taosMemoryFreeClear(pRequest->sqlstr);
taosMemoryFree(pRequest);
tscTrace("end to destroy request %" PRIx64 " p:%p", reqId, pRequest);

View File

@ -323,7 +323,7 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
return;
}
int32_t code = qExecCommand(&pRequest->pTscObj->id ,pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp);
int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp);
if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true);
}
@ -465,7 +465,7 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra
}
void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols) {
if(pResInfo == NULL || pSchema == NULL || numOfCols <= 0){
if (pResInfo == NULL || pSchema == NULL || numOfCols <= 0) {
tscError("invalid paras, pResInfo == NULL || pSchema == NULL || numOfCols <= 0");
return;
}
@ -479,7 +479,7 @@ void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t
}
pResInfo->fields = taosMemoryCalloc(numOfCols, sizeof(TAOS_FIELD));
pResInfo->userFields = taosMemoryCalloc(numOfCols, sizeof(TAOS_FIELD));
if(numOfCols != pResInfo->numOfCols){
if (numOfCols != pResInfo->numOfCols) {
tscError("numOfCols:%d != pResInfo->numOfCols:%d", numOfCols, pResInfo->numOfCols);
return;
}
@ -925,7 +925,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
removeMeta(pTscObj, pRequest->targetTableList);
}
pRequest->metric.execEnd = taosGetTimestampUs();
pRequest->metric.execCostUs = taosGetTimestampUs() - pRequest->metric.execStart;
int32_t code1 = handleQueryExecRsp(pRequest);
if (pRequest->code == TSDB_CODE_SUCCESS && pRequest->code != code1) {
pRequest->code = code1;
@ -1051,11 +1051,10 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
pRequest->body.subplanNum = pDag->numOfSubplans;
}
pRequest->metric.planEnd = taosGetTimestampUs();
if (code == TSDB_CODE_SUCCESS) {
tscDebug("0x%" PRIx64 " create query plan success, elapsed time:%.2f ms, 0x%" PRIx64, pRequest->self,
(pRequest->metric.planEnd - st) / 1000.0, pRequest->requestId);
}
pRequest->metric.execStart = taosGetTimestampUs();
pRequest->metric.planCostUs = pRequest->metric.execStart - st;
if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) {
SArray* pNodeList = NULL;
if (QUERY_NODE_VNODE_MODIFY_STMT != nodeType(pQuery->pRoot)) {
@ -1103,6 +1102,17 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
destorySqlCallbackWrapper(pWrapper);
}
if (pQuery->pRoot && !pRequest->inRetry) {
STscObj* pTscObj = pRequest->pTscObj;
SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
if (QUERY_NODE_VNODE_MODIFY_STMT == pQuery->pRoot->type &&
(0 == ((SVnodeModifyOpStmt*)pQuery->pRoot)->sqlNodeType)) {
atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertsReq, 1);
} else if (QUERY_NODE_SELECT_STMT == pQuery->pRoot->type) {
atomic_add_fetch_64((int64_t*)&pActivity->numOfQueryReq, 1);
}
}
switch (pQuery->execMode) {
case QUERY_EXEC_MODE_LOCAL:
asyncExecLocalCmd(pRequest, pQuery);
@ -1358,7 +1368,7 @@ int32_t doProcessMsgFromServer(void* param) {
SEpSet* pEpSet = arg->pEpset;
SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle;
if(pMsg->info.ahandle == NULL){
if (pMsg->info.ahandle == NULL) {
tscError("doProcessMsgFromServer pMsg->info.ahandle == NULL");
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
@ -1374,24 +1384,12 @@ int32_t doProcessMsgFromServer(void* param) {
if (pSendInfo->requestObjRefId != 0) {
SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId);
if (pRequest) {
if(pRequest->self != pSendInfo->requestObjRefId){
tscError("doProcessMsgFromServer pRequest->self:%"PRId64" != pSendInfo->requestObjRefId:%"PRId64, pRequest->self, pSendInfo->requestObjRefId);
if (pRequest->self != pSendInfo->requestObjRefId) {
tscError("doProcessMsgFromServer pRequest->self:%" PRId64 " != pSendInfo->requestObjRefId:%" PRId64,
pRequest->self, pSendInfo->requestObjRefId);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
pRequest->metric.rsp = taosGetTimestampUs();
pTscObj = pRequest->pTscObj;
/*
* There is not response callback function for submit response.
* The actual inserted number of points is the first number.
*/
int32_t elapsed = pRequest->metric.rsp - pRequest->metric.start;
if (pMsg->code == TSDB_CODE_SUCCESS) {
tscDebug("0x%" PRIx64 " rsp msg:%s, code:%s rspLen:%d, elapsed:%d ms, reqId:0x%" PRIx64, pRequest->self,
TMSG_INFO(pMsg->msgType), tstrerror(pMsg->code), pMsg->contLen, elapsed / 1000, pRequest->requestId);
} else {
tscError("0x%" PRIx64 " rsp msg:%s, code:%s rspLen:%d, elapsed time:%d ms, reqId:0x%" PRIx64, pRequest->self,
TMSG_INFO(pMsg->msgType), tstrerror(pMsg->code), pMsg->contLen, elapsed / 1000, pRequest->requestId);
}
}
}
@ -1523,7 +1521,7 @@ void doSetOneRowPtr(SReqResultInfo* pResultInfo) {
}
void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) {
if(pRequest == NULL){
if (pRequest == NULL) {
return NULL;
}
@ -1579,7 +1577,7 @@ static void syncFetchFn(void* param, TAOS_RES* res, int32_t numOfRows) {
}
void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) {
if(pRequest == NULL){
if (pRequest == NULL) {
return NULL;
}
@ -1645,8 +1643,11 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
char* pStart = pCol->offset[j] + pCol->pData;
int32_t len = taosUcs4ToMbs((TdUcs4*)varDataVal(pStart), varDataLen(pStart), varDataVal(p));
if(len > bytes || (p + len) >= (pResultInfo->convertBuf[i] + colLength[i])){
tscError("doConvertUCS4 error, invalid data. len:%d, bytes:%d, (p + len):%p, (pResultInfo->convertBuf[i] + colLength[i]):%p", len, bytes, (p + len), (pResultInfo->convertBuf[i] + colLength[i]));
if (len > bytes || (p + len) >= (pResultInfo->convertBuf[i] + colLength[i])) {
tscError(
"doConvertUCS4 error, invalid data. len:%d, bytes:%d, (p + len):%p, (pResultInfo->convertBuf[i] + "
"colLength[i]):%p",
len, bytes, (p + len), (pResultInfo->convertBuf[i] + colLength[i]));
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
@ -1675,7 +1676,7 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
// length |
int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
if(ASSERT(numOfCols == cols)){
if (ASSERT(numOfCols == cols)) {
tscError("estimateJsonLen error: numOfCols:%d != cols:%d", numOfCols, cols);
return -1;
}
@ -1748,7 +1749,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
char* p = (char*)pResultInfo->pData;
int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows);
if(dataLen <= 0){
if (dataLen <= 0) {
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
@ -1758,7 +1759,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
int32_t totalLen = 0;
int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
if(ASSERT(numOfCols == cols)){
if (ASSERT(numOfCols == cols)) {
tscError("doConvertJson error: numOfCols:%d != cols:%d", numOfCols, cols);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
@ -1783,7 +1784,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
for (int32_t i = 0; i < numOfCols; ++i) {
int32_t colLen = htonl(colLength[i]);
int32_t colLen1 = htonl(colLength1[i]);
if(ASSERT(colLen < dataLen)){
if (ASSERT(colLen < dataLen)) {
tscError("doConvertJson error: colLen:%d >= dataLen:%d", colLen, dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
@ -1870,7 +1871,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows,
bool convertUcs4) {
if(ASSERT(numOfCols > 0 && pFields != NULL && pResultInfo != NULL)){
if (ASSERT(numOfCols > 0 && pFields != NULL && pResultInfo != NULL)) {
tscError("setResultDataPtr paras error");
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
@ -1902,8 +1903,9 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
int32_t cols = *(int32_t*)p;
p += sizeof(int32_t);
if(ASSERT(rows == numOfRows && cols == numOfCols)){
tscError("setResultDataPtr paras error:rows;%d numOfRows:%d cols:%d numOfCols:%d", rows, numOfRows, cols, numOfCols);
if (ASSERT(rows == numOfRows && cols == numOfCols)) {
tscError("setResultDataPtr paras error:rows;%d numOfRows:%d cols:%d numOfCols:%d", rows, numOfRows, cols,
numOfCols);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
@ -1970,7 +1972,7 @@ char* getDbOfConnection(STscObj* pObj) {
}
void setConnectionDB(STscObj* pTscObj, const char* db) {
if(db == NULL || pTscObj == NULL){
if (db == NULL || pTscObj == NULL) {
tscError("setConnectionDB para is NULL");
return;
}
@ -1992,7 +1994,7 @@ void resetConnectDB(STscObj* pTscObj) {
int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4,
bool freeAfterUse) {
if(pResultInfo == NULL || pRsp == NULL){
if (pResultInfo == NULL || pRsp == NULL) {
tscError("setQueryResultFromRsp paras is null");
return TSDB_CODE_TSC_INTERNAL_ERROR;
}

View File

@ -752,7 +752,8 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t
SRequestObj *pRequest = pWrapper->pRequest;
SQuery *pQuery = pRequest->pQuery;
pRequest->metric.ctgEnd = taosGetTimestampUs();
int64_t analyseStart = taosGetTimestampUs();
pRequest->metric.ctgCostUs = analyseStart - pRequest->metric.ctgStart;
qDebug("0x%" PRIx64 " start to semantic analysis, reqId:0x%" PRIx64, pRequest->self, pRequest->requestId);
if (code == TSDB_CODE_SUCCESS) {
@ -763,7 +764,7 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t
}
}
pRequest->metric.semanticEnd = taosGetTimestampUs();
pRequest->metric.analyseCostUs = taosGetTimestampUs() - analyseStart;
if (code == TSDB_CODE_SUCCESS) {
if (pQuery->haveResultSet) {
@ -775,10 +776,6 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t
TSWAP(pRequest->tableList, (pQuery)->pTableList);
TSWAP(pRequest->targetTableList, (pQuery)->pTargetTableList);
double el = (pRequest->metric.semanticEnd - pRequest->metric.ctgEnd) / 1000.0;
tscDebug("0x%" PRIx64 " analysis semantics completed, start async query, elapsed time:%.2f ms, reqId:0x%" PRIx64,
pRequest->self, el, pRequest->requestId);
launchAsyncQuery(pRequest, pQuery, pResultMeta, pWrapper);
} else {
destorySqlCallbackWrapper(pWrapper);
@ -843,7 +840,7 @@ static void doAsyncQueryFromParse(SMetaData *pResultMeta, void *param, int32_t c
SRequestObj *pRequest = pWrapper->pRequest;
SQuery *pQuery = pRequest->pQuery;
pRequest->metric.ctgEnd = taosGetTimestampUs();
pRequest->metric.ctgCostUs += taosGetTimestampUs() - pRequest->metric.ctgStart;
qDebug("0x%" PRIx64 " start to continue parse, reqId:0x%" PRIx64 ", code:%s", pRequest->self, pRequest->requestId,
tstrerror(code));
@ -956,7 +953,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
}
if (TSDB_CODE_SUCCESS == code) {
pRequest->metric.syntaxStart = taosGetTimestampUs();
int64_t syntaxStart = taosGetTimestampUs();
pWrapper->pCatalogReq = taosMemoryCalloc(1, sizeof(SCatalogReq));
if (pWrapper->pCatalogReq == NULL) {
@ -967,19 +964,11 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
code = qParseSqlSyntax(pWrapper->pParseCtx, &pRequest->pQuery, pWrapper->pCatalogReq);
}
pRequest->metric.syntaxEnd = taosGetTimestampUs();
}
if (TSDB_CODE_SUCCESS == code && !updateMetaForce) {
SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
if (QUERY_NODE_INSERT_STMT == nodeType(pRequest->pQuery->pRoot)) {
atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertsReq, 1);
} else if (QUERY_NODE_SELECT_STMT == nodeType(pRequest->pQuery->pRoot)) {
atomic_add_fetch_64((int64_t *)&pActivity->numOfQueryReq, 1);
}
pRequest->metric.parseCostUs += taosGetTimestampUs() - syntaxStart;
}
if (TSDB_CODE_SUCCESS == code) {
pRequest->stmtType = pRequest->pQuery->pRoot->type;
phaseAsyncQuery(pWrapper);
} else {
tscError("0x%" PRIx64 " error happens, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code, tstrerror(code),
@ -1006,7 +995,6 @@ static void fetchCallback(void *pResult, void *param, int32_t code) {
SRequestObj *pRequest = (SRequestObj *)param;
SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
pRequest->metric.resultReady = taosGetTimestampUs();
tscDebug("0x%" PRIx64 " enter scheduler fetch cb, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code,
tstrerror(code), pRequest->requestId);

View File

@ -456,12 +456,13 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) {
(*pRsp)->numOfCols = htonl(SHOW_VARIABLES_RESULT_COLS);
int32_t len = blockEncode(pBlock, (*pRsp)->data, SHOW_VARIABLES_RESULT_COLS);
blockDataDestroy(pBlock);
if(len != rspSize - sizeof(SRetrieveTableRsp)){
uError("buildShowVariablesRsp error, len:%d != rspSize - sizeof(SRetrieveTableRsp):%" PRIu64, len, (uint64_t) (rspSize - sizeof(SRetrieveTableRsp)));
return TSDB_CODE_TSC_INVALID_INPUT;
}
blockDataDestroy(pBlock);
return TSDB_CODE_SUCCESS;
}

File diff suppressed because it is too large Load Diff

View File

@ -21,23 +21,24 @@
#include "clientSml.h"
// comma ,
//#define IS_SLASH_COMMA(sql) (*(sql) == COMMA && *((sql)-1) == SLASH)
#define IS_COMMA(sql) (*(sql) == COMMA && *((sql)-1) != SLASH)
// #define IS_SLASH_COMMA(sql) (*(sql) == COMMA && *((sql)-1) == SLASH)
#define IS_COMMA(sql) (*(sql) == COMMA && *((sql)-1) != SLASH)
// space
//#define IS_SLASH_SPACE(sql) (*(sql) == SPACE && *((sql)-1) == SLASH)
#define IS_SPACE(sql) (*(sql) == SPACE && *((sql)-1) != SLASH)
// #define IS_SLASH_SPACE(sql) (*(sql) == SPACE && *((sql)-1) == SLASH)
#define IS_SPACE(sql) (*(sql) == SPACE && *((sql)-1) != SLASH)
// equal =
//#define IS_SLASH_EQUAL(sql) (*(sql) == EQUAL && *((sql)-1) == SLASH)
#define IS_EQUAL(sql) (*(sql) == EQUAL && *((sql)-1) != SLASH)
// #define IS_SLASH_EQUAL(sql) (*(sql) == EQUAL && *((sql)-1) == SLASH)
#define IS_EQUAL(sql) (*(sql) == EQUAL && *((sql)-1) != SLASH)
// quote "
//#define IS_SLASH_QUOTE(sql) (*(sql) == QUOTE && *((sql)-1) == SLASH)
#define IS_QUOTE(sql) (*(sql) == QUOTE && *((sql)-1) != SLASH)
// #define IS_SLASH_QUOTE(sql) (*(sql) == QUOTE && *((sql)-1) == SLASH)
#define IS_QUOTE(sql) (*(sql) == QUOTE && *((sql)-1) != SLASH)
// SLASH
//#define IS_SLASH_SLASH(sql) (*(sql) == SLASH && *((sql)-1) == SLASH)
// #define IS_SLASH_SLASH(sql) (*(sql) == SLASH && *((sql)-1) == SLASH)
#define IS_SLASH_LETTER(sql) \
(*((sql)-1) == SLASH && (*(sql) == COMMA || *(sql) == SPACE || *(sql) == EQUAL || *(sql) == QUOTE || *(sql) == SLASH)) \
// (IS_SLASH_COMMA(sql) || IS_SLASH_SPACE(sql) || IS_SLASH_EQUAL(sql) || IS_SLASH_QUOTE(sql) || IS_SLASH_SLASH(sql))
#define IS_SLASH_LETTER(sql) \
(*((sql)-1) == SLASH && (*(sql) == COMMA || *(sql) == SPACE || *(sql) == EQUAL || *(sql) == QUOTE || \
*(sql) == SLASH)) // (IS_SLASH_COMMA(sql) || IS_SLASH_SPACE(sql) || IS_SLASH_EQUAL(sql) ||
// IS_SLASH_QUOTE(sql) || IS_SLASH_SLASH(sql))
#define MOVE_FORWARD_ONE(sql, len) (memmove((void *)((sql)-1), (sql), len))
@ -53,15 +54,15 @@
#define BINARY_ADD_LEN 2 // "binary" 2 means " "
#define NCHAR_ADD_LEN 3 // L"nchar" 3 means L" "
uint8_t smlPrecisionConvert[7] = {TSDB_TIME_PRECISION_NANO, TSDB_TIME_PRECISION_HOURS, TSDB_TIME_PRECISION_MINUTES,
uint8_t smlPrecisionConvert[7] = {TSDB_TIME_PRECISION_NANO, TSDB_TIME_PRECISION_HOURS, TSDB_TIME_PRECISION_MINUTES,
TSDB_TIME_PRECISION_SECONDS, TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_MICRO,
TSDB_TIME_PRECISION_NANO};
static int64_t smlParseInfluxTime(SSmlHandle *info, const char *data, int32_t len) {
uint8_t toPrecision = info->currSTableMeta ? info->currSTableMeta->tableInfo.precision : TSDB_TIME_PRECISION_NANO;
if(unlikely(len == 0 || (len == 1 && data[0] == '0'))){
return taosGetTimestampNs()/smlFactorNS[toPrecision];
if (unlikely(len == 0 || (len == 1 && data[0] == '0'))) {
return taosGetTimestampNs() / smlFactorNS[toPrecision];
}
uint8_t fromPrecision = smlPrecisionConvert[info->precision];
@ -75,7 +76,7 @@ static int64_t smlParseInfluxTime(SSmlHandle *info, const char *data, int32_t le
}
int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
if (pVal->value[0] == '"'){ // binary
if (pVal->value[0] == '"') { // binary
if (pVal->length >= 2 && pVal->value[pVal->length - 1] == '"') {
pVal->type = TSDB_DATA_TYPE_BINARY;
pVal->length -= BINARY_ADD_LEN;
@ -88,8 +89,8 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
if(pVal->value[0] == 'l' || pVal->value[0] == 'L'){ // nchar
if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= 3){
if (pVal->value[0] == 'l' || pVal->value[0] == 'L') { // nchar
if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= 3) {
pVal->type = TSDB_DATA_TYPE_NCHAR;
pVal->length -= NCHAR_ADD_LEN;
if (pVal->length > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
@ -101,10 +102,10 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
if (pVal->value[0] == 't' || pVal->value[0] == 'T'){
if(pVal->length == 1 || (pVal->length == 4 && (pVal->value[1] == 'r' || pVal->value[1] == 'R')
&& (pVal->value[2] == 'u' || pVal->value[2] == 'U')
&& (pVal->value[3] == 'e' || pVal->value[3] == 'E'))){
if (pVal->value[0] == 't' || pVal->value[0] == 'T') {
if (pVal->length == 1 ||
(pVal->length == 4 && (pVal->value[1] == 'r' || pVal->value[1] == 'R') &&
(pVal->value[2] == 'u' || pVal->value[2] == 'U') && (pVal->value[3] == 'e' || pVal->value[3] == 'E'))) {
pVal->i = TSDB_TRUE;
pVal->type = TSDB_DATA_TYPE_BOOL;
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
@ -113,11 +114,11 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
if (pVal->value[0] == 'f' || pVal->value[0] == 'F'){
if(pVal->length == 1 || (pVal->length == 5 && (pVal->value[1] == 'a' || pVal->value[1] == 'A')
&& (pVal->value[2] == 'l' || pVal->value[2] == 'L')
&& (pVal->value[3] == 's' || pVal->value[3] == 'S')
&& (pVal->value[4] == 'e' || pVal->value[4] == 'E'))){
if (pVal->value[0] == 'f' || pVal->value[0] == 'F') {
if (pVal->length == 1 ||
(pVal->length == 5 && (pVal->value[1] == 'a' || pVal->value[1] == 'A') &&
(pVal->value[2] == 'l' || pVal->value[2] == 'L') && (pVal->value[3] == 's' || pVal->value[3] == 'S') &&
(pVal->value[4] == 'e' || pVal->value[4] == 'E'))) {
pVal->i = TSDB_FALSE;
pVal->type = TSDB_DATA_TYPE_BOOL;
pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
@ -135,9 +136,9 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd,
SSmlLineInfo* currElement, bool isSameMeasure, bool isSameCTable){
if(isSameCTable){
static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLineInfo *currElement, bool isSameMeasure,
bool isSameCTable) {
if (isSameCTable) {
return TSDB_CODE_SUCCESS;
}
@ -146,15 +147,16 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd,
SArray *maxKVs = info->maxTagKVs;
bool isSuperKVInit = true;
SArray *superKV = NULL;
if(info->dataFormat){
if(unlikely(!isSameMeasure)){
SSmlSTableMeta **tmp = (SSmlSTableMeta **)taosHashGet(info->superTables, currElement->measure, currElement->measureLen);
if (info->dataFormat) {
if (unlikely(!isSameMeasure)) {
SSmlSTableMeta **tmp =
(SSmlSTableMeta **)taosHashGet(info->superTables, currElement->measure, currElement->measureLen);
SSmlSTableMeta *sMeta = NULL;
if(unlikely(tmp == NULL)){
STableMeta * pTableMeta = smlGetMeta(info, currElement->measure, currElement->measureLen);
if(pTableMeta == NULL){
if (unlikely(tmp == NULL)) {
STableMeta *pTableMeta = smlGetMeta(info, currElement->measure, currElement->measureLen);
if (pTableMeta == NULL) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
sMeta = smlBuildSTableMeta(info->dataFormat);
@ -165,15 +167,15 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd,
info->currSTableMeta = (*tmp)->tableMeta;
superKV = (*tmp)->tags;
if(unlikely(taosArrayGetSize(superKV) == 0)){
if (unlikely(taosArrayGetSize(superKV) == 0)) {
isSuperKVInit = false;
}
taosArraySetSize(maxKVs, 0);
taosArrayClear(maxKVs);
}
}else{
taosArraySetSize(maxKVs, 0);
} else {
taosArrayClear(maxKVs);
}
taosArraySetSize(preLineKV, 0);
taosArrayClear(preLineKV);
while (*sql < sqlEnd) {
if (unlikely(IS_SPACE(*sql))) {
@ -183,7 +185,7 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd,
bool hasSlash = false;
// parse key
const char *key = *sql;
size_t keyLen = 0;
size_t keyLen = 0;
while (*sql < sqlEnd) {
if (unlikely(IS_COMMA(*sql))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
@ -194,12 +196,12 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd,
(*sql)++;
break;
}
if(!hasSlash){
if (!hasSlash) {
hasSlash = (*(*sql) == SLASH);
}
(*sql)++;
}
if(unlikely(hasSlash)) {
if (unlikely(hasSlash)) {
PROCESS_SLASH(key, keyLen)
}
@ -210,18 +212,18 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd,
// parse value
const char *value = *sql;
size_t valueLen = 0;
size_t valueLen = 0;
hasSlash = false;
while (*sql < sqlEnd) {
// parse value
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
break;
}else if (unlikely(IS_EQUAL(*sql))) {
} else if (unlikely(IS_EQUAL(*sql))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
return TSDB_CODE_SML_INVALID_DATA;
}
if(!hasSlash){
if (!hasSlash) {
hasSlash = (*(*sql) == SLASH);
}
@ -234,7 +236,7 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd,
return TSDB_CODE_SML_INVALID_DATA;
}
if(unlikely(hasSlash)) {
if (unlikely(hasSlash)) {
PROCESS_SLASH(value, valueLen)
}
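One detail worth calling out in this hunk: the scanner merely records that a backslash occurred (`hasSlash`) and defers the actual unescaping to `PROCESS_SLASH` once the token's boundaries are known, keeping the common escape-free path a pure scan. A rough sketch of such a deferred in-place unescape (an illustration of the idea only; the macro's real body lives in the client headers and may restrict which characters can be escaped):

```c
#include <stddef.h>

// In-place removal of '\' escape characters once a token has been
// delimited; returns the new length. Illustrative only.
static size_t smlUnescapeInPlace(char *buf, size_t len) {
  size_t w = 0;
  for (size_t r = 0; r < len; ++r) {
    if (buf[r] == '\\' && r + 1 < len) {
      ++r;  // drop the backslash, keep the escaped character
    }
    buf[w++] = buf[r];
  }
  return w;
}
```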
@ -243,24 +245,25 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd,
}
SSmlKv kv = {.key = key, .keyLen = keyLen, .type = TSDB_DATA_TYPE_NCHAR, .value = value, .length = valueLen};
if(info->dataFormat){
if(unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)){
if (info->dataFormat) {
if (unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
if(isSameMeasure){
if(unlikely(cnt >= taosArrayGetSize(maxKVs))) {
if (isSameMeasure) {
if (unlikely(cnt >= taosArrayGetSize(maxKVs))) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(maxKVs, cnt);
if(unlikely(kv.length > maxKV->length)){
if (unlikely(kv.length > maxKV->length)) {
maxKV->length = kv.length;
SSmlSTableMeta **tableMeta = (SSmlSTableMeta **)taosHashGet(info->superTables, currElement->measure, currElement->measureLen);
if(unlikely(NULL == tableMeta)){
SSmlSTableMeta **tableMeta =
(SSmlSTableMeta **)taosHashGet(info->superTables, currElement->measure, currElement->measureLen);
if (unlikely(NULL == tableMeta)) {
uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id);
return TSDB_CODE_SML_INTERNAL_ERROR;
}
@ -269,49 +272,49 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd,
oldKV->length = kv.length;
info->needModifySchema = true;
}
if(unlikely(!IS_SAME_KEY)){
if (unlikely(!IS_SAME_KEY)) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
}else{
if(isSuperKVInit){
if(unlikely(cnt >= taosArrayGetSize(superKV))) {
} else {
if (isSuperKVInit) {
if (unlikely(cnt >= taosArrayGetSize(superKV))) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(superKV, cnt);
if(unlikely(kv.length > maxKV->length)) {
if (unlikely(kv.length > maxKV->length)) {
maxKV->length = kv.length;
}else{
} else {
kv.length = maxKV->length;
}
info->needModifySchema = true;
if(unlikely(!IS_SAME_KEY)){
if (unlikely(!IS_SAME_KEY)) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
}else{
} else {
taosArrayPush(superKV, &kv);
}
taosArrayPush(maxKVs, &kv);
}
}else{
} else {
taosArrayPush(maxKVs, &kv);
}
taosArrayPush(preLineKV, &kv);
cnt++;
if(IS_SPACE(*sql)){
if (IS_SPACE(*sql)) {
break;
}
(*sql)++;
}
void* oneTable = taosHashGet(info->childTables, currElement->measure, currElement->measureTagsLen);
void *oneTable = taosHashGet(info->childTables, currElement->measure, currElement->measureTagsLen);
if ((oneTable != NULL)) {
return TSDB_CODE_SUCCESS;
}
@ -324,10 +327,10 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd,
smlSetCTableName(tinfo);
tinfo->uid = info->uid++;
if(info->dataFormat) {
if (info->dataFormat) {
info->currSTableMeta->uid = tinfo->uid;
tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta);
if(tinfo->tableDataCtx == NULL){
if (tinfo->tableDataCtx == NULL) {
smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL);
return TSDB_CODE_SML_INVALID_DATA;
}
@ -338,15 +341,16 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd,
return TSDB_CODE_SUCCESS;
}
static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd,
SSmlLineInfo* currElement, bool isSameMeasure, bool isSameCTable){
static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLineInfo *currElement, bool isSameMeasure,
bool isSameCTable) {
int cnt = 0;
SArray *preLineKV = info->preLineColKV;
bool isSuperKVInit = true;
SArray *superKV = NULL;
if(info->dataFormat){
if(unlikely(!isSameCTable)){
SSmlTableInfo **oneTable = (SSmlTableInfo **)taosHashGet(info->childTables, currElement->measure, currElement->measureTagsLen);
if (info->dataFormat) {
if (unlikely(!isSameCTable)) {
SSmlTableInfo **oneTable =
(SSmlTableInfo **)taosHashGet(info->childTables, currElement->measure, currElement->measureTagsLen);
if (unlikely(oneTable == NULL)) {
smlBuildInvalidDataMsg(&info->msgBuf, "child table should inside", currElement->measure);
return TSDB_CODE_SML_INVALID_DATA;
@ -354,14 +358,15 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd,
info->currTableDataCtx = (*oneTable)->tableDataCtx;
}
if(unlikely(!isSameMeasure)){
SSmlSTableMeta **tmp = (SSmlSTableMeta **)taosHashGet(info->superTables, currElement->measure, currElement->measureLen);
if (unlikely(!isSameMeasure)) {
SSmlSTableMeta **tmp =
(SSmlSTableMeta **)taosHashGet(info->superTables, currElement->measure, currElement->measureLen);
SSmlSTableMeta *sMeta = NULL;
if(unlikely(tmp == NULL)){
STableMeta * pTableMeta = smlGetMeta(info, currElement->measure, currElement->measureLen);
if(pTableMeta == NULL){
if (unlikely(tmp == NULL)) {
STableMeta *pTableMeta = smlGetMeta(info, currElement->measure, currElement->measureLen);
if (pTableMeta == NULL) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
sMeta = smlBuildSTableMeta(info->dataFormat);
@ -371,10 +376,10 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd,
}
info->currSTableMeta = (*tmp)->tableMeta;
superKV = (*tmp)->cols;
if(unlikely(taosArrayGetSize(superKV) == 0)){
if (unlikely(taosArrayGetSize(superKV) == 0)) {
isSuperKVInit = false;
}
taosArraySetSize(preLineKV, 0);
taosArrayClear(preLineKV);
}
}
@ -386,7 +391,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd,
bool hasSlash = false;
// parse key
const char *key = *sql;
size_t keyLen = 0;
size_t keyLen = 0;
while (*sql < sqlEnd) {
if (unlikely(IS_COMMA(*sql))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
@ -397,12 +402,12 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd,
(*sql)++;
break;
}
if(!hasSlash){
if (!hasSlash) {
hasSlash = (*(*sql) == SLASH);
}
(*sql)++;
}
if(unlikely(hasSlash)) {
if (unlikely(hasSlash)) {
PROCESS_SLASH(key, keyLen)
}
@ -413,9 +418,9 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd,
// parse value
const char *value = *sql;
size_t valueLen = 0;
hasSlash = false;
bool isInQuote = false;
size_t valueLen = 0;
hasSlash = false;
bool isInQuote = false;
while (*sql < sqlEnd) {
// parse value
if (unlikely(IS_QUOTE(*sql))) {
@ -423,7 +428,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd,
(*sql)++;
continue;
}
if (!isInQuote){
if (!isInQuote) {
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
break;
} else if (unlikely(IS_EQUAL(*sql))) {
@ -431,7 +436,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd,
return TSDB_CODE_SML_INVALID_DATA;
}
}
if(!hasSlash){
if (!hasSlash) {
hasSlash = (*(*sql) == SLASH);
}
@ -447,22 +452,22 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd,
smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", value);
return TSDB_CODE_SML_INVALID_DATA;
}
if(unlikely(hasSlash)) {
if (unlikely(hasSlash)) {
PROCESS_SLASH(value, valueLen)
}
SSmlKv kv = {.key = key, .keyLen = keyLen, .value = value, .length = valueLen};
SSmlKv kv = {.key = key, .keyLen = keyLen, .value = value, .length = valueLen};
int32_t ret = smlParseValue(&kv, &info->msgBuf);
if (ret != TSDB_CODE_SUCCESS) {
smlBuildInvalidDataMsg(&info->msgBuf, "smlParseValue error", value);
return ret;
}
if(info->dataFormat){
//cnt begin 0, add ts so + 2
if(unlikely(cnt + 2 > info->currSTableMeta->tableInfo.numOfColumns)){
if (info->dataFormat) {
// cnt starts at 0 and the ts column is extra, hence + 2
if (unlikely(cnt + 2 > info->currSTableMeta->tableInfo.numOfColumns)) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
// bind data
@ -470,27 +475,28 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd,
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
uError("smlBuildCol error, retry");
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
if(isSameMeasure){
if(cnt >= taosArrayGetSize(preLineKV)) {
if (isSameMeasure) {
if (cnt >= taosArrayGetSize(preLineKV)) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(preLineKV, cnt);
if(kv.type != maxKV->type){
if (kv.type != maxKV->type) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
if(unlikely(IS_VAR_DATA_TYPE(kv.type) && kv.length > maxKV->length)){
if (unlikely(IS_VAR_DATA_TYPE(kv.type) && kv.length > maxKV->length)) {
maxKV->length = kv.length;
SSmlSTableMeta **tableMeta = (SSmlSTableMeta **)taosHashGet(info->superTables, currElement->measure, currElement->measureLen);
if(unlikely(NULL == tableMeta)){
SSmlSTableMeta **tableMeta =
(SSmlSTableMeta **)taosHashGet(info->superTables, currElement->measure, currElement->measureLen);
if (unlikely(NULL == tableMeta)) {
uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id);
return TSDB_CODE_SML_INTERNAL_ERROR;
}
@ -499,53 +505,52 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd,
oldKV->length = kv.length;
info->needModifySchema = true;
}
if(unlikely(!IS_SAME_KEY)){
if (unlikely(!IS_SAME_KEY)) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
}else{
if(isSuperKVInit){
if(unlikely(cnt >= taosArrayGetSize(superKV))) {
} else {
if (isSuperKVInit) {
if (unlikely(cnt >= taosArrayGetSize(superKV))) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(superKV, cnt);
if(unlikely(kv.type != maxKV->type)){
if (unlikely(kv.type != maxKV->type)) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
if(IS_VAR_DATA_TYPE(kv.type)){
if(kv.length > maxKV->length) {
if (IS_VAR_DATA_TYPE(kv.type)) {
if (kv.length > maxKV->length) {
maxKV->length = kv.length;
}else{
} else {
kv.length = maxKV->length;
}
info->needModifySchema = true;
}
if(unlikely(!IS_SAME_KEY)){
if (unlikely(!IS_SAME_KEY)) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
}else{
} else {
taosArrayPush(superKV, &kv);
}
taosArrayPush(preLineKV, &kv);
}
}else{
if(currElement->colArray == NULL){
currElement->colArray = taosArrayInit(16, sizeof(SSmlKv));
taosArraySetSize(currElement->colArray, 1);
} else {
if (currElement->colArray == NULL) {
currElement->colArray = taosArrayInit_s(16, sizeof(SSmlKv), 1);
}
taosArrayPush(currElement->colArray, &kv); //reserve for timestamp
taosArrayPush(currElement->colArray, &kv); // reserve for timestamp
}
cnt++;
if(IS_SPACE(*sql)){
if (IS_SPACE(*sql)) {
break;
}
(*sql)++;
@ -583,8 +588,8 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
}
// scan ahead first to compute measureTagsLen
const char* tmp = sql;
while (tmp < sqlEnd){
const char *tmp = sql;
while (tmp < sqlEnd) {
if (unlikely(IS_SPACE(tmp))) {
break;
}
@ -594,10 +599,10 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
bool isSameCTable = false;
bool isSameMeasure = false;
if(IS_SAME_CHILD_TABLE){
if (IS_SAME_CHILD_TABLE) {
isSameCTable = true;
isSameMeasure = true;
}else if(info->dataFormat) {
} else if (info->dataFormat) {
isSameMeasure = IS_SAME_SUPER_TABLE;
}
// parse tag
@ -605,10 +610,10 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
elements->tags = sql;
int ret = smlParseTagKv(info, &sql, sqlEnd, elements, isSameMeasure, isSameCTable);
if(unlikely(ret != TSDB_CODE_SUCCESS)){
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
return ret;
}
if(unlikely(info->reRun)){
if (unlikely(info->reRun)) {
return TSDB_CODE_SUCCESS;
}
@ -620,11 +625,11 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
elements->cols = sql;
ret = smlParseColKv(info, &sql, sqlEnd, elements, isSameMeasure, isSameCTable);
if(unlikely(ret != TSDB_CODE_SUCCESS)){
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
return ret;
}
if(unlikely(info->reRun)){
if (unlikely(info->reRun)) {
return TSDB_CODE_SUCCESS;
}
@ -651,16 +656,19 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
return TSDB_CODE_INVALID_TIMESTAMP;
}
// add ts as the first column
SSmlKv kv = { .key = TS, .keyLen = TS_LEN, .type = TSDB_DATA_TYPE_TIMESTAMP, .i = ts, .length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes};
if(info->dataFormat){
SSmlKv kv = {.key = TS,
.keyLen = TS_LEN,
.type = TSDB_DATA_TYPE_TIMESTAMP,
.i = ts,
.length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes};
if (info->dataFormat) {
smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 0);
smlBuildRow(info->currTableDataCtx);
clearColValArray(info->currTableDataCtx->pValues);
}else{
} else {
taosArraySet(elements->colArray, 0, &kv);
}
info->preLine = *elements;
return ret;
}
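Taken as a whole, `smlParseInfluxString` walks one InfluxDB line-protocol record left to right: measurement, comma-separated tag `k=v` pairs, a space, field `k=v` pairs, a space, then the timestamp, which ends up as column 0 via the `kv` built above. A representative input of the shape this routine expects (the concrete names and values are made up):

```c
// measurement  tags                                 fields               timestamp (ns)
const char *line =
    "meters,location=California.SF,groupid=2 current=10.3,voltage=219 1626006833639000000";
```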

View File

@ -20,16 +20,17 @@
#include "clientSml.h"
int32_t is_same_child_table_telnet(const void *a, const void *b){
int32_t is_same_child_table_telnet(const void *a, const void *b) {
SSmlLineInfo *t1 = (SSmlLineInfo *)a;
SSmlLineInfo *t2 = (SSmlLineInfo *)b;
// uError("is_same_child_table_telnet len:%d,%d %s,%s @@@ len:%d,%d %s,%s", t1->measureLen, t2->measureLen,
// t1->measure, t2->measure, t1->tagsLen, t2->tagsLen, t1->tags, t2->tags);
if(t1 == NULL || t2 == NULL || t1->measure == NULL || t2->measure == NULL
|| t1->tags == NULL || t2->tags == NULL)
// uError("is_same_child_table_telnet len:%d,%d %s,%s @@@ len:%d,%d %s,%s", t1->measureLen, t2->measureLen,
// t1->measure, t2->measure, t1->tagsLen, t2->tagsLen, t1->tags, t2->tags);
if (t1 == NULL || t2 == NULL || t1->measure == NULL || t2->measure == NULL || t1->tags == NULL || t2->tags == NULL)
return 1;
return (((t1->measureLen == t2->measureLen) && memcmp(t1->measure, t2->measure, t1->measureLen) == 0)
&& ((t1->tagsLen == t2->tagsLen) && memcmp(t1->tags, t2->tags, t1->tagsLen) == 0)) ? 0 : 1;
return (((t1->measureLen == t2->measureLen) && memcmp(t1->measure, t2->measure, t1->measureLen) == 0) &&
((t1->tagsLen == t2->tagsLen) && memcmp(t1->tags, t2->tags, t1->tagsLen) == 0))
? 0
: 1;
}
int64_t smlParseOpenTsdbTime(SSmlHandle *info, const char *data, int32_t len) {
@ -40,7 +41,7 @@ int64_t smlParseOpenTsdbTime(SSmlHandle *info, const char *data, int32_t len) {
return -1;
}
if (unlikely(len == 1 && data[0] == '0')) {
return taosGetTimestampNs()/smlFactorNS[toPrecision];
return taosGetTimestampNs() / smlFactorNS[toPrecision];
}
int8_t fromPrecision = smlGetTsTypeByLen(len);
if (unlikely(fromPrecision == -1)) {
@ -56,7 +57,6 @@ int64_t smlParseOpenTsdbTime(SSmlHandle *info, const char *data, int32_t len) {
return ts;
}
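The `'0'` shortcut near the top of `smlParseOpenTsdbTime` substitutes the current time, dividing the nanosecond clock down by a per-precision factor; the general path infers the source precision from the digit count and converts to the database precision. A small sketch of that divisor idea, assuming the usual TDengine precision codes (milli = 0, micro = 1, nano = 2); `smlFactorNS` is the real table in the source, the one below is only illustrative:

```c
#include <stdint.h>

enum { PREC_MILLI = 0, PREC_MICRO = 1, PREC_NANO = 2 };  // assumed codes

// Divisors that bring a nanosecond timestamp down to each precision.
static const int64_t kFactorNS[] = {1000000LL, 1000LL, 1LL};

static int64_t nsToPrecision(int64_t tsNs, int toPrecision) {
  return tsNs / kFactorNS[toPrecision];
}
```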
static void smlParseTelnetElement(char **sql, char *sqlEnd, char **data, int32_t *len) {
while (*sql < sqlEnd) {
if (unlikely((**sql != SPACE && !(*data)))) {
@ -70,7 +70,7 @@ static void smlParseTelnetElement(char **sql, char *sqlEnd, char **data, int32_t
}
static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SSmlLineInfo *elements, SSmlMsgBuf *msg) {
if(is_same_child_table_telnet(elements, &info->preLine) == 0){
if (is_same_child_table_telnet(elements, &info->preLine) == 0) {
elements->measureTag = info->preLine.measureTag;
return TSDB_CODE_SUCCESS;
}
@ -82,15 +82,15 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS
SArray *maxKVs = info->maxTagKVs;
bool isSuperKVInit = true;
SArray *superKV = NULL;
if(info->dataFormat){
if(!isSameMeasure){
if (info->dataFormat) {
if (!isSameMeasure) {
SSmlSTableMeta **tmp = (SSmlSTableMeta **)taosHashGet(info->superTables, elements->measure, elements->measureLen);
SSmlSTableMeta *sMeta = NULL;
if(unlikely(tmp == NULL)){
STableMeta * pTableMeta = smlGetMeta(info, elements->measure, elements->measureLen);
if(pTableMeta == NULL){
SSmlSTableMeta *sMeta = NULL;
if (unlikely(tmp == NULL)) {
STableMeta *pTableMeta = smlGetMeta(info, elements->measure, elements->measureLen);
if (pTableMeta == NULL) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
sMeta = smlBuildSTableMeta(info->dataFormat);
@ -101,23 +101,23 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS
info->currSTableMeta = (*tmp)->tableMeta;
superKV = (*tmp)->tags;
if(unlikely(taosArrayGetSize(superKV) == 0)){
if (unlikely(taosArrayGetSize(superKV) == 0)) {
isSuperKVInit = false;
}
taosArraySetSize(maxKVs, 0);
taosArrayClear(maxKVs);
}
}else{
taosArraySetSize(maxKVs, 0);
} else {
taosArrayClear(maxKVs);
}
taosArraySetSize(preLineKV, 0);
taosArrayClear(preLineKV);
const char *sql = data;
while (sql < sqlEnd) {
JUMP_SPACE(sql, sqlEnd)
if (unlikely(*sql == '\0')) break;
const char *key = sql;
size_t keyLen = 0;
size_t keyLen = 0;
// parse key
while (sql < sqlEnd) {
@ -137,14 +137,14 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS
smlBuildInvalidDataMsg(msg, "invalid key or key is too long than 64", key);
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
}
// if (smlCheckDuplicateKey(key, keyLen, dumplicateKey)) {
// smlBuildInvalidDataMsg(msg, "dumplicate key", key);
// return TSDB_CODE_TSC_DUP_NAMES;
// }
// if (smlCheckDuplicateKey(key, keyLen, dumplicateKey)) {
// smlBuildInvalidDataMsg(msg, "dumplicate key", key);
// return TSDB_CODE_TSC_DUP_NAMES;
// }
// parse value
const char *value = sql;
size_t valueLen = 0;
size_t valueLen = 0;
while (sql < sqlEnd) {
// parse value
if (unlikely(*sql == SPACE)) {
@ -169,24 +169,25 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS
SSmlKv kv = {.key = key, .keyLen = keyLen, .type = TSDB_DATA_TYPE_NCHAR, .value = value, .length = valueLen};
if(info->dataFormat){
if(unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)){
if (info->dataFormat) {
if (unlikely(cnt + 1 > info->currSTableMeta->tableInfo.numOfTags)) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
if(isSameMeasure){
if(unlikely(cnt >= taosArrayGetSize(maxKVs))) {
if (isSameMeasure) {
if (unlikely(cnt >= taosArrayGetSize(maxKVs))) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(maxKVs, cnt);
if(unlikely(kv.length > maxKV->length)){
if (unlikely(kv.length > maxKV->length)) {
maxKV->length = kv.length;
SSmlSTableMeta **tableMeta = (SSmlSTableMeta **)taosHashGet(info->superTables, elements->measure, elements->measureLen);
if(unlikely(NULL == tableMeta)){
SSmlSTableMeta **tableMeta =
(SSmlSTableMeta **)taosHashGet(info->superTables, elements->measure, elements->measureLen);
if (unlikely(NULL == tableMeta)) {
uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id);
return TSDB_CODE_SML_INTERNAL_ERROR;
}
@ -195,49 +196,50 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS
oldKV->length = kv.length;
info->needModifySchema = true;
}
if(unlikely(!IS_SAME_KEY)){
if (unlikely(!IS_SAME_KEY)) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
}else{
if(isSuperKVInit){
if(unlikely(cnt >= taosArrayGetSize(superKV))) {
} else {
if (isSuperKVInit) {
if (unlikely(cnt >= taosArrayGetSize(superKV))) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(superKV, cnt);
if(unlikely(kv.length > maxKV->length)) {
if (unlikely(kv.length > maxKV->length)) {
maxKV->length = kv.length;
}else{
} else {
kv.length = maxKV->length;
}
info->needModifySchema = true;
if(unlikely(!IS_SAME_KEY)){
if (unlikely(!IS_SAME_KEY)) {
info->dataFormat = false;
info->reRun = true;
info->reRun = true;
return TSDB_CODE_SUCCESS;
}
}else{
} else {
taosArrayPush(superKV, &kv);
}
taosArrayPush(maxKVs, &kv);
}
}else{
} else {
taosArrayPush(maxKVs, &kv);
}
taosArrayPush(preLineKV, &kv);
cnt++;
}
elements->measureTag = (char*)taosMemoryMalloc(elements->measureLen + elements->tagsLen);
elements->measureTag = (char *)taosMemoryMalloc(elements->measureLen + elements->tagsLen);
memcpy(elements->measureTag, elements->measure, elements->measureLen);
memcpy(elements->measureTag + elements->measureLen, elements->tags, elements->tagsLen);
elements->measureTagsLen = elements->measureLen + elements->tagsLen;
SSmlTableInfo **tmp = (SSmlTableInfo **)taosHashGet(info->childTables, elements->measureTag, elements->measureLen + elements->tagsLen);
SSmlTableInfo **tmp =
(SSmlTableInfo **)taosHashGet(info->childTables, elements->measureTag, elements->measureLen + elements->tagsLen);
SSmlTableInfo *tinfo = NULL;
if (unlikely(tmp == NULL)) {
tinfo = smlBuildTableInfo(1, elements->measure, elements->measureLen);
@ -258,10 +260,11 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS
}
}
// SSmlLineInfo *key = (SSmlLineInfo *)taosMemoryMalloc(sizeof(SSmlLineInfo));
// *key = *elements;
// tinfo->key = key;
taosHashPut(info->childTables, elements->measureTag, elements->measureLen + elements->tagsLen, &tinfo, POINTER_BYTES);
// SSmlLineInfo *key = (SSmlLineInfo *)taosMemoryMalloc(sizeof(SSmlLineInfo));
// *key = *elements;
// tinfo->key = key;
taosHashPut(info->childTables, elements->measureTag, elements->measureLen + elements->tagsLen, &tinfo,
POINTER_BYTES);
tmp = &tinfo;
}
if (info->dataFormat) info->currTableDataCtx = (*tmp)->tableDataCtx;
@ -288,7 +291,7 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
}
bool needConverTime = false;  // TS is parsed before the tags (and the table meta), so the time may need conversion later
if(info->dataFormat && info->currSTableMeta == NULL){
if (info->dataFormat && info->currSTableMeta == NULL) {
needConverTime = true;
}
int64_t ts = smlParseOpenTsdbTime(info, elements->timestamp, elements->timestampLen);
@ -296,7 +299,11 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", sql);
return TSDB_CODE_INVALID_TIMESTAMP;
}
SSmlKv kvTs = { .key = TS, .keyLen = TS_LEN, .type = TSDB_DATA_TYPE_TIMESTAMP, .i = ts, .length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes};
SSmlKv kvTs = {.key = TS,
.keyLen = TS_LEN,
.type = TSDB_DATA_TYPE_TIMESTAMP,
.i = ts,
.length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes};
// parse value
smlParseTelnetElement(&sql, sqlEnd, &elements->cols, &elements->colsLen);
@ -324,19 +331,19 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
return ret;
}
if(unlikely(info->reRun)){
if (unlikely(info->reRun)) {
return TSDB_CODE_SUCCESS;
}
if(info->dataFormat){
if(needConverTime) {
if (info->dataFormat) {
if (needConverTime) {
kvTs.i = convertTimePrecision(kvTs.i, TSDB_TIME_PRECISION_NANO, info->currSTableMeta->tableInfo.precision);
}
ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kvTs, 0);
if(ret == TSDB_CODE_SUCCESS){
if (ret == TSDB_CODE_SUCCESS) {
ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 1);
}
if(ret == TSDB_CODE_SUCCESS){
if (ret == TSDB_CODE_SUCCESS) {
ret = smlBuildRow(info->currTableDataCtx);
}
clearColValArray(info->currTableDataCtx->pValues);
@ -344,8 +351,8 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
return ret;
}
}else{
if(elements->colArray == NULL){
} else {
if (elements->colArray == NULL) {
elements->colArray = taosArrayInit(16, sizeof(SSmlKv));
}
taosArrayPush(elements->colArray, &kvTs);

View File

@ -32,15 +32,15 @@
sem_post(x)
#endif
int32_t tmqAskEp(tmq_t* tmq, bool async);
typedef struct {
struct SMqMgmt {
int8_t inited;
tmr_h timer;
int32_t rsetId;
} SMqMgmt;
};
static SMqMgmt tmqMgmt = {0};
static TdThreadOnce tmqInit = PTHREAD_ONCE_INIT; // initialize only once
volatile int32_t tmqInitRes = 0; // initialize rsp code
static struct SMqMgmt tmqMgmt = {0};
typedef struct {
int8_t tmqRspType;
@ -65,8 +65,7 @@ struct tmq_conf_t {
int8_t withTbName;
int8_t snapEnable;
int32_t snapBatchSize;
bool hbBgEnable;
bool hbBgEnable;
uint16_t port;
int32_t autoCommitInterval;
@ -80,16 +79,15 @@ struct tmq_conf_t {
struct tmq_t {
int64_t refId;
// conf
char groupId[TSDB_CGROUP_LEN];
char clientId[256];
int8_t withTbName;
int8_t useSnapshot;
int8_t autoCommit;
int32_t autoCommitInterval;
int32_t resetOffsetCfg;
int64_t consumerId;
bool hbBgEnable;
char groupId[TSDB_CGROUP_LEN];
char clientId[256];
int8_t withTbName;
int8_t useSnapshot;
int8_t autoCommit;
int32_t autoCommitInterval;
int32_t resetOffsetCfg;
uint64_t consumerId;
bool hbBgEnable;
tmq_commit_cb* commitCb;
void* commitCbUserParam;
@ -155,11 +153,9 @@ typedef struct {
typedef struct {
// subscribe info
char topicName[TSDB_TOPIC_FNAME_LEN];
char db[TSDB_DB_FNAME_LEN];
SArray* vgs; // SArray<SMqClientVg>
char topicName[TSDB_TOPIC_FNAME_LEN];
char db[TSDB_DB_FNAME_LEN];
SArray* vgs; // SArray<SMqClientVg>
SSchemaWrapper schema;
} SMqClientTopic;
@ -221,13 +217,21 @@ typedef struct {
/*int32_t vgId;*/
} SMqCommitCbParam;
static int32_t tmqAskEp(tmq_t* tmq, bool async);
tmq_conf_t* tmq_conf_new() {
tmq_conf_t* conf = taosMemoryCalloc(1, sizeof(tmq_conf_t));
if (conf == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return conf;
}
conf->withTbName = false;
conf->autoCommit = true;
conf->autoCommitInterval = 5000;
conf->resetOffset = TMQ_CONF__RESET_OFFSET__EARLIEAST;
conf->hbBgEnable = true;
return conf;
}
@ -508,8 +512,8 @@ static int32_t tmqSendCommitReq(tmq_t* tmq, SMqClientVg* pVg, SMqClientTopic* pT
.handle = NULL,
};
tscDebug("consumer:%" PRId64 ", commit offset of %s on vgId:%d, offset is %" PRId64, tmq->consumerId, pOffset->subKey,
pVg->vgId, pOffset->val.version);
tscDebug("consumer:0x%" PRIx64 " topic:%s on vgId:%d offset:%" PRId64, tmq->consumerId, pOffset->subKey, pVg->vgId,
pOffset->val.version);
// TODO: put into cb
pVg->committedOffset = pVg->currentOffset;
@ -638,21 +642,18 @@ static int32_t tmqCommitConsumerImpl(tmq_t* tmq, int8_t automatic, int8_t async,
for (int32_t i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
tscDebug("consumer:%" PRId64 ", begin commit for topic %s, vgNum %d", tmq->consumerId, pTopic->topicName,
(int32_t)taosArrayGetSize(pTopic->vgs));
for (int32_t j = 0; j < taosArrayGetSize(pTopic->vgs); j++) {
int32_t numOfVgroups = taosArrayGetSize(pTopic->vgs);
for (int32_t j = 0; j < numOfVgroups; j++) {
SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);
tscDebug("consumer:%" PRId64 ", begin commit for topic %s, vgId:%d", tmq->consumerId, pTopic->topicName,
pVg->vgId);
if (pVg->currentOffset.type > 0 && !tOffsetEqual(&pVg->currentOffset, &pVg->committedOffset)) {
tscDebug("consumer: %" PRId64 ", vg:%d, current %" PRId64 ", committed %" PRId64 "", tmq->consumerId, pVg->vgId,
pVg->currentOffset.version, pVg->committedOffset.version);
tscDebug("consumer:0x%" PRIx64 " topic:%s vgId:%d, current %" PRId64 ", committed %" PRId64, tmq->consumerId,
pTopic->topicName, pVg->vgId, pVg->currentOffset.version, pVg->committedOffset.version);
if (tmqSendCommitReq(tmq, pVg, pTopic, pParamSet) < 0) {
continue;
}
} else {
tscDebug("consumer:0x%" PRIx64 " topic:%s vgId:%d, not commit, current:%" PRId64 ", ordinal:%d/%d",
tmq->consumerId, pTopic->topicName, pVg->vgId, pVg->currentOffset.version, j + 1, numOfVgroups);
}
}
}
@ -788,32 +789,44 @@ OVER:
taosTmrReset(tmqSendHbReq, 1000, param, tmqMgmt.timer, &tmq->hbLiveTimer);
}
int32_t tmqHandleAllDelayedTask(tmq_t* tmq) {
int32_t tmqHandleAllDelayedTask(tmq_t* pTmq) {
STaosQall* qall = taosAllocateQall();
taosReadAllQitems(tmq->delayedTask, qall);
while (1) {
int8_t* pTaskType = NULL;
taosGetQitem(qall, (void**)&pTaskType);
if (pTaskType == NULL) break;
taosReadAllQitems(pTmq->delayedTask, qall);
if (qall->numOfItems == 0) {
taosFreeQall(qall);
return TSDB_CODE_SUCCESS;
}
tscDebug("consumer:0x%" PRIx64 " handle delayed %d tasks before poll data", pTmq->consumerId, qall->numOfItems);
int8_t* pTaskType = NULL;
taosGetQitem(qall, (void**)&pTaskType);
while (pTaskType != NULL) {
if (*pTaskType == TMQ_DELAYED_TASK__ASK_EP) {
tmqAskEp(tmq, true);
tmqAskEp(pTmq, true);
int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t));
*pRefId = tmq->refId;
*pRefId = pTmq->refId;
taosTmrReset(tmqAssignAskEpTask, 1000, pRefId, tmqMgmt.timer, &tmq->epTimer);
tscDebug("consumer:0x%" PRIx64 " next retrieve ep from mnode in 1s", pTmq->consumerId);
taosTmrReset(tmqAssignAskEpTask, 1000, pRefId, tmqMgmt.timer, &pTmq->epTimer);
} else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) {
tmqCommitInner(tmq, NULL, 1, 1, tmq->commitCb, tmq->commitCbUserParam);
tmqCommitInner(pTmq, NULL, 1, 1, pTmq->commitCb, pTmq->commitCbUserParam);
int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t));
*pRefId = tmq->refId;
*pRefId = pTmq->refId;
taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, pRefId, tmqMgmt.timer, &tmq->commitTimer);
tscDebug("consumer:0x%" PRIx64 " next commit to mnode in %.2fs", pTmq->consumerId,
pTmq->autoCommitInterval / 1000.0);
taosTmrReset(tmqAssignDelayedCommitTask, pTmq->autoCommitInterval, pRefId, tmqMgmt.timer, &pTmq->commitTimer);
} else if (*pTaskType == TMQ_DELAYED_TASK__REPORT) {
}
taosFreeQitem(pTaskType);
taosGetQitem(qall, (void**)&pTaskType);
}
taosFreeQall(qall);
return 0;
}
@ -932,23 +945,31 @@ void tmqFreeImpl(void* handle) {
taosMemoryFree(tmq);
}
static void tmqMgmtInit(void) {
tmqInitRes = 0;
tmqMgmt.timer = taosTmrInit(1000, 100, 360000, "TMQ");
if (tmqMgmt.timer == NULL) {
tmqInitRes = TSDB_CODE_OUT_OF_MEMORY;
}
tmqMgmt.rsetId = taosOpenRef(10000, tmqFreeImpl);
if (tmqMgmt.rsetId < 0) {
tmqInitRes = terrno;
}
}
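This refactor moves global setup out of `tmq_consumer_new`'s inited-flag CAS and into a once-guard: `tmqMgmtInit` runs exactly once via `taosThreadOnce`, and because a once-routine cannot return a value, its failure code is parked in the global `tmqInitRes` for callers to inspect. The same pattern in plain pthreads terms (a sketch, not the TDengine wrapper):

```c
#include <pthread.h>

static pthread_once_t once = PTHREAD_ONCE_INIT;
static volatile int initRes = 0;  // the init routine cannot return a code

static void initOnce(void) {
  // ... acquire timers / ref sets; on failure record the error:
  // initRes = SOME_ERROR_CODE;
}

int moduleInit(void) {
  pthread_once(&once, initOnce);  // runs initOnce at most once, thread-safe
  return initRes;
}
```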
tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
// init timer
int8_t inited = atomic_val_compare_exchange_8(&tmqMgmt.inited, 0, 1);
if (inited == 0) {
tmqMgmt.timer = taosTmrInit(1000, 100, 360000, "TMQ");
if (tmqMgmt.timer == NULL) {
atomic_store_8(&tmqMgmt.inited, 0);
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
tmqMgmt.rsetId = taosOpenRef(10000, tmqFreeImpl);
taosThreadOnce(&tmqInit, tmqMgmtInit);
if (tmqInitRes != 0) {
terrno = tmqInitRes;
return NULL;
}
tmq_t* pTmq = taosMemoryCalloc(1, sizeof(tmq_t));
if (pTmq == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tscError("setting up new consumer failed since %s, consumer group %s", terrstr(), conf->groupId);
tscError("failed to create consumer, consumer group %s, code:%s", conf->groupId, terrstr());
return NULL;
}
@ -962,7 +983,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
if (pTmq->clientTopics == NULL || pTmq->mqueue == NULL || pTmq->qall == NULL || pTmq->delayedTask == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tscError("consumer %" PRId64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(),
tscError("consumer:0x%" PRIx64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(),
pTmq->groupId);
goto FAIL;
}
@ -992,7 +1013,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
// init semaphore
if (tsem_init(&pTmq->rspSem, 0, 0) != 0) {
tscError("consumer %" PRId64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(),
tscError("consumer:0x %" PRIx64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(),
pTmq->groupId);
goto FAIL;
}
@ -1000,7 +1021,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
// init connection
pTmq->pTscObj = taos_connect_internal(conf->ip, user, pass, NULL, NULL, conf->port, CONN_TYPE__TMQ);
if (pTmq->pTscObj == NULL) {
tscError("consumer %" PRId64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(),
tscError("consumer:0x %" PRIx64 " setup failed since %s, consumer group %s", pTmq->consumerId, terrstr(),
pTmq->groupId);
tsem_destroy(&pTmq->rspSem);
goto FAIL;
@ -1018,8 +1039,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, 1000, pRefId, tmqMgmt.timer);
}
tscInfo("consumer %" PRId64 " is setup, consumer group %s", pTmq->consumerId, pTmq->groupId);
tscInfo("consumer:0x%" PRIx64 " is setup, consumer groupId %s", pTmq->consumerId, pTmq->groupId);
return pTmq;
FAIL:
@ -1028,6 +1048,7 @@ FAIL:
if (pTmq->delayedTask) taosCloseQueue(pTmq->delayedTask);
if (pTmq->qall) taosFreeQall(pTmq->qall);
taosMemoryFree(pTmq);
return NULL;
}
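The added `return NULL;` closes off the FAIL path, which previously fell through; the function keeps the single-exit `goto FAIL` idiom, where every early failure jumps to one cleanup block that releases whatever was acquired so far. The idiom in miniature (a generic sketch, not TDengine code):

```c
#include <stdlib.h>

typedef struct { void *q1, *q2; } Obj;

static Obj *objNew(void) {
  Obj *p = calloc(1, sizeof(Obj));
  if (p == NULL) return NULL;
  p->q1 = malloc(16);
  if (p->q1 == NULL) goto FAIL;
  p->q2 = malloc(16);
  if (p->q2 == NULL) goto FAIL;
  return p;
FAIL:
  free(p->q1);  // free(NULL) is a no-op, so partial teardown is safe
  free(p->q2);
  free(p);
  return NULL;  // the counterpart of the return added in the diff above
}
```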
@ -1037,44 +1058,52 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
void* buf = NULL;
SMsgSendInfo* sendInfo = NULL;
SCMSubscribeReq req = {0};
int32_t code = -1;
int32_t code = 0;
tscDebug("tmq subscribe, consumer: %" PRId64 ", topic num %d", tmq->consumerId, sz);
tscDebug("consumer:0x%" PRIx64 " tmq subscribe start, numOfTopic %d", tmq->consumerId, sz);
req.consumerId = tmq->consumerId;
tstrncpy(req.clientId, tmq->clientId, 256);
tstrncpy(req.cgroup, tmq->groupId, TSDB_CGROUP_LEN);
req.topicNames = taosArrayInit(sz, sizeof(void*));
if (req.topicNames == NULL) goto FAIL;
tscDebug("tmq subscribe, consumer: %" PRId64 ", topic num %d", tmq->consumerId, sz);
if (req.topicNames == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto FAIL;
}
for (int32_t i = 0; i < sz; i++) {
char* topic = taosArrayGetP(container, i);
SName name = {0};
tNameSetDbName(&name, tmq->pTscObj->acctId, topic, strlen(topic));
char* topicFName = taosMemoryCalloc(1, TSDB_TOPIC_FNAME_LEN);
if (topicFName == NULL) {
goto FAIL;
}
tNameExtractFullName(&name, topicFName);
tscDebug("subscribe topic: %s", topicFName);
tNameExtractFullName(&name, topicFName);
tscDebug("consumer:0x%" PRIx64 ", subscribe topic: %s", tmq->consumerId, topicFName);
taosArrayPush(req.topicNames, &topicFName);
}
int32_t tlen = tSerializeSCMSubscribeReq(NULL, &req);
buf = taosMemoryMalloc(tlen);
if (buf == NULL) goto FAIL;
if (buf == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto FAIL;
}
void* abuf = buf;
tSerializeSCMSubscribeReq(&abuf, &req);
sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
if (sendInfo == NULL) goto FAIL;
if (sendInfo == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto FAIL;
}
SMqSubscribeCbParam param = {
.rspErr = 0,
@ -1082,7 +1111,9 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
.epoch = tmq->epoch,
};
if (tsem_init(&param.rspSem, 0, 0) != 0) goto FAIL;
if (tsem_init(&param.rspSem, 0, 0) != 0) {
goto FAIL;
}
sendInfo->msgInfo = (SDataBuf){
.pData = buf,
@ -1108,15 +1139,18 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
tsem_wait(&param.rspSem);
tsem_destroy(&param.rspSem);
code = param.rspErr;
if (code != 0) goto FAIL;
if (param.rspErr != 0) {
code = param.rspErr;
goto FAIL;
}
int32_t retryCnt = 0;
while (TSDB_CODE_MND_CONSUMER_NOT_READY == tmqAskEp(tmq, false)) {
if (retryCnt++ > 10) {
goto FAIL;
}
tscDebug("consumer not ready, retry");
tscDebug("consumer:0x%" PRIx64 ", mnd not ready for subscribe, retry:%d in 500ms", tmq->consumerId, retryCnt);
taosMsleep(500);
}
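The subscribe path now gives up after roughly a dozen `tmqAskEp` attempts spaced 500 ms apart, logging the attempt number, instead of retrying silently. That guard, isolated (a sketch; `askEp` and `NOT_READY` stand in for `tmqAskEp` and `TSDB_CODE_MND_CONSUMER_NOT_READY`):

```c
#include <stdbool.h>
#include <unistd.h>  // usleep

enum { OK = 0, NOT_READY = 1 };

static int askEp(void) { return OK; }  // stand-in for tmqAskEp(tmq, false)

static bool waitUntilReady(void) {
  int retryCnt = 0;
  while (askEp() == NOT_READY) {
    if (retryCnt++ > 10) return false;  // give up after ~a dozen attempts
    usleep(500 * 1000);                 // 500 ms, mirrors taosMsleep(500)
  }
  return true;
}
```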
@ -1134,7 +1168,6 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
tmq->commitTimer = taosTmrStart(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, pRefId2, tmqMgmt.timer);
}
code = 0;
FAIL:
taosArrayDestroyP(req.topicNames, taosMemoryFree);
taosMemoryFree(buf);
@ -1229,7 +1262,7 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tDecoderClear(&decoder);
memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead));
tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d",
tscDebug("consumer:0x%" PRIx64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d",
tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version,
rspType);
@ -1250,7 +1283,7 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
taosMemoryFree(pMsg->pData);
taosMemoryFree(pMsg->pEpSet);
tscDebug("consumer:%" PRId64 ", put poll res into mqueue %p", tmq->consumerId, pRspWrapper);
tscDebug("consumer:0x%" PRIx64 ", put poll res into mqueue %p", tmq->consumerId, pRspWrapper);
taosWriteQitem(tmq->mqueue, pRspWrapper);
tsem_post(&tmq->rspSem);
@ -1267,10 +1300,12 @@ CREATE_MSG_FAIL:
bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) {
bool set = false;
int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics);
int32_t topicNumGet = taosArrayGetSize(pRsp->topics);
char vgKey[TSDB_TOPIC_FNAME_LEN + 22];
tscDebug("consumer:%" PRId64 ", update ep epoch %d to epoch %d, topic num:%d", tmq->consumerId, tmq->epoch, epoch,
topicNumGet);
char vgKey[TSDB_TOPIC_FNAME_LEN + 22];
tscDebug("consumer:0x%" PRIx64 " update ep epoch from %d to epoch %d, incoming topics:%d, existed topics:%d",
tmq->consumerId, tmq->epoch, epoch, topicNumGet, topicNumCur);
SArray* newTopics = taosArrayInit(topicNumGet, sizeof(SMqClientTopic));
if (newTopics == NULL) {
@ -1282,19 +1317,19 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) {
taosArrayDestroy(newTopics);
return false;
}
int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics);
for (int32_t i = 0; i < topicNumCur; i++) {
// find old topic
SMqClientTopic* pTopicCur = taosArrayGet(tmq->clientTopics, i);
if (pTopicCur->vgs) {
int32_t vgNumCur = taosArrayGetSize(pTopicCur->vgs);
tscDebug("consumer:%" PRId64 ", new vg num: %d", tmq->consumerId, vgNumCur);
tscDebug("consumer:0x%" PRIx64 ", new vg num: %d", tmq->consumerId, vgNumCur);
for (int32_t j = 0; j < vgNumCur; j++) {
SMqClientVg* pVgCur = taosArrayGet(pTopicCur->vgs, j);
sprintf(vgKey, "%s:%d", pTopicCur->topicName, pVgCur->vgId);
char buf[80];
tFormatOffset(buf, 80, &pVgCur->currentOffset);
tscDebug("consumer:%" PRId64 ", epoch %d vgId:%d vgKey is %s, offset is %s", tmq->consumerId, epoch,
tscDebug("consumer:0x%" PRIx64 ", epoch %d vgId:%d vgKey is %s, offset is %s", tmq->consumerId, epoch,
pVgCur->vgId, vgKey, buf);
taosHashPut(pHash, vgKey, strlen(vgKey), &pVgCur->currentOffset, sizeof(STqOffsetVal));
}
@ -1310,7 +1345,7 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) {
tstrncpy(topic.topicName, pTopicEp->topic, TSDB_TOPIC_FNAME_LEN);
tstrncpy(topic.db, pTopicEp->db, TSDB_DB_FNAME_LEN);
tscDebug("consumer:%" PRId64 ", update topic: %s", tmq->consumerId, topic.topicName);
tscDebug("consumer:0x%" PRIx64 ", update topic: %s", tmq->consumerId, topic.topicName);
int32_t vgNumGet = taosArrayGetSize(pTopicEp->vgs);
topic.vgs = taosArrayInit(vgNumGet, sizeof(SMqClientVg));
@ -1336,6 +1371,8 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) {
}
taosArrayPush(newTopics, &topic);
}
// destroy the currently buffered topic info
if (tmq->clientTopics) {
int32_t sz = taosArrayGetSize(tmq->clientTopics);
for (int32_t i = 0; i < sz; i++) {
@ -1343,17 +1380,21 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) {
if (pTopic->schema.nCols) taosMemoryFreeClear(pTopic->schema.pSchema);
taosArrayDestroy(pTopic->vgs);
}
taosArrayDestroy(tmq->clientTopics);
}
taosHashCleanup(pHash);
tmq->clientTopics = newTopics;
if (taosArrayGetSize(tmq->clientTopics) == 0)
if (taosArrayGetSize(tmq->clientTopics) == 0) {
atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__NO_TOPIC);
else
} else {
atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__READY);
}
atomic_store_32(&tmq->epoch, epoch);
tscDebug("consumer:0x%" PRIx64 ", update topic info completed", tmq->consumerId);
return set;
}
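The mechanics behind `tmqUpdateEp`: before the old topic list is torn down, each vgroup's current offset is stashed in a hash keyed by `"topic:vgId"`, and the freshly received topology looks itself up in that hash so an unchanged vgroup resumes where it left off. The key construction in isolation (a sketch; the buffer size mirrors the `vgKey[TSDB_TOPIC_FNAME_LEN + 22]` declaration above, with the length value assumed):

```c
#include <stdint.h>
#include <stdio.h>

#define TOPIC_FNAME_LEN 192  // assumed stand-in for TSDB_TOPIC_FNAME_LEN

// Builds the "topic:vgId" key used to carry a vgroup's offset across an
// endpoint update.
static void buildVgKey(char key[TOPIC_FNAME_LEN + 22], const char *topicName,
                       int32_t vgId) {
  snprintf(key, TOPIC_FNAME_LEN + 22, "%s:%d", topicName, vgId);
}
```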
@ -1376,8 +1417,8 @@ int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) {
pParam->code = code;
if (code != 0) {
tscError("consumer:%" PRId64 ", get topic endpoint error, not ready, wait:%d, code %x", tmq->consumerId,
pParam->async, code);
tscError("consumer:0x%" PRIx64 ", get topic endpoint error, async:%d, code:%s", tmq->consumerId, pParam->async,
tstrerror(code));
goto END;
}
@ -1386,11 +1427,15 @@ int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) {
// The epoch only increases when an ep msg carrying a newer epoch is received
SMqRspHead* head = pMsg->pData;
int32_t epoch = atomic_load_32(&tmq->epoch);
tscDebug("consumer:%" PRId64 ", recv ep, msg epoch %d, current epoch %d", tmq->consumerId, head->epoch, epoch);
if (head->epoch <= epoch) {
tscDebug("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d, no need to update local ep",
tmq->consumerId, head->epoch, epoch);
goto END;
}
tscDebug("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d, update local ep", tmq->consumerId,
head->epoch, epoch);
if (!async) {
SMqAskEpRsp rsp;
tDecodeSMqAskEpRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &rsp);
@ -1405,6 +1450,7 @@ int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) {
code = -1;
goto END;
}
pWrapper->tmqRspType = TMQ_MSG_TYPE__EP_RSP;
pWrapper->epoch = head->epoch;
memcpy(&pWrapper->msg, pMsg->pData, sizeof(SMqRspHead));
@ -1428,16 +1474,17 @@ END:
}
int32_t tmqAskEp(tmq_t* tmq, bool async) {
int32_t code = 0;
int32_t code = TSDB_CODE_SUCCESS;
#if 0
int8_t epStatus = atomic_val_compare_exchange_8(&tmq->epStatus, 0, 1);
if (epStatus == 1) {
int32_t epSkipCnt = atomic_add_fetch_32(&tmq->epSkipCnt, 1);
tscTrace("consumer:%" PRId64 ", skip ask ep cnt %d", tmq->consumerId, epSkipCnt);
tscTrace("consumer:0x%" PRIx64 ", skip ask ep cnt %d", tmq->consumerId, epSkipCnt);
if (epSkipCnt < 5000) return 0;
}
atomic_store_32(&tmq->epSkipCnt, 0);
#endif
SMqAskEpReq req = {0};
req.consumerId = tmq->consumerId;
req.epoch = tmq->epoch;
@ -1445,27 +1492,31 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
int32_t tlen = tSerializeSMqAskEpReq(NULL, 0, &req);
if (tlen < 0) {
tscError("tSerializeSMqAskEpReq failed");
tscError("consumer:0x%" PRIx64 ", tSerializeSMqAskEpReq failed", tmq->consumerId);
return -1;
}
void* pReq = taosMemoryCalloc(1, tlen);
if (pReq == NULL) {
tscError("failed to malloc askEpReq msg, size:%d", tlen);
tscError("consumer:0x%" PRIx64 ", failed to malloc askEpReq msg, size:%d", tmq->consumerId, tlen);
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
if (tSerializeSMqAskEpReq(pReq, tlen, &req) < 0) {
tscError("tSerializeSMqAskEpReq %d failed", tlen);
tscError("consumer:0x%" PRIx64 ", tSerializeSMqAskEpReq %d failed", tmq->consumerId, tlen);
taosMemoryFree(pReq);
return -1;
}
SMqAskEpCbParam* pParam = taosMemoryCalloc(1, sizeof(SMqAskEpCbParam));
if (pParam == NULL) {
tscError("failed to malloc subscribe param");
tscError("consumer:0x%" PRIx64 ", failed to malloc subscribe param", tmq->consumerId);
taosMemoryFree(pReq);
/*atomic_store_8(&tmq->epStatus, 0);*/
return -1;
}
pParam->refId = tmq->refId;
pParam->epoch = tmq->epoch;
pParam->async = async;
@ -1486,15 +1537,14 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
.handle = NULL,
};
sendInfo->requestId = generateRequestId();
sendInfo->requestId = tmq->consumerId;
sendInfo->requestObjRefId = 0;
sendInfo->param = pParam;
sendInfo->fp = tmqAskEpCb;
sendInfo->msgType = TDMT_MND_TMQ_ASK_EP;
SEpSet epSet = getEpSet_s(&tmq->pTscObj->pAppInfo->mgmtEp);
tscDebug("consumer:%" PRId64 ", ask ep", tmq->consumerId);
tscDebug("consumer:0x%" PRIx64 " ask ep from mnode, async:%d", tmq->consumerId, async);
int64_t transporterId = 0;
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
@ -1504,6 +1554,7 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
code = pParam->code;
taosMemoryFree(pParam);
}
return code;
}
@ -1576,26 +1627,31 @@ SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) {
return pRspObj;
}
// broadcast the poll request to all related vnodes
int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
int32_t numOfTopics = taosArrayGetSize(tmq->clientTopics);
tscDebug("consumer:0x%" PRIx64 " start to poll data, numOfTopics:%d", tmq->consumerId, numOfTopics);
for (int i = 0; i < numOfTopics; i++) {
SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
for (int j = 0; j < taosArrayGetSize(pTopic->vgs); j++) {
SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);
int32_t vgStatus = atomic_val_compare_exchange_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE, TMQ_VG_STATUS__WAIT);
if (vgStatus != TMQ_VG_STATUS__IDLE) {
if (vgStatus == TMQ_VG_STATUS__WAIT) {
int32_t vgSkipCnt = atomic_add_fetch_32(&pVg->vgSkipCnt, 1);
tscTrace("consumer:%" PRId64 ", epoch %d skip vgId:%d skip cnt %d", tmq->consumerId, tmq->epoch, pVg->vgId,
vgSkipCnt);
tscDebug("consumer:0x%" PRIx64 " epoch %d wait poll-rsp, skip vgId:%d skip cnt %d", tmq->consumerId, tmq->epoch,
pVg->vgId, vgSkipCnt);
continue;
/*if (vgSkipCnt < 10000) continue;*/
#if 0
if (skipCnt < 30000) {
continue;
} else {
tscDebug("consumer:%" PRId64 ",skip vgId:%d skip too much reset", tmq->consumerId, pVg->vgId);
tscDebug("consumer:0x%" PRIx64 ",skip vgId:%d skip too much reset", tmq->consumerId, pVg->vgId);
}
#endif
}
atomic_store_32(&pVg->vgSkipCnt, 0);
SMqPollReq req = {0};
@ -1606,6 +1662,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
tsem_post(&tmq->rspSem);
return -1;
}
char* msg = taosMemoryCalloc(1, msgSize);
if (NULL == msg) {
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
@ -1627,6 +1684,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
tsem_post(&tmq->rspSem);
return -1;
}
pParam->refId = tmq->refId;
pParam->epoch = tmq->epoch;
@ -1648,6 +1706,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
.len = msgSize,
.handle = NULL,
};
sendInfo->requestId = req.reqId;
sendInfo->requestObjRefId = 0;
sendInfo->param = pParam;
@ -1655,18 +1714,19 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
sendInfo->msgType = TDMT_VND_TMQ_CONSUME;
int64_t transporterId = 0;
/*printf("send poll\n");*/
char offsetFormatBuf[80];
tFormatOffset(offsetFormatBuf, 80, &pVg->currentOffset);
tscDebug("consumer:%" PRId64 ", send poll to %s vgId:%d, epoch %d, req offset:%s, reqId:%" PRIu64,
tscDebug("consumer:0x%" PRIx64 ", send poll to %s vgId:%d, epoch %d, req offset:%s, reqId:0x%" PRIx64,
tmq->consumerId, pTopic->topicName, pVg->vgId, tmq->epoch, offsetFormatBuf, req.reqId);
/*printf("send vgId:%d %" PRId64 "\n", pVg->vgId, pVg->currentOffset);*/
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pVg->epSet, &transporterId, sendInfo);
pVg->pollCnt++;
tmq->pollCnt++;
}
}
return 0;
}
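`tmqPollImpl` fans one request out per vgroup, but only after winning a compare-and-swap that flips the vgroup's status from IDLE to WAIT; a vgroup with a poll already in flight is skipped (and now logged with its skip count) rather than polled twice. The gate reduced to its essentials, using C11 atomics in place of the taos wrappers (sketch):

```c
#include <stdatomic.h>
#include <stdbool.h>

enum { VG_IDLE = 0, VG_WAIT = 1 };

// Returns true when the caller won the right to send a poll request;
// false means another request is still in flight for this vgroup.
static bool tryEnterPoll(atomic_int *vgStatus) {
  int expected = VG_IDLE;
  return atomic_compare_exchange_strong(vgStatus, &expected, VG_WAIT);
}
```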
@ -1704,7 +1764,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
}
}
tscDebug("consumer:%" PRId64 " handle rsp %p", tmq->consumerId, rspWrapper);
tscDebug("consumer:0x%" PRIx64 " handle rsp %p", tmq->consumerId, rspWrapper);
if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__END_RSP) {
taosFreeQitem(rspWrapper);
@ -1712,7 +1772,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
return NULL;
} else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__POLL_RSP) {
SMqPollRspWrapper* pollRspWrapper = (SMqPollRspWrapper*)rspWrapper;
tscDebug("consumer %" PRId64 " actual process poll rsp", tmq->consumerId);
tscDebug("consumer:0x%" PRIx64 " actual process poll rsp", tmq->consumerId);
/*atomic_sub_fetch_32(&tmq->readyRequest, 1);*/
int32_t consumerEpoch = atomic_load_32(&tmq->epoch);
if (pollRspWrapper->dataRsp.head.epoch == consumerEpoch) {
@ -1731,8 +1791,8 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
taosFreeQitem(pollRspWrapper);
return pRsp;
} else {
tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
pollRspWrapper->dataRsp.head.epoch, consumerEpoch);
tscDebug("consumer:0x%" PRIx64 " msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
tmq->consumerId, pollRspWrapper->dataRsp.head.epoch, consumerEpoch);
tmqFreeRspWrapper(rspWrapper);
taosFreeQitem(pollRspWrapper);
}
@ -1750,8 +1810,8 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
taosFreeQitem(pollRspWrapper);
return pRsp;
} else {
tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
pollRspWrapper->metaRsp.head.epoch, consumerEpoch);
tscDebug("consumer:0x%" PRIx64 " msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
tmq->consumerId, pollRspWrapper->metaRsp.head.epoch, consumerEpoch);
tmqFreeRspWrapper(rspWrapper);
taosFreeQitem(pollRspWrapper);
}
@ -1781,8 +1841,8 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
taosFreeQitem(pollRspWrapper);
return pRsp;
} else {
tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
pollRspWrapper->taosxRsp.head.epoch, consumerEpoch);
tscDebug("consumer:0x%" PRIx64 " msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
tmq->consumerId, pollRspWrapper->taosxRsp.head.epoch, consumerEpoch);
tmqFreeRspWrapper(rspWrapper);
taosFreeQitem(pollRspWrapper);
}
@ -1792,7 +1852,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
tmqHandleNoPollRsp(tmq, rspWrapper, &reset);
taosFreeQitem(rspWrapper);
if (pollIfReset && reset) {
tscDebug("consumer:%" PRId64 ", reset and repoll", tmq->consumerId);
tscDebug("consumer:0x%" PRIx64 ", reset and repoll", tmq->consumerId);
tmqPollImpl(tmq, timeout);
}
}
@ -1803,7 +1863,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
void* rspObj;
int64_t startTime = taosGetTimestampMs();
tscDebug("consumer:%" PRId64 ", start poll at %" PRId64, tmq->consumerId, startTime);
tscDebug("consumer:0x%" PRIx64 ", start poll at %" PRId64, tmq->consumerId, startTime);
#if 0
tmqHandleAllDelayedTask(tmq);
@ -1816,7 +1876,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
// in no topic status, delayed task also need to be processed
if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__INIT) {
tscDebug("consumer:%" PRId64 ", poll return since consumer status is init", tmq->consumerId);
tscDebug("consumer:0x%" PRIx64 ", poll return since consumer status is init", tmq->consumerId);
return NULL;
}
@ -1826,35 +1886,38 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
if (retryCnt++ > 10) {
return NULL;
}
tscDebug("consumer not ready, retry");
tscDebug("consumer:0x%" PRIx64 " not ready, retry:%d/10 in 500ms", tmq->consumerId, retryCnt);
taosMsleep(500);
}
}
while (1) {
tmqHandleAllDelayedTask(tmq);
if (tmqPollImpl(tmq, timeout) < 0) {
tscDebug("consumer:%" PRId64 " return since poll err", tmq->consumerId);
tscDebug("consumer:0x%" PRIx64 " return due to poll error", tmq->consumerId);
/*return NULL;*/
}
rspObj = tmqHandleAllRsp(tmq, timeout, false);
if (rspObj) {
tscDebug("consumer:%" PRId64 ", return rsp %p", tmq->consumerId, rspObj);
tscDebug("consumer:0x%" PRIx64 ", return rsp %p", tmq->consumerId, rspObj);
return (TAOS_RES*)rspObj;
} else if (terrno == TSDB_CODE_TQ_NO_COMMITTED_OFFSET) {
tscDebug("consumer:%" PRId64 ", return null since no committed offset", tmq->consumerId);
tscDebug("consumer:0x%" PRIx64 ", return null since no committed offset", tmq->consumerId);
return NULL;
}
if (timeout != -1) {
int64_t currentTime = taosGetTimestampMs();
int64_t passedTime = currentTime - startTime;
if (passedTime > timeout) {
tscDebug("consumer:%" PRId64 ", (epoch %d) timeout, no rsp, start time %" PRId64 ", current time %" PRId64,
tscDebug("consumer:0x%" PRIx64 ", (epoch %d) timeout, no rsp, start time %" PRId64 ", current time %" PRId64,
tmq->consumerId, tmq->epoch, startTime, currentTime);
return NULL;
}
/*tscInfo("consumer:%" PRId64 ", (epoch %d) wait, start time %" PRId64 ", current time %" PRId64*/
/*tscInfo("consumer:0x%" PRIx64 ", (epoch %d) wait, start time %" PRId64 ", current time %" PRId64*/
/*", left time %" PRId64,*/
/*tmq->consumerId, tmq->epoch, startTime, currentTime, (timeout - passedTime));*/
tsem_timewait(&tmq->rspSem, (timeout - passedTime));
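The tail of the poll loop recomputes how much of the caller's budget remains on every pass and waits on the semaphore only that long, so `timeout == -1` means block indefinitely while any other value is honored across multiple poll/handle iterations. Sketched as a helper (hypothetical name):

```c
#include <stdbool.h>
#include <stdint.h>

// Given the loop's start time and the caller's budget, report whether the
// budget is spent and, if not, how long the next wait may block.
static bool budgetLeft(int64_t startMs, int64_t nowMs, int64_t timeoutMs,
                       int64_t *waitMs) {
  if (timeoutMs == -1) { *waitMs = -1; return true; }  // wait indefinitely
  int64_t passed = nowMs - startMs;
  if (passed > timeoutMs) return false;  // budget exhausted: return NULL
  *waitMs = timeoutMs - passed;          // cap the semaphore wait
  return true;
}
```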

View File

@ -162,6 +162,11 @@ void *queryThread(void *arg) {
}
static int32_t numOfThreads = 1;
void tmq_commit_cb_print(tmq_t *pTmq, int32_t code, void *param) {
printf("success, code:%d\n", code);
}
} // namespace
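The `tmq_commit_cb_print` helper added above matches the `tmq_commit_cb` shape: consumer handle, result code, user param. Wiring it in would typically go through the conf object before the consumer is created; a hedged usage sketch, assuming `tmq_conf_set_auto_commit_cb` takes the conf, the callback, and a user pointer:

```c
#include "taos.h"  // tmq_* client API

// Hedged sketch: attach the callback as the auto-commit notifier.
static tmq_t *buildConsumer(void) {
  tmq_conf_t *conf = tmq_conf_new();
  tmq_conf_set(conf, "group.id", "cg1");  // illustrative group id
  tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
  tmq_t *pConsumer = tmq_consumer_new(conf, NULL, 0);
  tmq_conf_destroy(conf);
  return pConsumer;
}
```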
int main(int argc, char** argv) {
@ -176,12 +181,12 @@ int main(int argc, char** argv) {
return RUN_ALL_TESTS();
}
TEST(testCase, driverInit_Test) {
TEST(clientCase, driverInit_Test) {
// taosInitGlobalCfg();
// taos_init();
}
TEST(testCase, connect_Test) {
TEST(clientCase, connect_Test) {
taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg");
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
@ -190,8 +195,8 @@ TEST(testCase, connect_Test) {
}
taos_close(pConn);
}
#if 0
TEST(testCase, create_user_Test) {
TEST(clientCase, create_user_Test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != NULL);
@ -204,7 +209,7 @@ TEST(testCase, create_user_Test) {
taos_close(pConn);
}
TEST(testCase, create_account_Test) {
TEST(clientCase, create_account_Test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != NULL);
@ -217,7 +222,7 @@ TEST(testCase, create_account_Test) {
taos_close(pConn);
}
TEST(testCase, drop_account_Test) {
TEST(clientCase, drop_account_Test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != NULL);
@ -230,7 +235,7 @@ TEST(testCase, drop_account_Test) {
taos_close(pConn);
}
TEST(testCase, show_user_Test) {
TEST(clientCase, show_user_Test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != NULL);
@ -250,7 +255,7 @@ TEST(testCase, show_user_Test) {
taos_close(pConn);
}
TEST(testCase, drop_user_Test) {
TEST(clientCase, drop_user_Test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != NULL);
@ -263,7 +268,7 @@ TEST(testCase, drop_user_Test) {
taos_close(pConn);
}
TEST(testCase, show_db_Test) {
TEST(clientCase, show_db_Test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != NULL);
@ -282,7 +287,7 @@ TEST(testCase, show_db_Test) {
taos_close(pConn);
}
TEST(testCase, create_db_Test) {
TEST(clientCase, create_db_Test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != NULL);
@ -306,7 +311,7 @@ TEST(testCase, create_db_Test) {
taos_close(pConn);
}
TEST(testCase, create_dnode_Test) {
TEST(clientCase, create_dnode_Test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != NULL);
@ -325,7 +330,7 @@ TEST(testCase, create_dnode_Test) {
taos_close(pConn);
}
TEST(testCase, drop_dnode_Test) {
TEST(clientCase, drop_dnode_Test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != NULL);
@ -349,7 +354,7 @@ TEST(testCase, drop_dnode_Test) {
taos_close(pConn);
}
TEST(testCase, use_db_test) {
TEST(clientCase, use_db_test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != NULL);
@ -367,7 +372,7 @@ TEST(testCase, use_db_test) {
taos_close(pConn);
}
// TEST(testCase, drop_db_test) {
// TEST(clientCase, drop_db_test) {
// TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
// assert(pConn != NULL);
//
@ -389,7 +394,7 @@ TEST(testCase, use_db_test) {
// taos_close(pConn);
//}
TEST(testCase, create_stable_Test) {
TEST(clientCase, create_stable_Test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != NULL);
@ -428,7 +433,7 @@ TEST(testCase, create_stable_Test) {
taos_close(pConn);
}
TEST(testCase, create_table_Test) {
TEST(clientCase, create_table_Test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != NULL);
@ -447,7 +452,7 @@ TEST(testCase, create_table_Test) {
taos_close(pConn);
}
TEST(testCase, create_ctable_Test) {
TEST(clientCase, create_ctable_Test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != NULL);
@ -472,7 +477,7 @@ TEST(testCase, create_ctable_Test) {
taos_close(pConn);
}
TEST(testCase, show_stable_Test) {
TEST(clientCase, show_stable_Test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != nullptr);
@ -497,7 +502,7 @@ TEST(testCase, show_stable_Test) {
taos_close(pConn);
}
TEST(testCase, show_vgroup_Test) {
TEST(clientCase, show_vgroup_Test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != NULL);
@ -529,7 +534,7 @@ TEST(testCase, show_vgroup_Test) {
taos_close(pConn);
}
TEST(testCase, create_multiple_tables) {
TEST(clientCase, create_multiple_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
@ -600,7 +605,7 @@ TEST(testCase, create_multiple_tables) {
taos_close(pConn);
}
TEST(testCase, show_table_Test) {
TEST(clientCase, show_table_Test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != NULL);
@ -634,7 +639,7 @@ TEST(testCase, show_table_Test) {
taos_close(pConn);
}
//TEST(testCase, drop_stable_Test) {
//TEST(clientCase, drop_stable_Test) {
// TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
// assert(pConn != nullptr);
//
@ -659,14 +664,14 @@ TEST(testCase, show_table_Test) {
// taos_close(pConn);
//}
TEST(testCase, generated_request_id_test) {
TEST(clientCase, generated_request_id_test) {
SHashObj* phash = taosHashInit(10000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK);
for (int32_t i = 0; i < 50000; ++i) {
uint64_t v = generateRequestId();
void* result = taosHashGet(phash, &v, sizeof(v));
if (result != nullptr) {
printf("0x%lx, index:%d\n", v, i);
// printf("0x%llx, index:%d\n", v, i);
}
assert(result == nullptr);
taosHashPut(phash, &v, sizeof(v), NULL, 0);
@ -675,7 +680,7 @@ TEST(testCase, generated_request_id_test) {
taosHashCleanup(phash);
}
TEST(testCase, insert_test) {
TEST(clientCase, insert_test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
@ -692,9 +697,8 @@ TEST(testCase, insert_test) {
taos_free_result(pRes);
taos_close(pConn);
}
#endif
TEST(testCase, projection_query_tables) {
TEST(clientCase, projection_query_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
@ -752,8 +756,7 @@ TEST(testCase, projection_query_tables) {
taos_close(pConn);
}
#if 0
TEST(testCase, tsbs_perf_test) {
TEST(clientCase, tsbs_perf_test) {
TdThread qid[20] = {0};
for(int32_t i = 0; i < numOfThreads; ++i) {
@ -762,7 +765,7 @@ TEST(testCase, tsbs_perf_test) {
getchar();
}
TEST(testCase, projection_query_stables) {
TEST(clientCase, projection_query_stables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
@ -790,7 +793,7 @@ TEST(testCase, projection_query_stables) {
taos_close(pConn);
}
TEST(testCase, agg_query_tables) {
TEST(clientCase, agg_query_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
@ -825,7 +828,7 @@ create table tm1 using m1 tags(2);
insert into tm0 values('2021-1-1 1:1:1.120', 1) ('2021-1-1 1:1:2.9', 2) tm1 values('2021-1-1 1:1:1.120', 11) ('2021-1-1 1:1:2.99', 22);
*/
TEST(testCase, async_api_test) {
TEST(clientCase, async_api_test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
@ -859,7 +862,7 @@ TEST(testCase, async_api_test) {
taos_close(pConn);
}
TEST(testCase, update_test) {
TEST(clientCase, update_test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
@ -895,6 +898,76 @@ TEST(testCase, update_test) {
}
}
#endif
TEST(clientCase, subscription_test) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
// TAOS_RES* pRes = taos_query(pConn, "create topic topic_t1 as select * from t1");
// if (taos_errno(pRes) != TSDB_CODE_SUCCESS) {
// printf("failed to create topic, code:%s", taos_errstr(pRes));
// taos_free_result(pRes);
// return;
// }
tmq_conf_t* conf = tmq_conf_new();
tmq_conf_set(conf, "enable.auto.commit", "true");
tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
tmq_conf_set(conf, "group.id", "newabcdefgjhijlm__");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "auto.offset.reset", "earliest");
tmq_conf_set(conf, "experimental.snapshot.enable", "true");
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
tmq_conf_destroy(conf);
// build the list of topics to subscribe to
tmq_list_t* topicList = tmq_list_new();
tmq_list_append(topicList, "topic_t1");
// start the subscription
tmq_subscribe(tmq, topicList);
tmq_list_destroy(topicList);
TAOS_FIELD* fields = NULL;
int32_t numOfFields = 0;
int32_t precision = 0;
int32_t totalRows = 0;
int32_t msgCnt = 0;
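  // tmq_consumer_poll blocks for at most this many milliseconds while waiting for new messages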
int32_t timeout = 5000;
while (1) {
TAOS_RES* pRes = tmq_consumer_poll(tmq, timeout);
if (pRes) {
char buf[1024];
int32_t rows = 0;
const char* topicName = tmq_get_topic_name(pRes);
const char* dbName = tmq_get_db_name(pRes);
int32_t vgroupId = tmq_get_vgroup_id(pRes);
printf("topic: %s\n", topicName);
printf("db: %s\n", dbName);
printf("vgroup id: %d\n", vgroupId);
while (1) {
TAOS_ROW row = taos_fetch_row(pRes);
if (row == NULL) break;
fields = taos_fetch_fields(pRes);
numOfFields = taos_field_count(pRes);
precision = taos_result_precision(pRes);
rows++;
taos_print_row(buf, row, fields, numOfFields);
printf("precision: %d, row content: %s\n", precision, buf);
}
}
// return rows;
}
fprintf(stderr, "%d msg consumed, including %d rows\n", msgCnt, totalRows);
}
#pragma GCC diagnostic pop

View File

@ -1546,7 +1546,10 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) {
}
void colDataDestroy(SColumnInfoData* pColData) {
if (!pColData) return;
if (!pColData) {
return;
}
if (IS_VAR_DATA_TYPE(pColData->info.type)) {
taosMemoryFreeClear(pColData->varmeta.offset);
} else {
@ -2525,8 +2528,7 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
pStart += sizeof(uint64_t);
if (pBlock->pDataBlock == NULL) {
pBlock->pDataBlock = taosArrayInit(numOfCols, sizeof(SColumnInfoData));
taosArraySetSize(pBlock->pDataBlock, numOfCols);
pBlock->pDataBlock = taosArrayInit_s(numOfCols, sizeof(SColumnInfoData), numOfCols);
}
for (int32_t i = 0; i < numOfCols; ++i) {

View File

@ -139,7 +139,10 @@ int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow) {
nkv += tPutI16v(NULL, -pTColumn->colId);
nIdx++;
} else {
ASSERT(0);
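      // ASSERTS logs the failure and, unlike a hard ASSERT, lets release builds continue (assumed semantics),
      // so the caller can bail out with an error code instead of aborting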
if (ASSERTS(0, "invalid input")) {
code = TSDB_CODE_INVALID_PARA;
goto _exit;
}
}
pTColumn = (++iTColumn < pTSchema->numOfCols) ? pTSchema->columns + iTColumn : NULL;
@ -176,8 +179,10 @@ int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow) {
ntp = sizeof(SRow) + BIT2_SIZE(pTSchema->numOfCols - 1) + pTSchema->flen + ntp;
break;
default:
ASSERT(0);
break;
if (ASSERTS(0, "impossible")) {
code = TSDB_CODE_INVALID_PARA;
goto _exit;
}
}
if (maxIdx <= UINT8_MAX) {
nkv = sizeof(SRow) + sizeof(SKVIdx) + nIdx + nkv;
@ -306,8 +311,10 @@ int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow) {
pv = pf + pTSchema->flen;
break;
default:
ASSERT(0);
break;
if (ASSERTS(0, "impossible")) {
code = TSDB_CODE_INVALID_PARA;
goto _exit;
}
}
if (pb) {
@ -370,7 +377,7 @@ _exit:
return code;
}
void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) {
int32_t tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) {
ASSERT(iCol < pTSchema->numOfCols);
ASSERT(pRow->sver == pTSchema->version);
@ -381,17 +388,17 @@ void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) {
pColVal->type = pTColumn->type;
pColVal->flag = CV_FLAG_VALUE;
memcpy(&pColVal->value.val, &pRow->ts, sizeof(TSKEY));
return;
return 0;
}
if (pRow->flag == HAS_NONE) {
*pColVal = COL_VAL_NONE(pTColumn->colId, pTColumn->type);
return;
return 0;
}
if (pRow->flag == HAS_NULL) {
*pColVal = COL_VAL_NULL(pTColumn->colId, pTColumn->type);
return;
return 0;
}
if (pRow->flag >> 4) { // KV Row
@ -440,7 +447,7 @@ void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) {
memcpy(&pColVal->value.val, pData, pTColumn->bytes);
}
}
return;
return 0;
} else if (TABS(cid) < pTColumn->colId) {
lidx = mid + 1;
} else {
@ -492,16 +499,16 @@ void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) {
pv = pf + pTSchema->flen;
break;
default:
ASSERT(0);
break;
ASSERTS(0, "invalid row format");
return TSDB_CODE_IVLD_DATA_FMT;
}
if (bv == BIT_FLG_NONE) {
*pColVal = COL_VAL_NONE(pTColumn->colId, pTColumn->type);
return;
return 0;
} else if (bv == BIT_FLG_NULL) {
*pColVal = COL_VAL_NULL(pTColumn->colId, pTColumn->type);
return;
return 0;
}
pColVal->cid = pTColumn->colId;
@ -520,6 +527,8 @@ void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) {
}
}
}
return 0;
}
void tRowDestroy(SRow *pRow) {
@ -710,7 +719,6 @@ int32_t tRowIterOpen(SRow *pRow, STSchema *pTSchema, SRowIter **ppIter) {
_exit:
if (code) {
*ppIter = NULL;
if (pIter) taosMemoryFree(pIter);
} else {
*ppIter = pIter;
}
@ -929,8 +937,8 @@ static int32_t tRowTupleUpsertColData(SRow *pRow, STSchema *pTSchema, SColData *
pv = pf + pTSchema->flen;
break;
default:
ASSERT(0);
break;
ASSERTS(0, "Invalid row flag");
return TSDB_CODE_IVLD_DATA_FMT;
}
while (pColData) {
@ -954,8 +962,8 @@ static int32_t tRowTupleUpsertColData(SRow *pRow, STSchema *pTSchema, SColData *
bv = GET_BIT2(pb, iTColumn - 1);
break;
default:
ASSERT(0);
break;
ASSERTS(0, "Invalid row flag");
return TSDB_CODE_IVLD_DATA_FMT;
}
if (bv == BIT_FLG_NONE) {
@ -1045,7 +1053,8 @@ static int32_t tRowKVUpsertColData(SRow *pRow, STSchema *pTSchema, SColData *aCo
} else if (pRow->flag & KV_FLG_BIG) {
pData = pv + ((uint32_t *)pKVIdx->idx)[iCol];
} else {
ASSERT(0);
ASSERTS(0, "Invalid KV row format");
return TSDB_CODE_IVLD_DATA_FMT;
}
int16_t cid;
@ -1579,7 +1588,7 @@ static FORCE_INLINE int32_t tColDataPutValue(SColData *pColData, uint8_t *pData,
int32_t code = 0;
if (IS_VAR_DATA_TYPE(pColData->type)) {
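    // do the shift in 64 bits: (nVal + 1) << 2 would overflow a 32-bit int once nVal approaches 2^29 entries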
code = tRealloc((uint8_t **)(&pColData->aOffset), (pColData->nVal + 1) << 2);
code = tRealloc((uint8_t **)(&pColData->aOffset), ((int64_t)(pColData->nVal + 1)) << 2);
if (code) goto _exit;
pColData->aOffset[pColData->nVal] = pColData->nData;
@ -2312,35 +2321,25 @@ void tColDataGetValue(SColData *pColData, int32_t iVal, SColVal *pColVal) {
}
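// maps the per-row bitmap of a column to a 2-bit value flag: 0 => NONE, 1 => NULL, 2 => VALUE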
uint8_t tColDataGetBitValue(const SColData *pColData, int32_t iVal) {
uint8_t v;
switch (pColData->flag) {
case HAS_NONE:
v = 0;
break;
return 0;
case HAS_NULL:
v = 1;
break;
return 1;
case (HAS_NULL | HAS_NONE):
v = GET_BIT1(pColData->pBitMap, iVal);
break;
return GET_BIT1(pColData->pBitMap, iVal);
case HAS_VALUE:
v = 2;
break;
return 2;
case (HAS_VALUE | HAS_NONE):
v = GET_BIT1(pColData->pBitMap, iVal);
if (v) v = 2;
break;
return (GET_BIT1(pColData->pBitMap, iVal)) ? 2 : 0;
case (HAS_VALUE | HAS_NULL):
v = GET_BIT1(pColData->pBitMap, iVal) + 1;
break;
return GET_BIT1(pColData->pBitMap, iVal) + 1;
case (HAS_VALUE | HAS_NULL | HAS_NONE):
v = GET_BIT2(pColData->pBitMap, iVal);
break;
return GET_BIT2(pColData->pBitMap, iVal);
default:
ASSERT(0);
break;
ASSERTS(0, "not possible");
return 0;
}
return v;
}
int32_t tColDataCopy(SColData *pColDataFrom, SColData *pColData, xMallocFn xMalloc, void *arg) {

View File

@ -41,6 +41,7 @@ bool tsPrintAuth = false;
// queue & threads
int32_t tsNumOfRpcThreads = 1;
int32_t tsNumOfRpcSessions = 2000;
int32_t tsNumOfCommitThreads = 2;
int32_t tsNumOfTaskQueueThreads = 4;
int32_t tsNumOfMnodeQueryThreads = 4;
@ -54,7 +55,6 @@ int32_t tsNumOfQnodeQueryThreads = 4;
int32_t tsNumOfQnodeFetchThreads = 1;
int32_t tsNumOfSnodeStreamThreads = 4;
int32_t tsNumOfSnodeWriteThreads = 1;
// sync raft
int32_t tsElectInterval = 25 * 1000;
int32_t tsHeartbeatInterval = 1000;
@ -140,6 +140,7 @@ int32_t tsMaxMemUsedByInsert = 1024;
float tsSelectivityRatio = 1.0;
int32_t tsTagFilterResCacheSize = 1024 * 10;
char tsTagFilterCache = 0;
// the maximum allowed query buffer size during query processing for each data node.
// -1 no limit (default)
@ -188,6 +189,7 @@ int32_t tsGrantHBInterval = 60;
int32_t tsUptimeInterval = 300; // seconds
char tsUdfdResFuncs[512] = ""; // udfd resident funcs that teardown when udfd exits
char tsUdfdLdLibPath[512] = "";
bool tsDisableStream = false;
#ifndef _STORAGE
int32_t taosSetTfsCfg(SConfig *pCfg) {
@ -349,6 +351,7 @@ static int32_t taosAddSystemCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "AVX2", tsAVX2Enable, 0) != 0) return -1;
if (cfgAddBool(pCfg, "FMA", tsFMAEnable, 0) != 0) return -1;
if (cfgAddBool(pCfg, "SIMD-builtins", tsSIMDBuiltins, 0) != 0) return -1;
if (cfgAddBool(pCfg, "tagFilterCache", tsTagFilterCache, 0) != 0) return -1;
if (cfgAddInt64(pCfg, "openMax", tsOpenMax, 0, INT64_MAX, 1) != 0) return -1;
#if !defined(_ALPINE)
@ -388,9 +391,12 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "queryRspPolicy", tsQueryRspPolicy, 0, 1, 0) != 0) return -1;
tsNumOfRpcThreads = tsNumOfCores / 2;
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, TSDB_MAX_RPC_THREADS);
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 2, TSDB_MAX_RPC_THREADS);
if (cfgAddInt32(pCfg, "numOfRpcThreads", tsNumOfRpcThreads, 1, 1024, 0) != 0) return -1;
tsNumOfRpcSessions = TRANGE(tsNumOfRpcSessions, 100, 10000);
if (cfgAddInt32(pCfg, "numOfRpcSessions", tsNumOfRpcSessions, 1, 100000, 0) != 0) return -1;
tsNumOfCommitThreads = tsNumOfCores / 2;
tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4);
if (cfgAddInt32(pCfg, "numOfCommitThreads", tsNumOfCommitThreads, 1, 1024, 0) != 0) return -1;
@ -467,6 +473,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddString(pCfg, "udfdResFuncs", tsUdfdResFuncs, 0) != 0) return -1;
if (cfgAddString(pCfg, "udfdLdLibPath", tsUdfdLdLibPath, 0) != 0) return -1;
if (cfgAddBool(pCfg, "disableStream", tsDisableStream, 0) != 0) return -1;
GRANT_CFG_ADD;
return 0;
}
@ -496,11 +504,19 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) {
pItem = cfgGetItem(tsCfg, "numOfRpcThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfRpcThreads = numOfCores / 2;
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, 4);
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 2, TSDB_MAX_RPC_THREADS);
pItem->i32 = tsNumOfRpcThreads;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "numOfRpcSessions");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfRpcSessions = 2000;
tsNumOfRpcSessions = TRANGE(tsNumOfRpcSessions, 100, 10000);
pItem->i32 = tsNumOfRpcSessions;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "numOfCommitThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfCommitThreads = numOfCores / 2;
@ -718,6 +734,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsPrintAuth = cfgGetItem(pCfg, "printAuth")->bval;
tsNumOfRpcThreads = cfgGetItem(pCfg, "numOfRpcThreads")->i32;
tsNumOfRpcSessions = cfgGetItem(pCfg, "numOfRpcSessions")->i32;
tsNumOfCommitThreads = cfgGetItem(pCfg, "numOfCommitThreads")->i32;
tsNumOfMnodeReadThreads = cfgGetItem(pCfg, "numOfMnodeReadThreads")->i32;
tsNumOfVnodeQueryThreads = cfgGetItem(pCfg, "numOfVnodeQueryThreads")->i32;
@ -731,6 +748,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsRpcQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64;
tsSIMDBuiltins = (bool)cfgGetItem(pCfg, "SIMD-builtins")->bval;
tsTagFilterCache = (bool)cfgGetItem(pCfg, "tagFilterCache")->bval;
tsEnableMonitor = cfgGetItem(pCfg, "monitor")->bval;
tsMonitorInterval = cfgGetItem(pCfg, "monitorInterval")->i32;
@ -767,6 +785,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
if (tsQueryBufferSize >= 0) {
tsQueryBufferSizeBytes = tsQueryBufferSize * 1048576UL;
}
tsDisableStream = cfgGetItem(pCfg, "disableStream")->bval;
GRANT_CFG_GET;
return 0;
}
@ -973,6 +994,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
tsNumOfTaskQueueThreads = cfgGetItem(pCfg, "numOfTaskQueueThreads")->i32;
} else if (strcasecmp("numOfRpcThreads", name) == 0) {
tsNumOfRpcThreads = cfgGetItem(pCfg, "numOfRpcThreads")->i32;
} else if (strcasecmp("numOfRpcSessions", name) == 0) {
tsNumOfRpcSessions = cfgGetItem(pCfg, "numOfRpcSessions")->i32;
} else if (strcasecmp("numOfCommitThreads", name) == 0) {
tsNumOfCommitThreads = cfgGetItem(pCfg, "numOfCommitThreads")->i32;
} else if (strcasecmp("numOfMnodeReadThreads", name) == 0) {

View File

@ -5592,6 +5592,7 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS
if (tEncodeI16(&encoder, pCol->colId) < 0) return -1;
if (tEncodeI8(&encoder, pCol->type) < 0) return -1;
}
if (tEncodeI64(&encoder, pReq->deleteMark) < 0) return -1;
if (tEncodeI8(&encoder, pReq->igUpdate) < 0) return -1;
tEndEncode(&encoder);
@ -5676,6 +5677,7 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea
}
}
if (tDecodeI64(&decoder, &pReq->deleteMark) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->igUpdate) < 0) return -1;
tEndDecode(&decoder);

View File

@ -280,10 +280,19 @@ int32_t dmInitClient(SDnode *pDnode) {
rpcInit.retryMaxInterval = tsRedirectMaxPeriod;
rpcInit.retryMaxTimouet = tsMaxRetryWaitTime;
rpcInit.failFastInterval = 1000; // interval threshold(ms)
rpcInit.failFastInterval = 5000; // interval threshold(ms)
rpcInit.failFastThreshold = 3; // failed threshold
rpcInit.ffp = dmFailFastFp;
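  // derive a per-connection limit from the session budget spread across the RPC threads, clamped to [10, 500]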
int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3);
connLimitNum = TMAX(connLimitNum, 10);
connLimitNum = TMIN(connLimitNum, 500);
rpcInit.connLimitNum = connLimitNum;
rpcInit.connLimitLock = 1;
rpcInit.supportBatch = 1;
rpcInit.batchSize = 8 * 1024;
pTrans->clientRpc = rpcOpen(&rpcInit);
if (pTrans->clientRpc == NULL) {
dError("failed to init dnode rpc client");

View File

@ -402,7 +402,7 @@ static int32_t dmDecodeEpPairs(SJson *pJson, SDnodeData *pData) {
int32_t code = 0;
SJson *dnodes = tjsonGetObjectItem(pJson, "dnodes");
if (dnodes == NULL) return 0;
if (dnodes == NULL) return -1;
int32_t numOfDnodes = tjsonGetArraySize(dnodes);
for (int32_t i = 0; i < numOfDnodes; ++i) {

View File

@ -146,7 +146,9 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj, int32_t sver) {
// 3.0.20
if (sver >= 2) {
if (tDecodeI64(pDecoder, &pObj->checkpointFreq) < 0) return -1;
if (tDecodeI8(pDecoder, &pObj->igCheckUpdate) < 0) return -1;
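    // igCheckUpdate was appended to the encoding in a later version; older blobs end here, so decode it only when bytes remain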
if (!tDecodeIsEnd(pDecoder)) {
if (tDecodeI8(pDecoder, &pObj->igCheckUpdate) < 0) return -1;
}
}
tEndDecode(pDecoder);
return 0;
@ -415,19 +417,21 @@ void *tDecodeSMqConsumerEp(const void *buf, SMqConsumerEp *pConsumerEp) {
return (void *)buf;
}
SMqSubscribeObj *tNewSubscribeObj(const char key[TSDB_SUBSCRIBE_KEY_LEN]) {
SMqSubscribeObj *pSubNew = taosMemoryCalloc(1, sizeof(SMqSubscribeObj));
if (pSubNew == NULL) return NULL;
memcpy(pSubNew->key, key, TSDB_SUBSCRIBE_KEY_LEN);
taosInitRWLatch(&pSubNew->lock);
pSubNew->vgNum = 0;
pSubNew->consumerHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
SMqSubscribeObj *tNewSubscribeObj(const char* key) {
SMqSubscribeObj *pSubObj = taosMemoryCalloc(1, sizeof(SMqSubscribeObj));
if (pSubObj == NULL) {
return NULL;
}
memcpy(pSubObj->key, key, TSDB_SUBSCRIBE_KEY_LEN);
taosInitRWLatch(&pSubObj->lock);
pSubObj->vgNum = 0;
pSubObj->consumerHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
// TODO set hash free fp
/*taosHashSetFreeFp(pSubNew->consumerHash, tDeleteSMqConsumerEp);*/
pSubNew->unassignedVgs = taosArrayInit(0, sizeof(void *));
return pSubNew;
/*taosHashSetFreeFp(pSubObj->consumerHash, tDeleteSMqConsumerEp);*/
pSubObj->unassignedVgs = taosArrayInit(0, POINTER_BYTES);
return pSubObj;
}
SMqSubscribeObj *tCloneSubscribeObj(const SMqSubscribeObj *pSub) {

View File

@ -882,6 +882,12 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
if (strcasecmp(cfgReq.config, "resetlog") == 0) {
strcpy(dcfgReq.config, "resetlog");
} else if (strncasecmp(cfgReq.config, "monitor", 7) == 0) {
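    // "monitor" must be a whole word: the next character has to be a space or the string terminator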
if (' ' != cfgReq.config[7] && 0 != cfgReq.config[7]) {
mError("dnode:%d, failed to config monitor since invalid conf:%s", cfgReq.dnodeId, cfgReq.config);
terrno = TSDB_CODE_INVALID_CFG;
return -1;
}
const char *value = cfgReq.value;
int32_t flag = atoi(value);
if (flag <= 0) {
@ -902,12 +908,18 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
int32_t optLen = strlen(optName);
if (strncasecmp(cfgReq.config, optName, optLen) != 0) continue;
if (' ' != cfgReq.config[optLen] && 0 != cfgReq.config[optLen]) {
mError("dnode:%d, failed to config since invalid conf:%s", cfgReq.dnodeId, cfgReq.config);
terrno = TSDB_CODE_INVALID_CFG;
return -1;
}
const char *value = cfgReq.value;
int32_t flag = atoi(value);
if (flag <= 0) {
flag = atoi(cfgReq.config + optLen + 1);
}
if (flag <= 0 || flag > 255) {
if (flag < 0 || flag > 255) {
mError("dnode:%d, failed to config %s since value:%d", cfgReq.dnodeId, optName, flag);
terrno = TSDB_CODE_INVALID_CFG;
return -1;

View File

@ -638,7 +638,7 @@ static int32_t mndSetUpdateIdxStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb
}
int32_t mndAddIndexImpl(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb, SIdxObj *pIdx) {
// impl later
int32_t code = 0;
int32_t code = -1;
SStbObj newStb = {0};
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq, "create-stb-index");
if (pTrans == NULL) goto _OVER;
@ -670,6 +670,7 @@ _OVER:
}
static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *req, SDbObj *pDb, SStbObj *pStb) {
int32_t code = -1;
SIdxObj idxObj = {0};
memcpy(idxObj.name, req->idxName, TSDB_TABLE_FNAME_LEN);
memcpy(idxObj.stb, pStb->name, TSDB_TABLE_FNAME_LEN);
@ -681,21 +682,6 @@ static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *re
idxObj.stbUid = pStb->uid;
idxObj.dbUid = pStb->dbUid;
int32_t code = -1;
// SField *pField0 = NULL;
// SStbObj stbObj = {0};
// SStbObj *pNew = &stbObj;
// taosRLockLatch(&pOld->lock);
// memcpy(&stbObj, pOld, sizeof(SStbObj));
// taosRUnLockLatch(&pOld->lock);
// stbObj.pColumns = NULL;
// stbObj.pTags = NULL;
// stbObj.updateTime = taosGetTimestampMs();
// stbObj.lock = 0;
int32_t tag = mndFindSuperTableTagId(pStb, req->colName);
if (tag < 0) {
terrno = TSDB_CODE_MND_TAG_NOT_EXIST;

View File

@ -133,6 +133,7 @@ static void mndCalMqRebalance(SMnode *pMnode) {
}
}
#if 0
static void mndStreamCheckpointTick(SMnode *pMnode, int64_t sec) {
int32_t contLen = 0;
void *pReq = mndBuildCheckpointTickMsg(&contLen, sec);
@ -145,6 +146,7 @@ static void mndStreamCheckpointTick(SMnode *pMnode, int64_t sec) {
tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg);
}
}
#endif
static void mndPullupTelem(SMnode *pMnode) {
mTrace("pullup telem msg");

View File

@ -39,7 +39,7 @@ static int32_t mndStreamActionUpdate(SSdb *pSdb, SStreamObj *pStream, SStreamObj
static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq);
static int32_t mndProcessDropStreamReq(SRpcMsg *pReq);
static int32_t mndProcessStreamCheckpointTmr(SRpcMsg *pReq);
static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq);
// static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq);
/*static int32_t mndProcessRecoverStreamReq(SRpcMsg *pReq);*/
static int32_t mndProcessStreamMetaReq(SRpcMsg *pReq);
static int32_t mndGetStreamMeta(SRpcMsg *pReq, SShowObj *pShow, STableMetaRsp *pMeta);
@ -66,8 +66,8 @@ int32_t mndInitStream(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_STREAM_TASK_DEPLOY_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_STREAM_TASK_DROP_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_MND_STREAM_CHECKPOINT_TIMER, mndProcessStreamCheckpointTmr);
mndSetMsgHandle(pMnode, TDMT_MND_STREAM_BEGIN_CHECKPOINT, mndProcessStreamDoCheckpoint);
// mndSetMsgHandle(pMnode, TDMT_MND_STREAM_CHECKPOINT_TIMER, mndProcessStreamCheckpointTmr);
// mndSetMsgHandle(pMnode, TDMT_MND_STREAM_BEGIN_CHECKPOINT, mndProcessStreamDoCheckpoint);
mndSetMsgHandle(pMnode, TDMT_STREAM_TASK_REPORT_CHECKPOINT, mndTransProcessRsp);
mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_STREAMS, mndRetrieveStream);
@ -297,6 +297,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
pObj->triggerParam = pCreate->maxDelay;
pObj->watermark = pCreate->watermark;
pObj->fillHistory = pCreate->fillHistory;
pObj->deleteMark = pCreate->deleteMark;
pObj->igCheckUpdate = pCreate->igUpdate;
memcpy(pObj->sourceDb, pCreate->sourceDB, TSDB_DB_FNAME_LEN);
@ -342,9 +343,9 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
}
int32_t numOfNULL = taosArrayGetSize(pCreate->fillNullCols);
if(numOfNULL > 0) {
if (numOfNULL > 0) {
pObj->outputSchema.nCols += numOfNULL;
SSchema* pFullSchema = taosMemoryCalloc(pObj->outputSchema.nCols, sizeof(SSchema));
SSchema *pFullSchema = taosMemoryCalloc(pObj->outputSchema.nCols, sizeof(SSchema));
if (!pFullSchema) {
goto FAIL;
}
@ -352,10 +353,10 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
int32_t nullIndex = 0;
int32_t dataIndex = 0;
for (int16_t i = 0; i < pObj->outputSchema.nCols; i++) {
SColLocation* pos = taosArrayGet(pCreate->fillNullCols, nullIndex);
SColLocation *pos = taosArrayGet(pCreate->fillNullCols, nullIndex);
if (i < pos->slotId) {
pFullSchema[i].bytes = pObj->outputSchema.pSchema[dataIndex].bytes;
pFullSchema[i].colId = i + 1; // pObj->outputSchema.pSchema[dataIndex].colId;
pFullSchema[i].colId = i + 1; // pObj->outputSchema.pSchema[dataIndex].colId;
pFullSchema[i].flags = pObj->outputSchema.pSchema[dataIndex].flags;
strcpy(pFullSchema[i].name, pObj->outputSchema.pSchema[dataIndex].name);
pFullSchema[i].type = pObj->outputSchema.pSchema[dataIndex].type;
@ -380,6 +381,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
.triggerType = pObj->trigger == STREAM_TRIGGER_MAX_DELAY ? STREAM_TRIGGER_WINDOW_CLOSE : pObj->trigger,
.watermark = pObj->watermark,
.igExpired = pObj->igExpired,
.deleteMark = pObj->deleteMark,
.igCheckUpdate = pObj->igCheckUpdate,
};
@ -505,9 +507,8 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre
SMCreateStbReq createReq = {0};
tstrncpy(createReq.name, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
createReq.numOfColumns = pStream->outputSchema.nCols;
createReq.pColumns = taosArrayInit(createReq.numOfColumns, sizeof(SField));
createReq.pColumns = taosArrayInit_s(createReq.numOfColumns, sizeof(SField), createReq.numOfColumns);
// build fields
taosArraySetSize(createReq.pColumns, createReq.numOfColumns);
for (int32_t i = 0; i < createReq.numOfColumns; i++) {
SField *pField = taosArrayGet(createReq.pColumns, i);
tstrncpy(pField->name, pStream->outputSchema.pSchema[i].name, TSDB_COL_NAME_LEN);
@ -518,8 +519,7 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre
if (pStream->tagSchema.nCols == 0) {
createReq.numOfTags = 1;
createReq.pTags = taosArrayInit(createReq.numOfTags, sizeof(SField));
taosArraySetSize(createReq.pTags, createReq.numOfTags);
createReq.pTags = taosArrayInit_s(createReq.numOfTags, sizeof(SField), 1);
// build tags
SField *pField = taosArrayGet(createReq.pTags, 0);
strcpy(pField->name, "group_id");
@ -528,8 +528,7 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre
pField->bytes = 8;
} else {
createReq.numOfTags = pStream->tagSchema.nCols;
createReq.pTags = taosArrayInit(createReq.numOfTags, sizeof(SField));
taosArraySetSize(createReq.pTags, createReq.numOfTags);
createReq.pTags = taosArrayInit_s(createReq.numOfTags, sizeof(SField), createReq.numOfTags);
for (int32_t i = 0; i < createReq.numOfTags; i++) {
SField *pField = taosArrayGet(createReq.pTags, i);
pField->bytes = pStream->tagSchema.pSchema[i].bytes;
@ -726,7 +725,8 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
// create stb for stream
if (createStreamReq.createStb == STREAM_CREATE_STABLE_TRUE && mndCreateStbForStream(pMnode, pTrans, &streamObj, pReq->info.conn.user) < 0) {
if (createStreamReq.createStb == STREAM_CREATE_STABLE_TRUE &&
mndCreateStbForStream(pMnode, pTrans, &streamObj, pReq->info.conn.user) < 0) {
mError("trans:%d, failed to create stb for stream %s since %s", pTrans->id, createStreamReq.name, terrstr());
mndTransDrop(pTrans);
goto _OVER;
@ -778,6 +778,9 @@ _OVER:
tFreeStreamObj(&streamObj);
return code;
}
#if 0
static int32_t mndProcessStreamCheckpointTmr(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
SSdb *pSdb = pMnode->pSdb;
@ -942,6 +945,8 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
return 0;
}
#endif
static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
SStreamObj *pStream = NULL;

View File

@ -966,7 +966,9 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
while (numOfRows < rowsCapacity) {
pShow->pIter = sdbFetch(pSdb, SDB_SUBSCRIBE, pShow->pIter, (void **)&pSub);
if (pShow->pIter == NULL) break;
if (pShow->pIter == NULL) {
break;
}
taosRLockLatch(&pSub->lock);
@ -1075,6 +1077,9 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
}
#endif
pBlock->info.rows = numOfRows;
taosRUnLockLatch(&pSub->lock);
sdbRelease(pSdb, pSub);
}

View File

@ -107,8 +107,8 @@ void metaReaderClear(SMetaReader *pReader);
int32_t metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid);
int32_t metaGetTableEntryByUidCache(SMetaReader *pReader, tb_uid_t uid);
int metaGetTableEntryByName(SMetaReader *pReader, const char *name);
int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags);
int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList, SHashObj *tags);
int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList);
int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList);
int32_t metaReadNext(SMetaReader *pReader);
const void *metaGetTableTagVal(void *tag, int16_t type, STagVal *tagVal);
int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName);

View File

@ -244,6 +244,7 @@ void tsdbHeadFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SHeadFile *pHeadF,
void tsdbDataFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SDataFile *pDataF, char fname[]);
void tsdbSttFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSttFile *pSttF, char fname[]);
void tsdbSmaFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSmaFile *pSmaF, char fname[]);
// SDelFile
void tsdbDelFileName(STsdb *pTsdb, SDelFile *pFile, char fname[]);
// tsdbFS.c ==============================================================================================
@ -687,6 +688,7 @@ typedef struct SSttBlockLoadInfo {
int16_t *colIds;
int32_t numOfCols;
bool sttBlockLoaded;
int32_t numOfStt;
// keep the last access position; it may be used to reduce the number of binary searches when
// starting to load last block data for a new table
@ -752,7 +754,7 @@ bool tMergeTreeNext(SMergeTree *pMTree);
TSDBROW tMergeTreeGetRow(SMergeTree *pMTree);
void tMergeTreeClose(SMergeTree *pMTree);
SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols);
SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols, int32_t numOfStt);
void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo);
void getLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo, int64_t *blocks, double *el);
void *destroyLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo);

View File

@ -122,7 +122,7 @@ typedef struct STbUidStore STbUidStore;
#define META_BEGIN_HEAP_NIL 2
int metaOpen(SVnode* pVnode, SMeta** ppMeta, int8_t rollback);
int metaClose(SMeta* pMeta);
int metaClose(SMeta** pMeta);
int metaBegin(SMeta* pMeta, int8_t fromSys);
TXN* metaGetTxn(SMeta* pMeta);
int metaCommit(SMeta* pMeta, TXN* txn);

View File

@ -32,9 +32,9 @@ typedef struct SMetaStbStatsEntry {
} SMetaStbStatsEntry;
typedef struct STagFilterResEntry {
uint64_t suid; // uid for super table
SList list; // the linked list of md5 digest, extracted from the serialized tag query condition
uint32_t qTimes; // queried times for current super table
SList list; // the linked list of md5 digest, extracted from the serialized tag query condition
uint32_t hitTimes; // queried times for current super table
uint32_t accTime;
} STagFilterResEntry;
struct SMetaCache {
@ -55,6 +55,7 @@ struct SMetaCache {
// query cache
struct STagFilterResCache {
TdThreadMutex lock;
uint32_t accTimes;
SHashObj* pTableEntry;
SLRUCache* pUidResCache;
} sTagFilterResCache;
@ -132,6 +133,7 @@ int32_t metaCacheOpen(SMeta* pMeta) {
goto _err2;
}
pCache->sTagFilterResCache.accTimes = 0;
pCache->sTagFilterResCache.pTableEntry =
taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), false, HASH_NO_LOCK);
if (pCache->sTagFilterResCache.pTableEntry == NULL) {
@ -159,9 +161,9 @@ void metaCacheClose(SMeta* pMeta) {
entryCacheClose(pMeta);
statsCacheClose(pMeta);
taosHashCleanup(pMeta->pCache->sTagFilterResCache.pTableEntry);
taosLRUCacheCleanup(pMeta->pCache->sTagFilterResCache.pUidResCache);
taosThreadMutexDestroy(&pMeta->pCache->sTagFilterResCache.lock);
taosHashCleanup(pMeta->pCache->sTagFilterResCache.pTableEntry);
taosMemoryFree(pMeta->pCache);
pMeta->pCache = NULL;
@ -427,6 +429,32 @@ int32_t metaStatsCacheGet(SMeta* pMeta, int64_t uid, SMetaStbStats* pInfo) {
return code;
}
static int checkAllEntriesInCache(const STagFilterResEntry* pEntry, SArray* pInvalidRes, int32_t keyLen,
SLRUCache* pCache, uint64_t suid) {
SListIter iter = {0};
tdListInitIter((SList*)&(pEntry->list), &iter, TD_LIST_FORWARD);
SListNode* pNode = NULL;
uint64_t buf[3];
buf[0] = suid;
int32_t len = sizeof(uint64_t) * tListLen(buf);
while ((pNode = tdListNext(&iter)) != NULL) {
memcpy(&buf[1], pNode->data, keyLen);
// check whether it still exists in the LRU cache, and remove it from the linked list if not.
LRUHandle* pRes = taosLRUCacheLookup(pCache, buf, len);
if (pRes == NULL) { // remove the item in the linked list
taosArrayPush(pInvalidRes, &pNode);
} else {
taosLRUCacheRelease(pCache, pRes, false);
}
}
return 0;
}
int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1,
bool* acquireRes) {
// generate the composed key for LRU cache
@ -434,16 +462,18 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK
SHashObj* pTableMap = pMeta->pCache->sTagFilterResCache.pTableEntry;
TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock;
uint64_t buf[3] = {0};
uint32_t times = 0;
uint64_t buf[4];
*acquireRes = 0;
buf[0] = suid;
memcpy(&buf[1], pKey, keyLen);
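  // composed LRU key: owning hash table address (8 bytes) + suid (8 bytes) + MD5 digest of the tag filter (16 bytes)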
buf[0] = (uint64_t)pTableMap;
buf[1] = suid;
memcpy(&buf[2], pKey, keyLen);
taosThreadMutexLock(pLock);
pMeta->pCache->sTagFilterResCache.accTimes += 1;
int32_t len = keyLen + sizeof(uint64_t);
int32_t len = keyLen + sizeof(uint64_t) * 2;
LRUHandle* pHandle = taosLRUCacheLookup(pCache, buf, len);
if (pHandle == NULL) {
taosThreadMutexUnlock(pLock);
@ -465,48 +495,17 @@ int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pK
// set the result into the buffer
taosArrayAddBatch(pList1, p + sizeof(int32_t), size);
times = atomic_add_fetch_32(&(*pEntry)->qTimes, 1);
(*pEntry)->hitTimes += 1;
uint32_t acc = pMeta->pCache->sTagFilterResCache.accTimes;
if ((*pEntry)->hitTimes % 5000 == 0 && (*pEntry)->hitTimes > 0) {
metaInfo("cache hit:%d, total acc:%d, rate:%.2f", (*pEntry)->hitTimes, acc, ((double)(*pEntry)->hitTimes) / acc);
}
taosLRUCacheRelease(pCache, pHandle, false);
// unlock meta
taosThreadMutexUnlock(pLock);
// check if scanning all items are necessary or not
if (times >= 5000 && TD_DLIST_NELES(&(*pEntry)->list) > 10) {
taosThreadMutexLock(pLock);
SArray* pInvalidRes = taosArrayInit(64, POINTER_BYTES);
SListIter iter = {0};
tdListInitIter(&(*pEntry)->list, &iter, TD_LIST_FORWARD);
SListNode* pNode = NULL;
while ((pNode = tdListNext(&iter)) != NULL) {
memcpy(&buf[1], pNode->data, keyLen);
// check whether it is existed in LRU cache, and remove it from linked list if not.
LRUHandle* pRes = taosLRUCacheLookup(pCache, buf, len);
if (pRes == NULL) { // remove the item in the linked list
taosArrayPush(pInvalidRes, &pNode);
} else {
taosLRUCacheRelease(pCache, pRes, false);
}
}
// remove the keys, of which query uid lists have been replaced already.
size_t s = taosArrayGetSize(pInvalidRes);
for (int32_t i = 0; i < s; ++i) {
SListNode** p1 = taosArrayGet(pInvalidRes, i);
tdListPopNode(&(*pEntry)->list, *p1);
taosMemoryFree(*p1);
}
atomic_store_32(&(*pEntry)->qTimes, 0); // reset the query times
taosArrayDestroy(pInvalidRes);
taosThreadMutexUnlock(pLock);
}
return TSDB_CODE_SUCCESS;
}
@ -514,9 +513,53 @@ static void freePayload(const void* key, size_t keyLen, void* value) {
if (value == NULL) {
return;
}
const uint64_t* p = key;
if (keyLen != sizeof(int64_t) * 4) {
metaError("key length is invalid, length:%d, expect:%d", (int32_t)keyLen, (int32_t)sizeof(uint64_t) * 2);
return;
}
SHashObj* pHashObj = (SHashObj*)p[0];
STagFilterResEntry** pEntry = taosHashGet(pHashObj, &p[1], sizeof(uint64_t));
{
int64_t st = taosGetTimestampUs();
SListIter iter = {0};
tdListInitIter((SList*)&((*pEntry)->list), &iter, TD_LIST_FORWARD);
SListNode* pNode = NULL;
while ((pNode = tdListNext(&iter)) != NULL) {
uint64_t* digest = (uint64_t*)pNode->data;
if (digest[0] == p[2] && digest[1] == p[3]) {
void* tmp = tdListPopNode(&((*pEntry)->list), pNode);
taosMemoryFree(tmp);
int64_t et = taosGetTimestampUs();
metaInfo("clear items in cache, remain cached item:%d, elapsed time:%.2fms", listNEles(&((*pEntry)->list)),
(et - st) / 1000.0);
break;
}
}
}
taosMemoryFree(value);
}
static int32_t addNewEntry(SHashObj* pTableEntry, const void* pKey, int32_t keyLen, uint64_t suid) {
STagFilterResEntry* p = taosMemoryMalloc(sizeof(STagFilterResEntry));
if (p == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
p->hitTimes = 0;
tdListInit(&p->list, keyLen);
taosHashPut(pTableEntry, &suid, sizeof(uint64_t), &p, POINTER_BYTES);
tdListAppend(&p->list, pKey);
return 0;
}
// check both the payload size and selectivity ratio
int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
int32_t payloadLen, double selectivityRatio) {
@ -540,45 +583,61 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int
SHashObj* pTableEntry = pMeta->pCache->sTagFilterResCache.pTableEntry;
TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock;
// the format of key:
// hash table address(8bytes) + suid(8bytes) + MD5 digest(16bytes)
uint64_t buf[4] = {0};
buf[0] = (uint64_t)pTableEntry;
buf[1] = suid;
memcpy(&buf[2], pKey, keyLen);
ASSERT(keyLen == 16);
int32_t code = 0;
taosThreadMutexLock(pLock);
STagFilterResEntry** pEntry = taosHashGet(pTableEntry, &suid, sizeof(uint64_t));
if (pEntry == NULL) {
STagFilterResEntry* p = taosMemoryMalloc(sizeof(STagFilterResEntry));
p->qTimes = 0;
tdListInit(&p->list, keyLen);
taosHashPut(pTableEntry, &suid, sizeof(uint64_t), &p, POINTER_BYTES);
tdListAppend(&p->list, pKey);
code = addNewEntry(pTableEntry, pKey, keyLen, suid);
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
} else {
tdListAppend(&(*pEntry)->list, pKey);
}
uint64_t buf[3] = {0};
buf[0] = suid;
memcpy(&buf[1], pKey, keyLen);
if (sizeof(uint64_t) + keyLen != 24) {
metaError("meta/cache: incorrect keyLen:%" PRId32 " length.", keyLen);
return TSDB_CODE_FAILED;
// check if it exists or not
size_t size = listNEles(&(*pEntry)->list);
if (size == 0) {
tdListAppend(&(*pEntry)->list, pKey);
} else {
SListNode* pNode = listHead(&(*pEntry)->list);
uint64_t* p = (uint64_t*)pNode->data;
if (p[1] == ((uint64_t*)pKey)[1] && p[0] == ((uint64_t*)pKey)[0]) {
// the key already exists in the list, no need to add it to the cache again.
taosThreadMutexUnlock(pLock);
return TSDB_CODE_SUCCESS;
} else { // not equal, append it
tdListAppend(&(*pEntry)->list, pKey);
}
}
}
// add to cache.
int32_t ret = taosLRUCacheInsert(pCache, buf, sizeof(uint64_t) + keyLen, pPayload, payloadLen, freePayload, NULL,
TAOS_LRU_PRIORITY_LOW);
taosLRUCacheInsert(pCache, buf, sizeof(uint64_t) * 2 + keyLen, pPayload, payloadLen, freePayload, NULL,
TAOS_LRU_PRIORITY_LOW);
_end:
taosThreadMutexUnlock(pLock);
metaDebug("vgId:%d, suid:%" PRIu64 " list cache added into cache, total:%d, tables:%d, ret:%d", TD_VID(pMeta->pVnode),
suid, (int32_t)taosLRUCacheGetUsage(pCache), taosHashGetSize(pTableEntry), ret);
metaDebug("vgId:%d, suid:%" PRIu64 " list cache added into cache, total:%d, tables:%d", TD_VID(pMeta->pVnode), suid,
(int32_t)taosLRUCacheGetUsage(pCache), taosHashGetSize(pTableEntry));
return TSDB_CODE_SUCCESS;
return code;
}
// remove the lru cache that are expired due to the tags value update, or creating, or dropping, of child tables
int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) {
int32_t keyLen = sizeof(uint64_t) * 3;
uint64_t p[3] = {0};
p[0] = suid;
uint64_t p[4] = {0};
p[0] = (uint64_t)pMeta->pCache->sTagFilterResCache.pTableEntry;
p[1] = suid;
TdThreadMutex* pLock = &pMeta->pCache->sTagFilterResCache.lock;
@ -594,11 +653,11 @@ int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) {
SListNode* pNode = NULL;
while ((pNode = tdListNext(&iter)) != NULL) {
memcpy(&p[1], pNode->data, 16);
memcpy(&p[2], pNode->data, 16);
taosLRUCacheErase(pMeta->pCache->sTagFilterResCache.pUidResCache, p, keyLen);
}
(*pEntry)->qTimes = 0;
(*pEntry)->hitTimes = 0;
tdListEmpty(&(*pEntry)->list);
taosThreadMutexUnlock(pLock);

View File

@ -201,7 +201,8 @@ _err:
return -1;
}
int metaClose(SMeta *pMeta) {
int metaClose(SMeta **ppMeta) {
SMeta *pMeta = *ppMeta;
if (pMeta) {
if (pMeta->pEnv) metaAbort(pMeta);
if (pMeta->pCache) metaCacheClose(pMeta);
@ -221,7 +222,8 @@ int metaClose(SMeta *pMeta) {
if (pMeta->pTbDb) tdbTbClose(pMeta->pTbDb);
if (pMeta->pEnv) tdbClose(pMeta->pEnv);
metaDestroyLock(pMeta);
taosMemoryFree(pMeta);
taosMemoryFreeClear(*ppMeta);
}
return 0;

View File

@ -1375,13 +1375,14 @@ static int32_t metaGetTableTagByUid(SMeta *pMeta, int64_t suid, int64_t uid, voi
return ret;
}
int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList, SHashObj *tags) {
const int32_t LIMIT = 4096;
int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList) {
const int32_t LIMIT = 128;
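  // release and re-acquire the meta lock every LIMIT uids so a long uid list does not hold the lock continuously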
int32_t isLock = false;
int32_t sz = uidList ? taosArrayGetSize(uidList) : 0;
for (int i = 0; i < sz; i++) {
tb_uid_t *id = taosArrayGet(uidList, i);
STUidTagInfo *p = taosArrayGet(uidList, i);
if (i % LIMIT == 0) {
if (isLock) metaULock(pMeta);
@ -1390,52 +1391,73 @@ int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList, SHas
isLock = true;
}
if (taosHashGet(tags, id, sizeof(tb_uid_t)) == NULL) {
void *val = NULL;
int32_t len = 0;
if (metaGetTableTagByUid(pMeta, suid, *id, &val, &len, false) == 0) {
taosHashPut(tags, id, sizeof(tb_uid_t), val, len);
tdbFree(val);
} else {
metaError("vgId:%d, failed to table IDs, suid: %" PRId64 ", uid: %" PRId64 "", TD_VID(pMeta->pVnode), suid,
*id);
}
// if (taosHashGet(tags, &p->uid, sizeof(tb_uid_t)) == NULL) {
void *val = NULL;
int32_t len = 0;
if (metaGetTableTagByUid(pMeta, suid, p->uid, &val, &len, false) == 0) {
p->pTagVal = taosMemoryMalloc(len);
memcpy(p->pTagVal, val, len);
tdbFree(val);
} else {
metaError("vgId:%d, failed to table tags, suid: %" PRId64 ", uid: %" PRId64 "", TD_VID(pMeta->pVnode), suid,
p->uid);
}
}
// }
if (isLock) metaULock(pMeta);
return 0;
}
int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags) {
int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *pUidTagInfo) {
SMCtbCursor *pCur = metaOpenCtbCursor(pMeta, suid, 1);
SHashObj *uHash = NULL;
size_t len = taosArrayGetSize(uidList); // len > 0 means there already have uids
if (len > 0) {
uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
for (int i = 0; i < len; i++) {
int64_t *uid = taosArrayGet(uidList, i);
taosHashPut(uHash, uid, sizeof(int64_t), &i, sizeof(i));
// If the list is non-empty, the caller supplied uids and we only want the tags of those specified tables.
// Otherwise, the tags of all child tables are retrieved and kept in the info array, which may require a lot of memory.
SHashObj *pSpecifiedUidMap = NULL;
size_t numOfElems = taosArrayGetSize(pUidTagInfo);
if (numOfElems > 0) {
pSpecifiedUidMap =
taosHashInit(numOfElems / 0.7, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
for (int i = 0; i < numOfElems; i++) {
STUidTagInfo *pTagInfo = taosArrayGet(pUidTagInfo, i);
taosHashPut(pSpecifiedUidMap, &pTagInfo->uid, sizeof(uint64_t), &i, sizeof(int32_t));
}
}
while (1) {
tb_uid_t id = metaCtbCursorNext(pCur);
if (id == 0) {
break;
}
if (numOfElems == 0) { // all data needs to be added into the pUidTagInfo list
while (1) {
tb_uid_t uid = metaCtbCursorNext(pCur);
if (uid == 0) {
break;
}
if (len > 0 && taosHashGet(uHash, &id, sizeof(int64_t)) == NULL) {
continue;
} else if (len == 0) {
taosArrayPush(uidList, &id);
STUidTagInfo info = {.uid = uid, .pTagVal = pCur->pVal};
info.pTagVal = taosMemoryMalloc(pCur->vLen);
memcpy(info.pTagVal, pCur->pVal, pCur->vLen);
taosArrayPush(pUidTagInfo, &info);
}
} else { // only the specified tables need to be added
while (1) {
tb_uid_t uid = metaCtbCursorNext(pCur);
if (uid == 0) {
break;
}
taosHashPut(tags, &id, sizeof(int64_t), pCur->pVal, pCur->vLen);
int32_t *index = taosHashGet(pSpecifiedUidMap, &uid, sizeof(uint64_t));
if (index == NULL) {
continue;
}
STUidTagInfo *pTagInfo = taosArrayGet(pUidTagInfo, *index);
if (pTagInfo->pTagVal == NULL) {
pTagInfo->pTagVal = taosMemoryMalloc(pCur->vLen);
memcpy(pTagInfo->pTagVal, pCur->pVal, pCur->vLen);
}
}
}
taosHashCleanup(uHash);
taosHashCleanup(pSpecifiedUidMap);
metaCloseCtbCursor(pCur, 1);
return TSDB_CODE_SUCCESS;
}

View File

@ -325,7 +325,7 @@ _exit:
}
static int32_t tdRSmaFSScanAndTryFix(SSma *pSma) {
int32_t code = 0;
int32_t code = 0;
#if 0
int32_t lino = 0;
SVnode *pVnode = pSma->pVnode;
@ -559,7 +559,7 @@ int32_t tdRSmaFSRef(SSma *pSma, SRSmaFS *pFS) {
SRSmaFS *qFS = RSMA_FS(pStat);
int32_t size = taosArrayGetSize(qFS->aQTaskInf);
pFS->aQTaskInf = taosArrayInit(size, sizeof(SQTaskFile));
pFS->aQTaskInf = taosArrayInit_s(size, sizeof(SQTaskFile), size);
if (pFS->aQTaskInf == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
@ -574,7 +574,6 @@ int32_t tdRSmaFSRef(SSma *pSma, SRSmaFS *pFS) {
}
}
taosArraySetSize(pFS->aQTaskInf, size);
memcpy(pFS->aQTaskInf->pData, qFS->aQTaskInf->pData, size * sizeof(SQTaskFile));
_exit:
@ -640,9 +639,7 @@ int32_t tdRSmaFSCopy(SSma *pSma, SRSmaFS *pFS) {
code = tdRSmaFSCreate(pFS, size);
TSDB_CHECK_CODE(code, lino, _exit);
taosArraySetSize(pFS->aQTaskInf, size);
memcpy(pFS->aQTaskInf->pData, qFS->aQTaskInf->pData, size * sizeof(SQTaskFile));
taosArrayAddBatch(pFS->aQTaskInf, qFS->aQTaskInf->pData, size);
_exit:
if (code) {

View File

@ -106,7 +106,7 @@ STQ* tqOpen(const char* path, SVnode* pVnode) {
return NULL;
}
if (streamLoadTasks(pTq->pStreamMeta) < 0) {
if (streamLoadTasks(pTq->pStreamMeta, walGetCommittedVer(pVnode->pWal)) < 0) {
return NULL;
}
@ -849,12 +849,9 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
pHandle->execHandle.task =
qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, &pHandle->execHandle.numOfCols, NULL);
/*A(pHandle->execHandle.task);*/
void* scanner = NULL;
qExtractStreamScanner(pHandle->execHandle.task, &scanner);
/*A(scanner);*/
pHandle->execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner);
/*A(pHandle->execHandle.pExecReader);*/
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
pHandle->pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
pHandle->execHandle.pExecReader = tqOpenReader(pTq->pVnode);
@ -887,6 +884,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
tqDebug("try to persist handle %s consumer %" PRId64, req.subKey, pHandle->consumerId);
if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) {
return -1;
}
} else {
// TODO handle qmsg and exec modification
@ -898,6 +896,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
qStreamCloseTsdbReader(pHandle->execHandle.task);
}
if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) {
return -1;
}
// close handle
}
@ -1216,6 +1215,9 @@ int32_t tqProcessTaskRecover2Req(STQ* pTq, int64_t version, char* msg, int32_t m
return -1;
}
atomic_store_8(&pTask->fillHistory, 0);
streamMetaSaveTask(pTq->pStreamMeta, pTask);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return 0;

View File

@ -209,6 +209,8 @@ int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) {
tEncoderInit(&encoder, buf, vlen);
if (tEncodeSTqHandle(&encoder, pHandle) < 0) {
tEncoderClear(&encoder);
taosMemoryFree(buf);
return -1;
}
@ -216,18 +218,26 @@ int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) {
if (tdbBegin(pTq->pMetaDB, &txn, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) <
0) {
tEncoderClear(&encoder);
taosMemoryFree(buf);
return -1;
}
if (tdbTbUpsert(pTq->pExecStore, key, (int)strlen(key), buf, vlen, txn) < 0) {
tEncoderClear(&encoder);
taosMemoryFree(buf);
return -1;
}
if (tdbCommit(pTq->pMetaDB, txn) < 0) {
tEncoderClear(&encoder);
taosMemoryFree(buf);
return -1;
}
if (tdbPostCommit(pTq->pMetaDB, txn) < 0) {
tEncoderClear(&encoder);
taosMemoryFree(buf);
return -1;
}

View File

@ -308,7 +308,7 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver)
taosWUnLockLatch(&pTq->pushLock);
}
if (vnodeIsRoleLeader(pTq->pVnode)) {
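  // when streams are disabled through the disableStream config option, skip stream task dispatch entirely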
if (!tsDisableStream && vnodeIsRoleLeader(pTq->pVnode)) {
if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) return 0;
if (msgType == TDMT_VND_SUBMIT) {
void* data = taosMemoryMalloc(len);

View File

@ -17,7 +17,7 @@
static int32_t tsdbOpenBICache(STsdb *pTsdb) {
int32_t code = 0;
SLRUCache *pCache = taosLRUCacheInit(5 * 1024 * 1024, -1, .5);
SLRUCache *pCache = taosLRUCacheInit(10 * 1024 * 1024, 0, .5);
if (pCache == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
@ -48,7 +48,7 @@ int32_t tsdbOpenCache(STsdb *pTsdb) {
SLRUCache *pCache = NULL;
size_t cfgCapacity = pTsdb->pVnode->config.cacheLastSize * 1024 * 1024;
pCache = taosLRUCacheInit(cfgCapacity, -1, .5);
pCache = taosLRUCacheInit(cfgCapacity, 1, .5);
if (pCache == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
@ -278,6 +278,11 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, TS
goto _invalidate;
}
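  // the column count differs from the current schema, so the cached row is stale and must be invalidated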
if (nCol != pTSchema->numOfCols) {
invalidate = true;
goto _invalidate;
}
SLastCol *tTsVal = (SLastCol *)taosArrayGet(pLast, iCol);
if (keyTs > tTsVal->ts) {
STColumn *pTColumn = &pTSchema->columns[0];
@ -293,6 +298,12 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, TS
SColVal colVal = {0};
tsdbRowGetColVal(row, pTSchema, iCol, &colVal);
if (colVal.cid != tColVal->cid) {
invalidate = true;
goto _invalidate;
}
if (!COL_VAL_IS_NONE(&colVal)) {
if (keyTs == tTsVal1->ts && !COL_VAL_IS_NONE(tColVal)) {
invalidate = true;
@ -302,7 +313,8 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, TS
SLastCol lastCol = {.ts = keyTs, .colVal = colVal};
if (IS_VAR_DATA_TYPE(colVal.type) && colVal.value.nData > 0) {
SLastCol *pLastCol = (SLastCol *)taosArrayGet(pLast, iCol);
taosMemoryFree(pLastCol->colVal.value.pData);
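        // free the previous variable-length buffer only when one was actually allocated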
if (pLastCol->colVal.value.nData > 0 && NULL != pLastCol->colVal.value.pData)
taosMemoryFree(pLastCol->colVal.value.pData);
lastCol.colVal.value.pData = taosMemoryMalloc(colVal.value.nData);
if (lastCol.colVal.value.pData == NULL) {
@ -387,6 +399,11 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, TSDBROW *row, STsdb
goto _invalidate;
}
if (nCol != pTSchema->numOfCols) {
invalidate = true;
goto _invalidate;
}
SLastCol *tTsVal = (SLastCol *)taosArrayGet(pLast, iCol);
if (keyTs > tTsVal->ts) {
STColumn *pTColumn = &pTSchema->columns[0];
@ -402,6 +419,12 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, TSDBROW *row, STsdb
SColVal colVal = {0};
tsdbRowGetColVal(row, pTSchema, iCol, &colVal);
if (colVal.cid != tColVal->cid) {
invalidate = true;
goto _invalidate;
}
if (COL_VAL_IS_VALUE(&colVal)) {
if (keyTs == tTsVal1->ts && COL_VAL_IS_VALUE(tColVal)) {
invalidate = true;
@ -411,7 +434,8 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, TSDBROW *row, STsdb
SLastCol lastCol = {.ts = keyTs, .colVal = colVal};
if (IS_VAR_DATA_TYPE(colVal.type) && colVal.value.nData > 0) {
SLastCol *pLastCol = (SLastCol *)taosArrayGet(pLast, iCol);
taosMemoryFree(pLastCol->colVal.value.pData);
if (pLastCol->colVal.value.nData > 0 && NULL != pLastCol->colVal.value.pData)
taosMemoryFree(pLastCol->colVal.value.pData);
lastCol.colVal.value.pData = taosMemoryMalloc(colVal.value.nData);
if (lastCol.colVal.value.pData == NULL) {
@ -692,6 +716,7 @@ typedef struct SFSNextRowIter {
SArray *aDFileSet;
SDataFReader **pDataFReader;
SArray *aBlockIdx;
LRUHandle *aBlockIdxHandle;
SBlockIdx *pBlockIdx;
SMapData blockMap;
int32_t nBlock;
@ -745,6 +770,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
}
// tMapDataReset(&state->blockIdxMap);
/*
if (!state->aBlockIdx) {
state->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
} else {
@ -752,6 +778,12 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
}
code = tsdbReadBlockIdx(*state->pDataFReader, state->aBlockIdx);
if (code) goto _err;
*/
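    // read the block index through the BI LRU cache instead of re-loading it from the data file on every pass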
int32_t code = tsdbCacheGetBlockIdx(state->pTsdb->biCache, *state->pDataFReader, &state->aBlockIdxHandle);
if (code != TSDB_CODE_SUCCESS || state->aBlockIdxHandle == NULL) {
goto _err;
}
state->aBlockIdx = (SArray *)taosLRUCacheValue(state->pTsdb->biCache, state->aBlockIdxHandle);
/* if (state->pBlockIdx) { */
/* } */
@ -821,7 +853,10 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
// resetLastBlockLoadInfo(state->pLoadInfo);
if (state->aBlockIdx) {
taosArrayDestroy(state->aBlockIdx);
// taosArrayDestroy(state->aBlockIdx);
tsdbBICacheRelease(state->pTsdb->biCache, state->aBlockIdxHandle);
state->aBlockIdxHandle = NULL;
state->aBlockIdx = NULL;
}
@ -844,7 +879,10 @@ _err:
resetLastBlockLoadInfo(state->pLoadInfo);
}*/
if (state->aBlockIdx) {
taosArrayDestroy(state->aBlockIdx);
// taosArrayDestroy(state->aBlockIdx);
tsdbBICacheRelease(state->pTsdb->biCache, state->aBlockIdxHandle);
state->aBlockIdxHandle = NULL;
state->aBlockIdx = NULL;
}
if (state->pBlockData) {
@ -870,7 +908,10 @@ int32_t clearNextRowFromFS(void *iter) {
state->pDataFReader = NULL;
}*/
if (state->aBlockIdx) {
taosArrayDestroy(state->aBlockIdx);
// taosArrayDestroy(state->aBlockIdx);
tsdbBICacheRelease(state->pTsdb->biCache, state->aBlockIdxHandle);
state->aBlockIdxHandle = NULL;
state->aBlockIdx = NULL;
}
if (state->pBlockData) {

View File

@ -41,6 +41,13 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p
int32_t slotId = slotIds[i];
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, slotId);
// check for a null value caused by a table schema modification (newly added column).
if (pColVal == NULL) {
p->ts = 0;
p->isNull = true;
continue;
}
p->ts = pColVal->ts;
p->isNull = !COL_VAL_IS_VALUE(&pColVal->colVal);
allNullRow = p->isNull & allNullRow;
@ -99,6 +106,38 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p
return TSDB_CODE_SUCCESS;
}
static int32_t setTableSchema(SCacheRowsReader* p, uint64_t suid, const char* idstr) {
int32_t numOfTables = p->numOfTables;
if (suid != 0) {
p->pSchema = metaGetTbTSchema(p->pVnode->pMeta, suid, -1, 1);
if (p->pSchema == NULL) {
taosMemoryFree(p);
tsdbWarn("stable:%" PRIu64 " has been dropped, failed to retrieve cached rows, %s", suid, idstr);
return TSDB_CODE_PAR_TABLE_NOT_EXIST;
}
} else {
for (int32_t i = 0; i < numOfTables; ++i) {
uint64_t uid = p->pTableList[i].uid;
p->pSchema = metaGetTbTSchema(p->pVnode->pMeta, uid, -1, 1);
if (p->pSchema != NULL) {
break;
}
tsdbWarn("table:%" PRIu64 " has been dropped, failed to retrieve cached rows, %s", uid, idstr);
}
// all queried tables have been dropped already, return immediately.
if (p->pSchema == NULL) {
taosMemoryFree(p);
tsdbWarn("all queried tables has been dropped, try next group, %s", idstr);
return TSDB_CODE_PAR_TABLE_NOT_EXIST;
}
}
return TSDB_CODE_SUCCESS;
}
int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, int32_t numOfTables, int32_t numOfCols,
uint64_t suid, void** pReader, const char* idstr) {
*pReader = NULL;
@ -119,11 +158,15 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
return TSDB_CODE_SUCCESS;
}
STableKeyInfo* pKeyInfo = &((STableKeyInfo*)pTableIdList)[0];
p->pSchema = metaGetTbTSchema(p->pVnode->pMeta, pKeyInfo->uid, -1, 1);
p->pTableList = pTableIdList;
p->numOfTables = numOfTables;
int32_t code = setTableSchema(p, suid, idstr);
if (code != TSDB_CODE_SUCCESS) {
tsdbCacherowsReaderClose(p);
return code;
}
p->transferBuf = taosMemoryCalloc(p->pSchema->numOfCols, POINTER_BYTES);
if (p->transferBuf == NULL) {
tsdbCacherowsReaderClose(p);
@ -140,7 +183,8 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
}
}
p->pLoadInfo = tCreateLastBlockLoadInfo(p->pSchema, NULL, 0);
int32_t numOfStt = ((SVnode*)pVnode)->config.sttTrigger;
p->pLoadInfo = tCreateLastBlockLoadInfo(p->pSchema, NULL, 0, numOfStt);
if (p->pLoadInfo == NULL) {
tsdbCacherowsReaderClose(p);
return TSDB_CODE_OUT_OF_MEMORY;

View File

@ -92,24 +92,56 @@ static int32_t tGetSmaFile(uint8_t *p, SSmaFile *pSmaFile) {
}
// EXPOSED APIS ==================================================
static char* getFileNamePrefix(STsdb *pTsdb, SDiskID did, int32_t fid, uint64_t commitId, char fname[]) {
const char* p1 = tfsGetDiskPath(pTsdb->pVnode->pTfs, did);
int32_t len = strlen(p1);
char* p = memcpy(fname, p1, len);
p += len;
*(p++) = TD_DIRSEP[0];
len = strlen(pTsdb->path);
memcpy(p, pTsdb->path, len);
p += len;
*(p++) = TD_DIRSEP[0];
*(p++) = 'v';
p += titoa(TD_VID(pTsdb->pVnode), 10, p);
*(p++) = 'f';
p += titoa(fid, 10, p);
memcpy(p, "ver", 3);
p += 3;
p += titoa(commitId, 10, p);
return p;
}
void tsdbHeadFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SHeadFile *pHeadF, char fname[]) {
snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did),
TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pHeadF->commitID, ".head");
char* p = getFileNamePrefix(pTsdb, did, fid, pHeadF->commitID, fname);
memcpy(p, ".head", 5);
p[5] = 0;
}
void tsdbDataFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SDataFile *pDataF, char fname[]) {
snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did),
TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pDataF->commitID, ".data");
char* p = getFileNamePrefix(pTsdb, did, fid, pDataF->commitID, fname);
memcpy(p, ".data", 5);
p[5] = 0;
}
void tsdbSttFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSttFile *pSttF, char fname[]) {
snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did),
TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pSttF->commitID, ".stt");
char* p = getFileNamePrefix(pTsdb, did, fid, pSttF->commitID, fname);
memcpy(p, ".stt", 4);
p[4] = 0;
}
void tsdbSmaFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSmaFile *pSmaF, char fname[]) {
snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did),
TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pSmaF->commitID, ".sma");
char* p = getFileNamePrefix(pTsdb, did, fid, pSmaF->commitID, fname);
memcpy(p, ".sma", 4);
p[4] = 0;
}
bool tsdbDelFileIsSame(SDelFile *pDelFile1, SDelFile *pDelFile2) { return pDelFile1->commitID == pDelFile2->commitID; }

View File

@ -31,14 +31,16 @@ struct SLDataIter {
SSttBlockLoadInfo *pBlockLoadInfo;
};
SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols) {
SSttBlockLoadInfo *pLoadInfo = taosMemoryCalloc(TSDB_MAX_STT_TRIGGER, sizeof(SSttBlockLoadInfo));
SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols, int32_t numOfSttTrigger) {
SSttBlockLoadInfo *pLoadInfo = taosMemoryCalloc(numOfSttTrigger, sizeof(SSttBlockLoadInfo));
if (pLoadInfo == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
for (int32_t i = 0; i < TSDB_MAX_STT_TRIGGER; ++i) {
pLoadInfo->numOfStt = numOfSttTrigger;
for (int32_t i = 0; i < numOfSttTrigger; ++i) {
pLoadInfo[i].blockIndex[0] = -1;
pLoadInfo[i].blockIndex[1] = -1;
pLoadInfo[i].currentLoadBlockIndex = 1;
@ -63,7 +65,7 @@ SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList,
}
void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo) {
for (int32_t i = 0; i < TSDB_MAX_STT_TRIGGER; ++i) {
for (int32_t i = 0; i < pLoadInfo->numOfStt; ++i) {
pLoadInfo[i].currentLoadBlockIndex = 1;
pLoadInfo[i].blockIndex[0] = -1;
pLoadInfo[i].blockIndex[1] = -1;
@ -77,14 +79,14 @@ void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo) {
}
void getLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo, int64_t *blocks, double *el) {
for (int32_t i = 0; i < TSDB_MAX_STT_TRIGGER; ++i) {
for (int32_t i = 0; i < pLoadInfo->numOfStt; ++i) {
*el += pLoadInfo[i].elapsedTime;
*blocks += pLoadInfo[i].loadBlocks;
}
}
void *destroyLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo) {
for (int32_t i = 0; i < TSDB_MAX_STT_TRIGGER; ++i) {
for (int32_t i = 0; i < pLoadInfo->numOfStt; ++i) {
pLoadInfo[i].currentLoadBlockIndex = 1;
pLoadInfo[i].blockIndex[0] = -1;
pLoadInfo[i].blockIndex[1] = -1;

View File

@ -133,17 +133,17 @@ typedef struct SFileBlockDumpInfo {
bool allDumped;
} SFileBlockDumpInfo;
typedef struct SUidOrderCheckInfo {
typedef struct STableUidList {
uint64_t* tableUidList; // access table uid list in uid ascending order list
int32_t currentIndex; // index in table uid list
} SUidOrderCheckInfo;
} STableUidList;
typedef struct SReaderStatus {
bool loadFromFile; // check file stage
bool composedDataBlock; // the returned data block is a composed block or not
SHashObj* pTableMap; // SHash<STableBlockScanInfo>
STableBlockScanInfo** pTableIter; // table iterator used in building in-memory buffer data blocks.
SUidOrderCheckInfo uidCheckInfo; // check all table in uid order
STableUidList uidList; // check tables in uid order, to avoid the repeatly load of blocks in STT.
SFileBlockDumpInfo fBlockDumpInfo;
SDFileSet* pCurrentFileset; // current opened file set
SBlockData fileBlockData;
@ -319,9 +319,19 @@ static void* getPosInBlockInfoBuf(SBlockInfoBuf* pBuf, int32_t index) {
return (*pBucket) + (index % pBuf->numPerBucket) * sizeof(STableBlockScanInfo);
}
static int32_t uidComparFunc(const void* p1, const void* p2) {
uint64_t pu1 = *(uint64_t*)p1;
uint64_t pu2 = *(uint64_t*)p2;
if (pu1 == pu2) {
return 0;
} else {
return (pu1 < pu2) ? -1 : 1;
}
}
// NOTE: speedup the whole processing by preparing the buffer for STableBlockScanInfo in batch model
static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf, const STableKeyInfo* idList,
int32_t numOfTables) {
STableUidList* pUidList, int32_t numOfTables) {
// allocate buffer in order to load data blocks from file
// todo use simple hash instead, optimize the memory consumption
SHashObj* pTableMap =
@ -333,9 +343,18 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf
int64_t st = taosGetTimestampUs();
initBlockScanInfoBuf(pBuf, numOfTables);
pUidList->tableUidList = taosMemoryMalloc(numOfTables * sizeof(uint64_t));
if (pUidList->tableUidList == NULL) {
return NULL;
}
pUidList->currentIndex = 0;
for (int32_t j = 0; j < numOfTables; ++j) {
STableBlockScanInfo* pScanInfo = getPosInBlockInfoBuf(pBuf, j);
pScanInfo->uid = idList[j].uid;
pUidList->tableUidList[j] = idList[j].uid;
if (ASCENDING_TRAVERSE(pTsdbReader->order)) {
int64_t skey = pTsdbReader->window.skey;
pScanInfo->lastKey = (skey > INT64_MIN) ? (skey - 1) : skey;
@ -349,6 +368,8 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf
pScanInfo->lastKey, pTsdbReader->idStr);
}
taosSort(pUidList->tableUidList, numOfTables, sizeof(uint64_t), uidComparFunc);
pTsdbReader->cost.createScanInfoList = (taosGetTimestampUs() - st) / 1000.0;
tsdbDebug("%p create %d tables scan-info, size:%.2f Kb, elapsed time:%.2f ms, %s", pTsdbReader, numOfTables,
(sizeof(STableBlockScanInfo) * numOfTables) / 1024.0, pTsdbReader->cost.createScanInfoList,
@ -425,19 +446,6 @@ static STimeWindow updateQueryTimeWindow(STsdb* pTsdb, STimeWindow* pWindow) {
return win;
}
static void limitOutputBufferSize(const SQueryTableDataCond* pCond, int32_t* capacity) {
int32_t rowLen = 0;
for (int32_t i = 0; i < pCond->numOfCols; ++i) {
rowLen += pCond->colList[i].bytes;
}
// make sure the output SSDataBlock size be less than 2MB.
const int32_t TWOMB = 2 * 1024 * 1024;
if ((*capacity) * rowLen > TWOMB) {
(*capacity) = TWOMB / rowLen;
}
}
// init file iterator
static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdbReader* pReader) {
size_t numOfFileset = taosArrayGetSize(aDFileSet);
@ -466,8 +474,10 @@ static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdb
if (pLReader->pInfo == NULL) {
// here we ignore the first column, which is always be the primary timestamp column
pLReader->pInfo =
tCreateLastBlockLoadInfo(pReader->pSchema, &pReader->suppInfo.colId[1], pReader->suppInfo.numOfCols - 1);
SBlockLoadSuppInfo* pInfo = &pReader->suppInfo;
int32_t numOfStt = pReader->pTsdb->pVnode->config.sttTrigger;
pLReader->pInfo = tCreateLastBlockLoadInfo(pReader->pSchema, &pInfo->colId[1], pInfo->numOfCols - 1, numOfStt);
if (pLReader->pInfo == NULL) {
tsdbDebug("init fileset iterator failed, code:%s %s", tstrerror(terrno), pReader->idStr);
return terrno;
@ -478,13 +488,15 @@ static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdb
return TSDB_CODE_SUCCESS;
}
static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) {
static int32_t filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader, bool *hasNext) {
bool asc = ASCENDING_TRAVERSE(pIter->order);
int32_t step = asc ? 1 : -1;
pIter->index += step;
int32_t code = 0;
if ((asc && pIter->index >= pIter->numOfFiles) || ((!asc) && pIter->index < 0)) {
return false;
*hasNext = false;
return TSDB_CODE_SUCCESS;
}
SIOCostSummary* pSum = &pReader->cost;
@ -504,7 +516,7 @@ static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) {
pReader->status.pCurrentFileset = (SDFileSet*)taosArrayGet(pIter->pFileList, pIter->index);
int32_t code = tsdbDataFReaderOpen(&pReader->pFileReader, pReader->pTsdb, pReader->status.pCurrentFileset);
code = tsdbDataFReaderOpen(&pReader->pFileReader, pReader->pTsdb, pReader->status.pCurrentFileset);
if (code != TSDB_CODE_SUCCESS) {
goto _err;
}
@ -518,24 +530,28 @@ static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) {
if ((asc && win.skey > pReader->window.ekey) || (!asc && win.ekey < pReader->window.skey)) {
tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %s", pReader,
pReader->window.skey, pReader->window.ekey, pReader->idStr);
return false;
*hasNext = false;
return TSDB_CODE_SUCCESS;
}
if ((asc && (win.ekey < pReader->window.skey)) || ((!asc) && (win.skey > pReader->window.ekey))) {
pIter->index += step;
if ((asc && pIter->index >= pIter->numOfFiles) || ((!asc) && pIter->index < 0)) {
return false;
*hasNext = false;
return TSDB_CODE_SUCCESS;
}
continue;
}
tsdbDebug("%p file found fid:%d for qrange:%" PRId64 "-%" PRId64 ", %s", pReader, fid, pReader->window.skey,
pReader->window.ekey, pReader->idStr);
return true;
*hasNext = true;
return TSDB_CODE_SUCCESS;
}
_err:
return false;
*hasNext = false;
return code;
}
static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order) {
@ -682,9 +698,6 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
goto _end;
}
// todo refactor.
limitOutputBufferSize(pCond, &pReader->capacity);
// allocate buffer in order to load data blocks from file
SBlockLoadSuppInfo* pSup = &pReader->suppInfo;
pSup->pColAgg = taosArrayInit(pCond->numOfCols, sizeof(SColumnDataAgg));
@ -701,7 +714,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
goto _end;
}
setColumnIdSlotList(&pReader->suppInfo, pCond->colList, pCond->pSlotList, pCond->numOfCols);
setColumnIdSlotList(pSup, pCond->colList, pCond->pSlotList, pCond->numOfCols);
tsdbInitReaderLock(pReader);
@ -715,57 +728,75 @@ _end:
}
static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, SArray* pIndexList) {
// SArray* aBlockIdx = taosArrayInit(8, sizeof(SBlockIdx));
int64_t st = taosGetTimestampUs();
// int32_t code = tsdbReadBlockIdx(pFileReader, aBlockIdx);
int64_t st = taosGetTimestampUs();
LRUHandle* handle = NULL;
int32_t code = tsdbCacheGetBlockIdx(pFileReader->pTsdb->biCache, pFileReader, &handle);
if (code != TSDB_CODE_SUCCESS || handle == NULL) {
goto _end;
}
int32_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
SArray* aBlockIdx = (SArray*)taosLRUCacheValue(pFileReader->pTsdb->biCache, handle);
size_t num = taosArrayGetSize(aBlockIdx);
if (num == 0) {
tsdbBICacheRelease(pFileReader->pTsdb->biCache, handle);
// taosArrayDestroy(aBlockIdx);
return TSDB_CODE_SUCCESS;
}
// todo binary search to the start position
int64_t et1 = taosGetTimestampUs();
SBlockIdx* pBlockIdx = NULL;
for (int32_t i = 0; i < num; ++i) {
SBlockIdx* pBlockIdx = NULL;
STableUidList* pList = &pReader->status.uidList;
int32_t i = 0, j = 0;
while (i < num && j < numOfTables) {
pBlockIdx = (SBlockIdx*)taosArrayGet(aBlockIdx, i);
// uid check
if (pBlockIdx->suid != pReader->suid) {
i += 1;
continue;
}
// this block belongs to a table that is not queried.
void* p = taosHashGet(pReader->status.pTableMap, &pBlockIdx->uid, sizeof(uint64_t));
if (p == NULL) {
if (pBlockIdx->uid < pList->tableUidList[j]) {
i += 1;
continue;
}
STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p;
if (pScanInfo->pBlockList == NULL) {
pScanInfo->pBlockList = taosArrayInit(4, sizeof(SBlockIndex));
if (pBlockIdx->uid > pList->tableUidList[j]) {
j += 1;
continue;
}
taosArrayPush(pIndexList, pBlockIdx);
if (pBlockIdx->uid == pList->tableUidList[j]) {
// this block belongs to a table that is not queried.
void* p = taosHashGet(pReader->status.pTableMap, &pBlockIdx->uid, sizeof(uint64_t));
if (p == NULL) {
tsdbError("failed to locate the tableBlockScan Info in hashmap, uid:%" PRIu64 ", %s", pBlockIdx->uid,
pReader->idStr);
return TSDB_CODE_APP_ERROR;
}
STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p;
if (pScanInfo->pBlockList == NULL) {
pScanInfo->pBlockList = taosArrayInit(4, sizeof(SBlockIndex));
}
taosArrayPush(pIndexList, pBlockIdx);
i += 1;
j += 1;
}
}
int64_t et2 = taosGetTimestampUs();
tsdbDebug("load block index for %d tables completed, elapsed time:%.2f ms, set blockIdx:%.2f ms, size:%.2f Kb %s",
(int32_t)num, (et1 - st) / 1000.0, (et2 - et1) / 1000.0, num * sizeof(SBlockIdx) / 1024.0, pReader->idStr);
tsdbDebug("load block index for %d/%d tables completed, elapsed time:%.2f ms, set blockIdx:%.2f ms, size:%.2f Kb %s",
numOfTables, (int32_t)num, (et1 - st) / 1000.0, (et2 - et1) / 1000.0, num * sizeof(SBlockIdx) / 1024.0,
pReader->idStr);
pReader->cost.headFileLoadTime += (et1 - st) / 1000.0;
_end:
// taosArrayDestroy(aBlockIdx);
tsdbBICacheRelease(pFileReader->pTsdb->biCache, handle);
return code;
}
@ -1691,7 +1722,7 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo*
double elapsedTime = (taosGetTimestampUs() - st) / 1000.0;
tsdbDebug("%p build data block from cache completed, elapsed time:%.2f ms, numOfRows:%d, brange:%" PRId64
" - %" PRId64 ", uid:%"PRIu64", %s",
" - %" PRId64 ", uid:%" PRIu64 ", %s",
pReader, elapsedTime, pBlock->info.rows, pBlock->info.window.skey, pBlock->info.window.ekey,
pBlockScanInfo->uid, pReader->idStr);
@ -1721,7 +1752,7 @@ static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pB
static bool nextRowFromLastBlocks(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo,
SVersionRange* pVerRange) {
int32_t step = ASCENDING_TRAVERSE(pLastBlockReader->order)? 1:-1;
int32_t step = ASCENDING_TRAVERSE(pLastBlockReader->order) ? 1 : -1;
while (1) {
bool hasVal = tMergeTreeNext(&pLastBlockReader->mergeTree);
@ -2407,7 +2438,8 @@ static bool initLastBlockReader(SLastBlockReader* pLBlockReader, STableBlockScan
w.ekey = pScanInfo->lastKey + step;
}
tsdbDebug("init last block reader, window:%"PRId64"-%"PRId64", uid:%"PRIu64", %s", w.skey, w.ekey, pScanInfo->uid, pReader->idStr);
tsdbDebug("init last block reader, window:%" PRId64 "-%" PRId64 ", uid:%" PRIu64 ", %s", w.skey, w.ekey,
pScanInfo->uid, pReader->idStr);
int32_t code = tMergeTreeOpen(&pLBlockReader->mergeTree, (pLBlockReader->order == TSDB_ORDER_DESC),
pReader->pFileReader, pReader->suid, pScanInfo->uid, &w, &pLBlockReader->verRange,
pLBlockReader->pInfo, false, pReader->idStr);
@ -2770,13 +2802,19 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) {
SArray* pIndexList = taosArrayInit(numOfTables, sizeof(SBlockIdx));
while (1) {
bool hasNext = filesetIteratorNext(&pStatus->fileIter, pReader);
bool hasNext = false;
int32_t code = filesetIteratorNext(&pStatus->fileIter, pReader, &hasNext);
if (code) {
taosArrayDestroy(pIndexList);
return code;
}
if (!hasNext) { // no data files on disk
break;
}
taosArrayClear(pIndexList);
int32_t code = doLoadBlockIndex(pReader, pReader->pFileReader, pIndexList);
code = doLoadBlockIndex(pReader, pReader->pFileReader, pIndexList);
if (code != TSDB_CODE_SUCCESS) {
taosArrayDestroy(pIndexList);
return code;
@ -2824,74 +2862,15 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) {
return TSDB_CODE_SUCCESS;
}
static int32_t uidComparFunc(const void* p1, const void* p2) {
uint64_t pu1 = *(uint64_t*)p1;
uint64_t pu2 = *(uint64_t*)p2;
if (pu1 == pu2) {
return 0;
} else {
return (pu1 < pu2) ? -1 : 1;
}
static void resetTableListIndex(SReaderStatus* pStatus) {
STableUidList* pList = &pStatus->uidList;
pList->currentIndex = 0;
uint64_t uid = pList->tableUidList[0];
pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
}
static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus, int32_t order) {
int32_t index = 0;
int32_t total = taosHashGetSize(pStatus->pTableMap);
void* p = taosHashIterate(pStatus->pTableMap, NULL);
while (p != NULL) {
STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p;
pOrderCheckInfo->tableUidList[index++] = pScanInfo->uid;
p = taosHashIterate(pStatus->pTableMap, p);
}
taosSort(pOrderCheckInfo->tableUidList, total, sizeof(uint64_t), uidComparFunc);
}
static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, STsdbReader* pReader) {
SReaderStatus* pStatus = &pReader->status;
int32_t total = taosHashGetSize(pStatus->pTableMap);
if (total == 0) {
return TSDB_CODE_SUCCESS;
}
if (pOrderCheckInfo->tableUidList == NULL) {
pOrderCheckInfo->currentIndex = 0;
pOrderCheckInfo->tableUidList = taosMemoryMalloc(total * sizeof(uint64_t));
if (pOrderCheckInfo->tableUidList == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order);
uint64_t uid = pOrderCheckInfo->tableUidList[0];
pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
} else {
if (pStatus->pTableIter == NULL) { // it is the last block of a new file
pOrderCheckInfo->currentIndex = 0;
uint64_t uid = pOrderCheckInfo->tableUidList[pOrderCheckInfo->currentIndex];
pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
// the tableMap has already updated
if (pStatus->pTableIter == NULL) {
void* p = taosMemoryRealloc(pOrderCheckInfo->tableUidList, total * sizeof(uint64_t));
if (p == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pOrderCheckInfo->tableUidList = p;
extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order);
uid = pOrderCheckInfo->tableUidList[0];
pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
}
}
}
return TSDB_CODE_SUCCESS;
}
static bool moveToNextTable(SUidOrderCheckInfo* pOrderedCheckInfo, SReaderStatus* pStatus) {
static bool moveToNextTable(STableUidList* pOrderedCheckInfo, SReaderStatus* pStatus) {
pOrderedCheckInfo->currentIndex += 1;
if (pOrderedCheckInfo->currentIndex >= taosHashGetSize(pStatus->pTableMap)) {
pStatus->pTableIter = NULL;
@ -2906,11 +2885,10 @@ static bool moveToNextTable(SUidOrderCheckInfo* pOrderedCheckInfo, SReaderStatus
static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
SReaderStatus* pStatus = &pReader->status;
SLastBlockReader* pLastBlockReader = pStatus->fileIter.pLastBlockReader;
STableUidList* pUidList = &pStatus->uidList;
SUidOrderCheckInfo* pOrderedCheckInfo = &pStatus->uidCheckInfo;
int32_t code = initOrderCheckInfo(pOrderedCheckInfo, pReader);
if (code != TSDB_CODE_SUCCESS || (taosHashGetSize(pStatus->pTableMap) == 0)) {
return code;
if (taosHashGetSize(pStatus->pTableMap) == 0) {
return TSDB_CODE_SUCCESS;
}
SSDataBlock* pResBlock = pReader->pResBlock;
@ -2921,7 +2899,7 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
bool hasVal = initLastBlockReader(pLastBlockReader, pScanInfo, pReader);
if (!hasVal) {
bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus);
bool hasNexTable = moveToNextTable(pUidList, pStatus);
if (!hasNexTable) {
return TSDB_CODE_SUCCESS;
}
@ -2956,7 +2934,7 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
}
// current table is exhausted, let's try next table
bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus);
bool hasNexTable = moveToNextTable(pUidList, pStatus);
if (!hasNexTable) {
return TSDB_CODE_SUCCESS;
}
@ -3061,14 +3039,15 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
static int32_t buildBlockFromBufferSequentially(STsdbReader* pReader) {
SReaderStatus* pStatus = &pReader->status;
STableUidList* pUidList = &pStatus->uidList;
while (1) {
if (pStatus->pTableIter == NULL) {
pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, NULL);
if (pStatus->pTableIter == NULL) {
return TSDB_CODE_SUCCESS;
}
}
// if (pStatus->pTableIter == NULL) {
// pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, NULL);
// if (pStatus->pTableIter == NULL) {
// return TSDB_CODE_SUCCESS;
// }
// }
STableBlockScanInfo** pBlockScanInfo = pStatus->pTableIter;
initMemDataIterator(*pBlockScanInfo, pReader);
@ -3083,9 +3062,9 @@ static int32_t buildBlockFromBufferSequentially(STsdbReader* pReader) {
return TSDB_CODE_SUCCESS;
}
// current table is exhausted, let's try the next table
pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter);
if (pStatus->pTableIter == NULL) {
// current table is exhausted, let's try next table
bool hasNexTable = moveToNextTable(pUidList, pStatus);
if (!hasNexTable) {
return TSDB_CODE_SUCCESS;
}
}
@ -3114,8 +3093,7 @@ static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter)
static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBlockIter) {
SBlockNumber num = {0};
int32_t code = moveToNextFile(pReader, &num);
int32_t code = moveToNextFile(pReader, &num);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -3132,6 +3110,7 @@ static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBl
} else { // no block data, only last block exists
tBlockDataReset(&pReader->status.fileBlockData);
resetDataBlockIterator(pBlockIter, pReader->order);
resetTableListIndex(&pReader->status);
}
// set the correct start position according to the query time window
@ -3172,6 +3151,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
// this file does not have data files, let's start check the last block file if exists
if (pBlockIter->numOfBlocks == 0) {
resetTableListIndex(&pReader->status);
goto _begin;
}
}
@ -3208,6 +3188,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
tBlockDataReset(pBlockData);
resetDataBlockIterator(pBlockIter, pReader->order);
resetTableListIndex(&pReader->status);
goto _begin;
} else {
code = initForFirstBlockInFile(pReader, pBlockIter);
@ -3219,6 +3200,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
// this file does not have blocks, let's start check the last block file
if (pBlockIter->numOfBlocks == 0) {
resetTableListIndex(&pReader->status);
goto _begin;
}
}
@ -3910,11 +3892,15 @@ int32_t tsdbSetTableList(STsdbReader* pReader, const void* pTableList, int32_t n
ASSERT(size >= num);
taosHashClear(pReader->status.pTableMap);
STableUidList* pUidList = &pReader->status.uidList;
pUidList->currentIndex = 0;
STableKeyInfo* pList = (STableKeyInfo*)pTableList;
for (int32_t i = 0; i < num; ++i) {
STableBlockScanInfo* pInfo = getPosInBlockInfoBuf(&pReader->blockInfoBuf, i);
pInfo->uid = pList[i].uid;
pUidList->tableUidList[i] = pList[i].uid;
taosHashPut(pReader->status.pTableMap, &pInfo->uid, sizeof(uint64_t), &pInfo, POINTER_BYTES);
}
@ -3938,18 +3924,24 @@ void* tsdbGetIvtIdx(SMeta* pMeta) {
uint64_t getReaderMaxVersion(STsdbReader* pReader) { return pReader->verRange.maxVer; }
static int32_t doOpenReaderImpl(STsdbReader* pReader) {
SDataBlockIter* pBlockIter = &pReader->status.blockIter;
SReaderStatus* pStatus = &pReader->status;
SDataBlockIter* pBlockIter = &pStatus->blockIter;
initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
resetDataBlockIterator(&pReader->status.blockIter, pReader->order);
initFilesetIterator(&pStatus->fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
resetDataBlockIterator(&pStatus->blockIter, pReader->order);
// no data in files, let's try buffer in memory
if (pReader->status.fileIter.numOfFiles == 0) {
pReader->status.loadFromFile = false;
return TSDB_CODE_SUCCESS;
int32_t code = TSDB_CODE_SUCCESS;
if (pStatus->fileIter.numOfFiles == 0) {
pStatus->loadFromFile = false;
} else {
return initForFirstBlockInFile(pReader, pBlockIter);
code = initForFirstBlockInFile(pReader, pBlockIter);
}
if (!pStatus->loadFromFile) {
resetTableListIndex(pStatus);
}
return code;
}
// ====================================== EXPOSED APIs ======================================
@ -3961,11 +3953,9 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
pCond->twindows.ekey -= 1;
}
int32_t capacity = 0;
if (pResBlock == NULL) {
capacity = 4096;
} else {
capacity = pResBlock->info.capacity;
int32_t capacity = pVnode->config.tsdbCfg.maxRows;
if (pResBlock != NULL) {
blockDataEnsureCapacity(pResBlock, capacity);
}
int32_t code = tsdbReaderCreate(pVnode, pCond, ppReader, capacity, pResBlock, idstr);
@ -4038,7 +4028,8 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
}
STsdbReader* p = (pReader->innerReader[0] != NULL) ? pReader->innerReader[0] : pReader;
pReader->status.pTableMap = createDataBlockScanInfo(p, &pReader->blockInfoBuf, pTableList, numOfTables);
pReader->status.pTableMap =
createDataBlockScanInfo(p, &pReader->blockInfoBuf, pTableList, &pReader->status.uidList, numOfTables);
if (pReader->status.pTableMap == NULL) {
*ppReader = NULL;
code = TSDB_CODE_OUT_OF_MEMORY;
@ -4053,6 +4044,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
_err:
tsdbError("failed to create data reader, code:%s %s", tstrerror(code), idstr);
tsdbReaderClose(pReader);
*ppReader = NULL; // reset the pointer value.
return code;
}
@ -4067,6 +4059,7 @@ void tsdbReaderClose(STsdbReader* pReader) {
STsdbReader* p = pReader->innerReader[0];
p->status.pTableMap = NULL;
p->status.uidList.tableUidList = NULL;
p->pReadSnap = NULL;
p->pSchema = NULL;
p->pMemSchema = NULL;
@ -4074,6 +4067,7 @@ void tsdbReaderClose(STsdbReader* pReader) {
p = pReader->innerReader[1];
p->status.pTableMap = NULL;
p->status.uidList.tableUidList = NULL;
p->pReadSnap = NULL;
p->pSchema = NULL;
p->pMemSchema = NULL;
@ -4127,7 +4121,7 @@ void tsdbReaderClose(STsdbReader* pReader) {
tsdbUninitReaderLock(pReader);
taosMemoryFree(pReader->status.uidCheckInfo.tableUidList);
taosMemoryFree(pReader->status.uidList.tableUidList);
SIOCostSummary* pCost = &pReader->cost;
SFilesetIter* pFilesetIter = &pReader->status.fileIter;
@ -4321,12 +4315,14 @@ int32_t tsdbReaderResume(STsdbReader* pReader) {
// we need only one row
pPrevReader->capacity = 1;
pPrevReader->status.pTableMap = pReader->status.pTableMap;
pPrevReader->status.uidList = pReader->status.uidList;
pPrevReader->pSchema = pReader->pSchema;
pPrevReader->pMemSchema = pReader->pMemSchema;
pPrevReader->pReadSnap = pReader->pReadSnap;
pNextReader->capacity = 1;
pNextReader->status.pTableMap = pReader->status.pTableMap;
pNextReader->status.uidList = pReader->status.uidList;
pNextReader->pSchema = pReader->pSchema;
pNextReader->pMemSchema = pReader->pMemSchema;
pNextReader->pReadSnap = pReader->pReadSnap;
@ -4368,6 +4364,7 @@ static bool doTsdbNextDataBlock(STsdbReader* pReader) {
if (pBlock->info.rows > 0) {
return true;
} else {
resetTableListIndex(&pReader->status);
buildBlockFromBufferSequentially(pReader);
return pBlock->info.rows > 0;
}
@ -4378,7 +4375,7 @@ static bool doTsdbNextDataBlock(STsdbReader* pReader) {
}
bool tsdbNextDataBlock(STsdbReader* pReader) {
if (isEmptyQueryTimeWindow(&pReader->window)) {
if (isEmptyQueryTimeWindow(&pReader->window) || pReader->step == EXTERNAL_ROWS_NEXT) {
return false;
}
@ -4427,7 +4424,7 @@ bool tsdbNextDataBlock(STsdbReader* pReader) {
return ret;
}
if (pReader->innerReader[1] != NULL && pReader->step == EXTERNAL_ROWS_MAIN) {
if (pReader->step == EXTERNAL_ROWS_MAIN && pReader->innerReader[1] != NULL) {
// prepare for the next row scan
int32_t code = doOpenReaderImpl(pReader->innerReader[1]);
resetAllDataBlockScanInfo(pReader->innerReader[1]->status.pTableMap, pReader->window.ekey);
@ -4435,16 +4432,16 @@ bool tsdbNextDataBlock(STsdbReader* pReader) {
return code;
}
bool ret1 = doTsdbNextDataBlock(pReader->innerReader[1]);
ret = doTsdbNextDataBlock(pReader->innerReader[1]);
pReader->step = EXTERNAL_ROWS_NEXT;
if (ret1) {
if (ret) {
pStatus = &pReader->innerReader[1]->status;
if (pStatus->composedDataBlock) {
qTrace("tsdb/read: %p, unlock read mutex", pReader);
tsdbReleaseReader(pReader);
}
return ret1;
return ret;
}
}
@ -4612,8 +4609,6 @@ SSDataBlock* tsdbRetrieveDataBlock(STsdbReader* pReader, SArray* pIdList) {
}
int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
SReaderStatus* pStatus = &pReader->status;
qTrace("tsdb/reader-reset: %p, take read mutex", pReader);
tsdbAcquireReader(pReader);
@ -4629,12 +4624,14 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
return TSDB_CODE_SUCCESS;
}
SDataBlockIter* pBlockIter = &pReader->status.blockIter;
SReaderStatus* pStatus = &pReader->status;
SDataBlockIter* pBlockIter = &pStatus->blockIter;
pReader->order = pCond->order;
pReader->type = TIMEWINDOW_RANGE_CONTAINED;
pReader->status.loadFromFile = true;
pReader->status.pTableIter = NULL;
pStatus->loadFromFile = true;
pStatus->pTableIter = NULL;
pReader->window = updateQueryTimeWindow(pReader->pTsdb, &pCond->twindows);
// allocate buffer in order to load data blocks from file
@ -4643,19 +4640,21 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
pReader->suppInfo.tsColAgg.colId = PRIMARYKEY_TIMESTAMP_COL_ID;
tsdbDataFReaderClose(&pReader->pFileReader);
int32_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
int32_t numOfTables = taosHashGetSize(pStatus->pTableMap);
initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
initFilesetIterator(&pStatus->fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
resetDataBlockIterator(pBlockIter, pReader->order);
resetTableListIndex(&pReader->status);
int64_t ts = ASCENDING_TRAVERSE(pReader->order) ? pReader->window.skey - 1 : pReader->window.ekey + 1;
resetAllDataBlockScanInfo(pReader->status.pTableMap, ts);
resetAllDataBlockScanInfo(pStatus->pTableMap, ts);
int32_t code = 0;
// no data in files, let's try buffer in memory
if (pReader->status.fileIter.numOfFiles == 0) {
pReader->status.loadFromFile = false;
if (pStatus->fileIter.numOfFiles == 0) {
pStatus->loadFromFile = false;
resetTableListIndex(pStatus);
} else {
code = initForFirstBlockInFile(pReader, pBlockIter);
if (code != TSDB_CODE_SUCCESS) {
@ -4739,7 +4738,7 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
hasNext = blockIteratorNext(&pStatus->blockIter, pReader->idStr);
} else {
code = initForFirstBlockInFile(pReader, pBlockIter);
if ((code != TSDB_CODE_SUCCESS) || (pReader->status.loadFromFile == false)) {
if ((code != TSDB_CODE_SUCCESS) || (pStatus->loadFromFile == false)) {
break;
}

View File

@ -47,15 +47,21 @@ static int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsd
taosMemoryFree(pFD);
goto _exit;
}
if (taosStatFile(path, &pFD->szFile, NULL) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
taosMemoryFree(pFD->pBuf);
taosCloseFile(&pFD->pFD);
taosMemoryFree(pFD);
goto _exit;
// not check file size when reading data files.
if (flag != TD_FILE_READ) {
if (taosStatFile(path, &pFD->szFile, NULL) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
taosMemoryFree(pFD->pBuf);
taosCloseFile(&pFD->pFD);
taosMemoryFree(pFD);
goto _exit;
}
ASSERT(pFD->szFile % szPage == 0);
pFD->szFile = pFD->szFile / szPage;
}
ASSERT(pFD->szFile % szPage == 0);
pFD->szFile = pFD->szFile / szPage;
*ppFD = pFD;
_exit:
@ -103,7 +109,7 @@ _exit:
static int32_t tsdbReadFilePage(STsdbFD *pFD, int64_t pgno) {
int32_t code = 0;
ASSERT(pgno <= pFD->szFile);
// ASSERT(pgno <= pFD->szFile);
// seek
int64_t offset = PAGE_OFFSET(pgno, pFD->szPage);
@ -175,7 +181,7 @@ static int32_t tsdbReadFile(STsdbFD *pFD, int64_t offset, uint8_t *pBuf, int64_t
int32_t szPgCont = PAGE_CONTENT_SIZE(pFD->szPage);
int64_t bOffset = fOffset % pFD->szPage;
ASSERT(pgno && pgno <= pFD->szFile);
// ASSERT(pgno && pgno <= pFD->szFile);
ASSERT(bOffset < szPgCont);
while (n < size) {

View File

@ -328,10 +328,6 @@ static int32_t tsdbSnapCmprTombData(STsdbSnapReader* pReader, uint8_t** ppData)
_exit:
if (code) {
tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pReader->pTsdb->pVnode), __func__, lino, tstrerror(code));
if (pData) {
taosMemoryFree(pData);
pData = NULL;
}
}
*ppData = pData;
return code;
@ -404,7 +400,7 @@ static int32_t tsdbSnapReadTombData(STsdbSnapReader* pReader, uint8_t** ppData)
}
while (pDelInfo && pDelInfo->suid == pReader->tbid.suid && pDelInfo->uid == pReader->tbid.uid) {
if (taosArrayPush(pReader->aDelData, &pDelInfo->delData) < 0) {
if (taosArrayPush(pReader->aDelData, &pDelInfo->delData) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
@ -1252,7 +1248,7 @@ static int32_t tsdbSnapWriteDelTableData(STsdbSnapWriter* pWriter, TABLEID* pId,
SDelData delData;
n += tGetDelData(pData + n, &delData);
if (taosArrayPush(pWriter->aDelData, &delData) < 0) {
if (taosArrayPush(pWriter->aDelData, &delData) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
@ -1420,6 +1416,7 @@ _exit:
tBlockDataDestroy(&pWriter->bData);
tBlockDataDestroy(&pWriter->inData);
tsdbFSDestroy(&pWriter->fs);
taosMemoryFree(pWriter);
pWriter = NULL;
}
} else {

View File

@ -116,12 +116,7 @@ int32_t tMapDataToArray(SMapData *pMapData, int32_t itemSize, int32_t (*tGetItem
}
_exit:
if (code) {
*ppArray = NULL;
if (pArray) taosArrayDestroy(pArray);
} else {
*ppArray = pArray;
}
*ppArray = pArray;
return code;
}
@ -1051,9 +1046,7 @@ static int32_t tsdbMergeSkyline(SArray *pSkyline1, SArray *pSkyline2, SArray *pS
i2++;
}
taosArraySetSize(pSkyline, TARRAY_ELEM_IDX(pSkyline, pItem));
_exit:
pSkyline->size = TARRAY_ELEM_IDX(pSkyline, pItem);
return code;
}
@ -1235,14 +1228,22 @@ int32_t tBlockDataInit(SBlockData *pBlockData, TABLEID *pId, STSchema *pTSchema,
int32_t iColumn = 1;
STColumn *pTColumn = &pTSchema->columns[iColumn];
for (int32_t iCid = 0; iCid < nCid; iCid++) {
ASSERT(pTColumn);
if (ASSERTS(pTColumn != NULL, "invalid input param")) {
code = TSDB_CODE_INVALID_PARA;
goto _exit;
}
while (pTColumn->colId < aCid[iCid]) {
iColumn++;
ASSERT(iColumn < pTSchema->numOfCols);
pTColumn = &pTSchema->columns[iColumn];
}
ASSERT(pTColumn->colId == aCid[iCid]);
if (ASSERTS(pTColumn->colId == aCid[iCid], "invalid input param")) {
code = TSDB_CODE_INVALID_PARA;
goto _exit;
}
tColDataInit(&pBlockData->aColData[iCid], pTColumn->colId, pTColumn->type,
(pTColumn->flags & COL_SMA_ON) ? 1 : 0);

View File

@ -31,7 +31,9 @@ int tsdbInsertData(STsdb *pTsdb, int64_t version, SSubmitReq2 *pMsg, SSubmitRsp2
int32_t affectedrows = 0;
int32_t numOfRows = 0;
ASSERT(pTsdb->mem != NULL);
if (ASSERTS(pTsdb->mem != NULL, "vgId:%d, mem is NULL", TD_VID(pTsdb->pVnode))) {
return -1;
}
if (pMsg) {
arrSize = taosArrayGetSize(pMsg->aSubmitTbData);

View File

@ -36,8 +36,8 @@ static int32_t vnodeCompactTask(void *param) {
vnodeCommitInfo(dir);
_exit:
taosMemoryFree(pInfo);
tsem_post(&pInfo->pVnode->canCommit);
taosMemoryFree(pInfo);
return code;
}
static int32_t vnodePrepareCompact(SVnode *pVnode, SCompactInfo *pInfo) {
@ -59,9 +59,17 @@ static int32_t vnodePrepareCompact(SVnode *pVnode, SCompactInfo *pInfo) {
snprintf(dir, TSDB_FILENAME_LEN, "%s", pVnode->path);
}
vnodeLoadInfo(dir, &info);
if (vnodeLoadInfo(dir, &info) < 0) {
code = terrno;
goto _exit;
}
info.state.commitID = pInfo->commitID;
vnodeSaveInfo(dir, &info);
if (vnodeSaveInfo(dir, &info) < 0) {
code = terrno;
goto _exit;
}
_exit:
if (code) {

View File

@ -48,7 +48,7 @@ int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs) {
info.state.applied = -1;
info.state.commitID = 0;
vInfo("vgId:%d, save config while create", pCfg->vgId);
vInfo("vgId:%d, save config while create", info.config.vgId);
if (vnodeSaveInfo(dir, &info) < 0 || vnodeCommitInfo(dir) < 0) {
vError("vgId:%d, failed to save vnode config since %s", pCfg ? pCfg->vgId : 0, tstrerror(terrno));
return -1;
@ -124,7 +124,7 @@ int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t sr
while (1) {
const STfsFile *tsdbFile = tfsReaddir(tsdbDir);
if (tsdbFile == NULL) break;
if (tsdbFile->rname == NULL) continue;
if (tsdbFile->rname[0] == '\0') continue;
tstrncpy(oldRname, tsdbFile->rname, TSDB_FILENAME_LEN);
char *tsdbFilePrefixPos = strstr(oldRname, tsdbFilePrefix);
@ -365,7 +365,7 @@ _err:
if (pVnode->pWal) walClose(pVnode->pWal);
if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb);
if (pVnode->pSma) smaClose(pVnode->pSma);
if (pVnode->pMeta) metaClose(pVnode->pMeta);
if (pVnode->pMeta) metaClose(&pVnode->pMeta);
if (pVnode->freeList) vnodeCloseBufPool(pVnode);
tsem_destroy(&(pVnode->canCommit));
@ -389,7 +389,7 @@ void vnodeClose(SVnode *pVnode) {
tqClose(pVnode->pTq);
if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb);
smaClose(pVnode->pSma);
metaClose(pVnode->pMeta);
if (pVnode->pMeta) metaClose(&pVnode->pMeta);
vnodeCloseBufPool(pVnode);
tsem_post(&pVnode->canCommit);

View File

@ -141,7 +141,10 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int
*(int64_t *)(pCoder->data + pCoder->pos) = uid;
pCoder->pos += sizeof(int64_t);
} else {
tDecodeI64(pCoder, &submitTbData.uid);
if (tDecodeI64(pCoder, &submitTbData.uid) < 0) {
code = TSDB_CODE_INVALID_MSG;
TSDB_CHECK_CODE(code, lino, _exit);
}
}
if (tDecodeI32v(pCoder, &submitTbData.sver) < 0) {
@ -168,6 +171,11 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int
SColData colData = {0};
pCoder->pos += tGetColData(pCoder->data + pCoder->pos, &colData);
if (colData.flag != HAS_VALUE) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
for (int32_t iRow = 0; iRow < colData.nVal; iRow++) {
if (((TSKEY *)colData.pData)[iRow] < minKey || ((TSKEY *)colData.pData)[iRow] > maxKey) {
code = TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE;
@ -440,10 +448,13 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
walApplyVer(pVnode->pWal, version);
/*vInfo("vgId:%d, push msg begin", pVnode->config.vgId);*/
if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, version) < 0) {
/*vInfo("vgId:%d, push msg end", pVnode->config.vgId);*/
vError("vgId:%d, failed to push msg to TQ since %s", TD_VID(pVnode), tstrerror(terrno));
return -1;
}
/*vInfo("vgId:%d, push msg end", pVnode->config.vgId);*/
// commit if need
if (needCommit) {
@ -479,7 +490,6 @@ int32_t vnodePreprocessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
vTrace("message in vnode query queue is processing");
// if ((pMsg->msgType == TDMT_SCH_QUERY) && !vnodeIsLeader(pVnode)) {
if ((pMsg->msgType == TDMT_SCH_QUERY) && !syncIsReadyForRead(pVnode->sync)) {
vnodeRedirectRpcMsg(pVnode, pMsg, terrno);
return 0;
@ -503,7 +513,6 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META || pMsg->msgType == TDMT_VND_TABLE_CFG ||
pMsg->msgType == TDMT_VND_BATCH_META) &&
!syncIsReadyForRead(pVnode->sync)) {
// !vnodeIsLeader(pVnode)) {
vnodeRedirectRpcMsg(pVnode, pMsg, terrno);
return 0;
}

View File

@ -283,7 +283,7 @@ TEST(testCase, tSma_metaDB_Put_Get_Del_Test) {
metaRemoveSmaFromDb(pMeta, indexUid2);
tDestroyTSma(&tSma);
metaClose(pMeta);
metaClose(&pMeta);
}
#endif
@ -577,9 +577,9 @@ TEST(testCase, tSma_Data_Insert_Query_Test) {
tDestroyTSma(&tSma);
tfsClose(pTsdb->pTfs);
tsdbClose(pTsdb);
metaClose(pMeta);
metaClose(&pMeta);
}
#endif
#pragma GCC diagnostic pop
#pragma GCC diagnostic pop

View File

@ -300,7 +300,7 @@ typedef struct SCtgSubRes {
ctgSubTaskCbFp fp;
} SCtgSubRes;
typedef struct SCtgTask {
struct SCtgTask {
CTG_TASK_TYPE type;
int32_t taskId;
SCtgJob* pJob;
@ -313,7 +313,7 @@ typedef struct SCtgTask {
SRWLatch lock;
SArray* pParents;
SCtgSubRes subRes;
} SCtgTask;
};
typedef struct SCtgTaskReq {
SCtgTask* pTask;

View File

@ -1707,9 +1707,7 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask* pTask) {
return TSDB_CODE_SUCCESS;
}
pTask->msgCtxs = taosArrayInit(pCtx->fetchNum, sizeof(SCtgMsgCtx));
taosArraySetSize(pTask->msgCtxs, pCtx->fetchNum);
pTask->msgCtxs = taosArrayInit_s(pCtx->fetchNum, sizeof(SCtgMsgCtx), pCtx->fetchNum);
for (int32_t i = 0; i < pCtx->fetchNum; ++i) {
SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i);
SName* pName = ctgGetFetchName(pCtx->pNames, pFetch);
@ -1844,7 +1842,10 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask* pTask) {
ctgAddFetch(&pCtx->pFetchs, i, -1, &fetchIdx, baseResIdx, 0);
baseResIdx += taosArrayGetSize(pReq->pTables);
taosArraySetSize(pCtx->pResList, baseResIdx);
int32_t inc = baseResIdx - taosArrayGetSize(pCtx->pResList);
for(int32_t j = 0; j < inc; ++j) {
taosArrayPush(pCtx->pResList, &(SMetaRes){0});
}
}
}
@ -1856,8 +1857,7 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask* pTask) {
return TSDB_CODE_SUCCESS;
}
pTask->msgCtxs = taosArrayInit(pCtx->fetchNum, sizeof(SCtgMsgCtx));
taosArraySetSize(pTask->msgCtxs, pCtx->fetchNum);
pTask->msgCtxs = taosArrayInit_s(pCtx->fetchNum, sizeof(SCtgMsgCtx), pCtx->fetchNum);
for (int32_t i = 0; i < pCtx->fetchNum; ++i) {
SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i);

View File

@ -493,11 +493,9 @@ int32_t ctgCopyTbMeta(SCatalog *pCtg, SCtgTbMetaCtx *ctx, SCtgDBCache **pDb, SCt
//ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
if (tbCache) {
CTG_UNLOCK(CTG_READ, &tbCache->metaLock);
taosHashRelease(dbCache->tbCache, tbCache);
*pTb = NULL;
}
CTG_UNLOCK(CTG_READ, &tbCache->metaLock);
taosHashRelease(dbCache->tbCache, tbCache);
*pTb = NULL;
ctgDebug("Got ctb %s meta from cache, will continue to get its stb meta, type:%d, dbFName:%s", ctx->pName->tname,
ctx->tbInfo.tbType, dbFName);
@ -1554,8 +1552,8 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
SCtgTbCache cache = {0};
cache.pMeta = meta;
if (taosHashPut(dbCache->tbCache, tbName, strlen(tbName), &cache, sizeof(SCtgTbCache)) != 0) {
taosMemoryFree(meta);
ctgError("taosHashPut new tbCache failed, dbFName:%s, tbName:%s, tbType:%d", dbFName, tbName, meta->tableType);
taosMemoryFree(meta);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
@ -2480,20 +2478,20 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
ctgDebug("db %s not in cache", dbFName);
for (int32_t i = 0; i < tbNum; ++i) {
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosArrayPush(ctx->pResList, &(SMetaData){0});
}
return TSDB_CODE_SUCCESS;
}
for (int32_t i = 0; i < tbNum; ++i) {
SName *pName = taosArrayGet(pList, i);
pName = taosArrayGet(pList, i);
pCache = taosHashAcquire(dbCache->tbCache, pName->tname, strlen(pName->tname));
if (NULL == pCache) {
ctgDebug("tb %s not in cache, dbFName:%s", pName->tname, dbFName);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosArrayPush(ctx->pResList, &(SMetaRes){0});
continue;
}
@ -2503,7 +2501,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
ctgDebug("tb %s meta not in cache, dbFName:%s", pName->tname, dbFName);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosArrayPush(ctx->pResList, &(SMetaRes){0});
continue;
}
@ -2576,7 +2574,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
if (NULL == stName) {
ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", pTableMeta->suid, dbFName);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosArrayPush(ctx->pResList, &(SMetaRes){0});
taosMemoryFreeClear(pTableMeta);
continue;
@ -2588,7 +2586,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
taosHashRelease(dbCache->stbCache, stName);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosArrayPush(ctx->pResList, &(SMetaRes){0});
taosMemoryFreeClear(pTableMeta);
continue;
@ -2603,7 +2601,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
taosHashRelease(dbCache->tbCache, pCache);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosArrayPush(ctx->pResList, &(SMetaRes){0});
taosMemoryFreeClear(pTableMeta);
@ -2619,7 +2617,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
nctx.tbInfo.suid);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
taosArrayPush(ctx->pResList, &(SMetaRes){0});
taosMemoryFreeClear(pTableMeta);

View File

@ -44,6 +44,8 @@
typedef struct SGroupResInfo {
int32_t index;
SArray* pRows; // SArray<SResKeyPos>
char* pBuf;
bool freeItem;
} SGroupResInfo;
typedef struct SResultRow {
@ -115,10 +117,6 @@ struct SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t i
static FORCE_INLINE SResultRow* getResultRowByPos(SDiskbasedBuf* pBuf, SResultRowPosition* pos, bool forUpdate) {
SFilePage* bufPage = (SFilePage*)getBufPage(pBuf, pos->pageId);
if (NULL == bufPage) {
return NULL;
}
if (forUpdate) {
setBufPageDirty(bufPage, true);
}

View File

@ -149,6 +149,10 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
// check if it is a group by tbname
if ((pInfo->retrieveType & CACHESCAN_RETRIEVE_TYPE_ALL) == CACHESCAN_RETRIEVE_TYPE_ALL) {
if (isTaskKilled(pTaskInfo)) {
T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
}
if (pInfo->indexOfBufferedRes >= pInfo->pBufferredRes->info.rows) {
blockDataCleanup(pInfo->pBufferredRes);
taosArrayClear(pInfo->pUidList);
@ -207,6 +211,10 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
size_t totalGroups = tableListGetOutputGroups(pTableList);
while (pInfo->currentGroupIndex < totalGroups) {
if (isTaskKilled(pTaskInfo)) {
T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
}
STableKeyInfo* pList = NULL;
int32_t num = 0;
@ -215,8 +223,15 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
T_LONG_JMP(pTaskInfo->env, code);
}
tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pList, num,
taosArrayGetSize(pInfo->matchInfo.pList), suid, &pInfo->pLastrowReader, pTaskInfo->id.str);
code = tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pList, num,
taosArrayGetSize(pInfo->matchInfo.pList), suid, &pInfo->pLastrowReader,
pTaskInfo->id.str);
if (code != TSDB_CODE_SUCCESS) {
pInfo->currentGroupIndex += 1;
taosArrayClear(pInfo->pUidList);
continue;
}
taosArrayClear(pInfo->pUidList);
code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pUidList);

View File

@ -50,11 +50,13 @@ typedef enum {
} FilterCondType;
static FilterCondType checkTagCond(SNode* cond);
static int32_t removeInvalidTable(SArray* uids, SHashObj* tags);
static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond, SHashObj* tags);
static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond);
static int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond,
SNode* pTagIndexCond, STableListInfo* pListInfo);
static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond);
static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* list, SNode* pTagCond);
static int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond,
SNode* pTagIndexCond, STableListInfo* pListInfo, const char* idstr);
static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList,
void* metaHandle);
static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; }
static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->offset; }
@ -95,15 +97,18 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
return rowSize;
}
static void freeEx(void* p) { taosMemoryFree(*(void**)p); }
void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo) {
assert(pGroupResInfo != NULL);
for (int32_t i = 0; i < taosArrayGetSize(pGroupResInfo->pRows); ++i) {
SResKeyPos* pRes = taosArrayGetP(pGroupResInfo->pRows, i);
taosMemoryFree(pRes);
taosMemoryFreeClear(pGroupResInfo->pBuf);
if (pGroupResInfo->freeItem) {
// taosArrayDestroy(pGroupResInfo->pRows);
taosArrayDestroyEx(pGroupResInfo->pRows, freeEx);
pGroupResInfo->freeItem = false;
pGroupResInfo->pRows = NULL;
} else {
pGroupResInfo->pRows = taosArrayDestroy(pGroupResInfo->pRows);
}
pGroupResInfo->pRows = taosArrayDestroy(pGroupResInfo->pRows);
pGroupResInfo->index = 0;
}
@ -133,26 +138,40 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, in
}
// extract the result rows information from the hash map
void* pData = NULL;
pGroupResInfo->pRows = taosArrayInit(10, POINTER_BYTES);
int32_t size = tSimpleHashGetSize(pHashmap);
void* pData = NULL;
pGroupResInfo->pRows = taosArrayInit(size, POINTER_BYTES);
// todo avoid repeated malloc memory
size_t keyLen = 0;
int32_t iter = 0;
int32_t bufLen = 0, offset = 0;
// todo move away and record this during create window
while ((pData = tSimpleHashIterate(pHashmap, pData, &iter)) != NULL) {
/*void* key = */ tSimpleHashGetKey(pData, &keyLen);
bufLen += keyLen + sizeof(SResultRowPosition);
}
pGroupResInfo->pBuf = taosMemoryMalloc(bufLen);
iter = 0;
while ((pData = tSimpleHashIterate(pHashmap, pData, &iter)) != NULL) {
void* key = tSimpleHashGetKey(pData, &keyLen);
SResKeyPos* p = taosMemoryMalloc(keyLen + sizeof(SResultRowPosition));
SResKeyPos* p = (SResKeyPos*)(pGroupResInfo->pBuf + offset);
p->groupId = *(uint64_t*)key;
p->pos = *(SResultRowPosition*)pData;
memcpy(p->key, (char*)key + sizeof(uint64_t), keyLen - sizeof(uint64_t));
taosArrayPush(pGroupResInfo->pRows, &p);
offset += keyLen + sizeof(struct SResultRowPosition);
}
if (order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC) {
__compar_fn_t fn = (order == TSDB_ORDER_ASC) ? resultrowComparAsc : resultrowComparDesc;
int32_t size = POINTER_BYTES;
size = POINTER_BYTES;
taosSort(pGroupResInfo->pRows->pData, taosArrayGetSize(pGroupResInfo->pRows), size, fn);
}
@ -165,6 +184,7 @@ void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayL
taosArrayDestroy(pGroupResInfo->pRows);
}
pGroupResInfo->freeItem = true;
pGroupResInfo->pRows = pArrayList;
pGroupResInfo->index = 0;
ASSERT(pGroupResInfo->index <= getNumOfTotalRes(pGroupResInfo));
@ -179,7 +199,6 @@ bool hasRemainResults(SGroupResInfo* pGroupResInfo) {
}
int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo) {
assert(pGroupResInfo != NULL);
if (pGroupResInfo->pRows == 0) {
return 0;
}
@ -399,159 +418,6 @@ static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarPara
return TSDB_CODE_SUCCESS;
}
static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray* uidList, SNode* pTagCond,
SIdxFltStatus status) {
int32_t code = TSDB_CODE_SUCCESS;
SArray* pBlockList = NULL;
SSDataBlock* pResBlock = NULL;
SHashObj* tags = NULL;
SScalarParam output = {0};
tagFilterAssist ctx = {0};
ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK);
if (ctx.colHash == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo));
if (ctx.cInfoList == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
nodesRewriteExprPostOrder(&pTagCond, getColumn, (void*)&ctx);
pResBlock = createDataBlock();
if (pResBlock == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
for (int32_t i = 0; i < taosArrayGetSize(ctx.cInfoList); ++i) {
SColumnInfoData colInfo = {0};
colInfo.info = *(SColumnInfo*)taosArrayGet(ctx.cInfoList, i);
blockDataAppendColInfo(pResBlock, &colInfo);
}
// int64_t stt = taosGetTimestampUs();
tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
FilterCondType condType = checkTagCond(pTagCond);
int32_t filter = optimizeTbnameInCond(metaHandle, suid, uidList, pTagCond, tags);
if (filter == -1) {
if ((condType == FILTER_NO_LOGIC || condType == FILTER_AND) && status != SFLT_NOT_INDEX) {
code = metaGetTableTagsByUids(metaHandle, suid, uidList, tags);
} else {
code = metaGetTableTags(metaHandle, suid, uidList, tags);
}
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), suid);
terrno = code;
goto end;
}
} else {
qDebug("succ to get table tags from meta by tbname in cond, suid:%" PRIu64, suid);
}
if (suid != 0) {
removeInvalidTable(uidList, tags);
}
int32_t rows = taosArrayGetSize(uidList);
if (rows == 0) {
goto end;
}
code = blockDataEnsureCapacity(pResBlock, rows);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
goto end;
}
for (int32_t i = 0; i < rows; i++) {
int64_t* uid = taosArrayGet(uidList, i);
for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) {
SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j);
if (pColInfo->info.colId == -1) { // tbname
char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
metaGetTableNameByUid(metaHandle, *uid, str);
colDataAppend(pColInfo, i, str, false);
#if TAG_FILTER_DEBUG
qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2);
#endif
} else {
void* tag = taosHashGet(tags, uid, sizeof(int64_t));
if (tag == NULL) {
continue;
}
STagVal tagVal = {0};
tagVal.cid = pColInfo->info.colId;
const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal);
if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) {
colDataAppend(pColInfo, i, p, true);
} else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) {
colDataAppend(pColInfo, i, p, false);
} else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) {
char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1);
varDataSetLen(tmp, tagVal.nData);
memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData);
colDataAppend(pColInfo, i, tmp, false);
#if TAG_FILTER_DEBUG
qDebug("tagfilter varch:%s", tmp + 2);
#endif
taosMemoryFree(tmp);
} else {
colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false);
#if TAG_FILTER_DEBUG
if (pColInfo->info.type == TSDB_DATA_TYPE_INT) {
qDebug("tagfilter int:%d", *(int*)(&tagVal.i64));
} else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) {
qDebug("tagfilter double:%f", *(double*)(&tagVal.i64));
}
#endif
}
}
}
}
pResBlock->info.rows = rows;
// int64_t st1 = taosGetTimestampUs();
// qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st);
pBlockList = taosArrayInit(2, POINTER_BYTES);
taosArrayPush(pBlockList, &pResBlock);
SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)};
code = createResultData(&type, rows, &output);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
qError("failed to create result, reason:%s", tstrerror(code));
goto end;
}
code = scalarCalculate(pTagCond, pBlockList, &output);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to calculate scalar, reason:%s", tstrerror(code));
terrno = code;
goto end;
}
// int64_t st2 = taosGetTimestampUs();
// qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1);
end:
taosHashCleanup(tags);
taosHashCleanup(ctx.colHash);
taosArrayDestroy(ctx.cInfoList);
blockDataDestroy(pResBlock);
taosArrayDestroy(pBlockList);
return output.columnData;
}
static void releaseColInfoData(void* pCol) {
if (pCol) {
SColumnInfoData* col = (SColumnInfoData*)pCol;
@ -560,12 +426,17 @@ static void releaseColInfoData(void* pCol) {
}
}
void freeItem(void* p) {
STUidTagInfo* pInfo = p;
if (pInfo->pTagVal != NULL) {
taosMemoryFree(pInfo->pTagVal);
}
}
int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableListInfo* pTableListInfo) {
int32_t code = TSDB_CODE_SUCCESS;
SArray* pBlockList = NULL;
SSDataBlock* pResBlock = NULL;
SHashObj* tags = NULL;
SArray* uidList = NULL;
void* keyBuf = NULL;
SArray* groupData = NULL;
@ -594,89 +465,26 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
REPLACE_NODE(pNode);
}
pResBlock = createDataBlock();
if (pResBlock == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
for (int32_t i = 0; i < taosArrayGetSize(ctx.cInfoList); ++i) {
SColumnInfoData colInfo = {0};
colInfo.info = *(SColumnInfo*)taosArrayGet(ctx.cInfoList, i);
blockDataAppendColInfo(pResBlock, &colInfo);
}
uidList = taosArrayInit(rows, sizeof(uint64_t));
SArray* pUidTagList = taosArrayInit(8, sizeof(STUidTagInfo));
for (int32_t i = 0; i < rows; ++i) {
STableKeyInfo* pkeyInfo = taosArrayGet(pTableListInfo->pTableList, i);
taosArrayPush(uidList, &pkeyInfo->uid);
STUidTagInfo info = {.uid = pkeyInfo->uid};
taosArrayPush(pUidTagList, &info);
}
// int64_t stt = taosGetTimestampUs();
tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
code = metaGetTableTags(metaHandle, pTableListInfo->suid, uidList, tags);
code = metaGetTableTags(metaHandle, pTableListInfo->suid, pUidTagList);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
// int64_t stt1 = taosGetTimestampUs();
// qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt);
code = blockDataEnsureCapacity(pResBlock, rows);
if (code != TSDB_CODE_SUCCESS) {
int32_t numOfTables = taosArrayGetSize(pUidTagList);
pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, pUidTagList, metaHandle);
if (pResBlock == NULL) {
code = terrno;
goto end;
}
// int64_t st = taosGetTimestampUs();
for (int32_t i = 0; i < rows; i++) {
int64_t* uid = taosArrayGet(uidList, i);
for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) {
SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j);
if (pColInfo->info.colId == -1) { // tbname
char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
metaGetTableNameByUid(metaHandle, *uid, str);
colDataAppend(pColInfo, i, str, false);
#if TAG_FILTER_DEBUG
qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2);
#endif
} else {
void* tag = taosHashGet(tags, uid, sizeof(int64_t));
ASSERT(tag);
STagVal tagVal = {0};
tagVal.cid = pColInfo->info.colId;
const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal);
if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) {
colDataAppend(pColInfo, i, p, true);
} else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) {
colDataAppend(pColInfo, i, p, false);
} else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) {
char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1);
varDataSetLen(tmp, tagVal.nData);
memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData);
colDataAppend(pColInfo, i, tmp, false);
#if TAG_FILTER_DEBUG
qDebug("tagfilter varch:%s", tmp + 2);
#endif
taosMemoryFree(tmp);
} else {
colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false);
#if TAG_FILTER_DEBUG
if (pColInfo->info.type == TSDB_DATA_TYPE_INT) {
qDebug("tagfilter int:%d", *(int*)(&tagVal.i64));
} else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) {
qDebug("tagfilter double:%f", *(double*)(&tagVal.i64));
}
#endif
}
}
}
}
pResBlock->info.rows = rows;
// int64_t st1 = taosGetTimestampUs();
// qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st);
@ -784,12 +592,11 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
end:
taosMemoryFreeClear(keyBuf);
taosHashCleanup(tags);
taosHashCleanup(ctx.colHash);
taosArrayDestroy(ctx.cInfoList);
blockDataDestroy(pResBlock);
taosArrayDestroy(pBlockList);
taosArrayDestroy(uidList);
taosArrayDestroyEx(pUidTagList, freeItem);
taosArrayDestroyP(groupData, releaseColInfoData);
return code;
}
@ -858,6 +665,17 @@ static int tableUidCompare(const void* a, const void* b) {
return u1 < u2 ? -1 : 1;
}
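// compares two STUidTagInfo entries by uid; used to sort and deduplicate the filtered table list below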
static int32_t filterTableInfoCompare(const void* a, const void* b) {
STUidTagInfo* p1 = (STUidTagInfo*)a;
STUidTagInfo* p2 = (STUidTagInfo*)b;
if (p1->uid == p2->uid) {
return 0;
}
return p1->uid < p2->uid ? -1 : 1;
}
static FilterCondType checkTagCond(SNode* cond) {
if (nodeType(cond) == QUERY_NODE_OPERATOR) {
return FILTER_NO_LOGIC;
@ -867,17 +685,16 @@ static FilterCondType checkTagCond(SNode* cond) {
}
return FILTER_OTHER;
}
static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* cond, SHashObj* tags) {
static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* cond) {
int32_t ret = -1;
if (nodeType(cond) == QUERY_NODE_OPERATOR) {
ret = optimizeTbnameInCondImpl(metaHandle, suid, list, cond);
if (ret != -1) {
metaGetTableTagsByUids(metaHandle, suid, list, tags);
removeInvalidTable(list, tags);
}
int32_t ntype = nodeType(cond);
if (ntype == QUERY_NODE_OPERATOR) {
ret = optimizeTbnameInCondImpl(metaHandle, list, cond);
}
if (nodeType(cond) != QUERY_NODE_LOGIC_CONDITION || ((SLogicConditionNode*)cond)->condType != LOGIC_COND_TYPE_AND) {
if (ntype != QUERY_NODE_LOGIC_CONDITION || ((SLogicConditionNode*)cond)->condType != LOGIC_COND_TYPE_AND) {
return ret;
}
@ -893,45 +710,25 @@ static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list
SListCell* cell = pList->pHead;
for (int i = 0; i < len; i++) {
if (cell == NULL) break;
if (optimizeTbnameInCondImpl(metaHandle, suid, list, cell->pNode) == 0) {
if (optimizeTbnameInCondImpl(metaHandle, list, cell->pNode) == 0) {
hasTbnameCond = true;
break;
}
cell = cell->pNext;
}
taosArraySort(list, tableUidCompare);
taosArrayRemoveDuplicate(list, tableUidCompare, NULL);
taosArraySort(list, filterTableInfoCompare);
taosArrayRemoveDuplicate(list, filterTableInfoCompare, NULL);
if (hasTbnameCond) {
ret = metaGetTableTagsByUids(metaHandle, suid, list, tags);
removeInvalidTable(list, tags);
ret = metaGetTableTagsByUids(metaHandle, suid, list);
}
return ret;
}
/*
* handle invalid uid
*/
static int32_t removeInvalidTable(SArray* uids, SHashObj* tags) {
if (taosArrayGetSize(uids) <= 0) return 0;
SArray* validUid = taosArrayInit(taosArrayGetSize(uids), sizeof(int64_t));
for (int32_t i = 0; i < taosArrayGetSize(uids); i++) {
int64_t* uid = taosArrayGet(uids, i);
if (taosHashGet(tags, uid, sizeof(int64_t)) != NULL) {
taosArrayPush(validUid, uid);
}
}
taosArraySwap(uids, validUid);
taosArrayDestroy(validUid);
return 0;
}
static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond) {
// only returns uids that are not already contained in pExistedUidList
static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* pExistedUidList, SNode* pTagCond) {
if (nodeType(pTagCond) != QUERY_NODE_OPERATOR) {
return -1;
}
@ -954,12 +751,13 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray*
SArray* pTbList = getTableNameList(pList);
int32_t numOfTables = taosArrayGetSize(pTbList);
SHashObj* uHash = NULL;
size_t listlen = taosArrayGetSize(list); // len > 0 means there already have uids
if (listlen > 0) {
uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
for (int i = 0; i < listlen; i++) {
int64_t* uid = taosArrayGet(list, i);
taosHashPut(uHash, uid, sizeof(int64_t), &i, sizeof(i));
size_t numOfExisted = taosArrayGetSize(pExistedUidList);  // a non-empty list means uids have already been collected
if (numOfExisted > 0) {
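// presized for a 0.7 load factor (an assumption about the hash table's fill threshold) to avoid rehashing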
uHash = taosHashInit(numOfExisted / 0.7, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
for (int i = 0; i < numOfExisted; i++) {
STUidTagInfo* pTInfo = taosArrayGet(pExistedUidList, i);
taosHashPut(uHash, &pTInfo->uid, sizeof(uint64_t), &i, sizeof(i));
}
}
@ -971,7 +769,8 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray*
ETableType tbType = TSDB_TABLE_MAX;
if (metaGetTableTypeByName(metaHandle, name, &tbType) == 0 && tbType == TSDB_CHILD_TABLE) {
if (NULL == uHash || taosHashGet(uHash, &uid, sizeof(uid)) == NULL) {
taosArrayPush(list, &uid);
STUidTagInfo s = {.uid = uid, .name = name, .pTagVal = NULL};
taosArrayPush(pExistedUidList, &s);
}
} else {
taosArrayDestroy(pTbList);
@ -1008,132 +807,309 @@ static void genTagFilterDigest(const SNode* pTagCond, T_MD5_CTX* pContext) {
taosMemoryFree(payload);
}
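// builds a data block holding the tbname/tag values of every candidate table, so the tag
// condition can be evaluated over all tables in a single pass by the scalar engine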
static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* res, SNode* pTagCond, void* metaHandle,
static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList,
void* metaHandle) {
SSDataBlock* pResBlock = createDataBlock();
if (pResBlock == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
for (int32_t i = 0; i < taosArrayGetSize(pColList); ++i) {
SColumnInfoData colInfo = {0};
colInfo.info = *(SColumnInfo*)taosArrayGet(pColList, i);
blockDataAppendColInfo(pResBlock, &colInfo);
}
int32_t code = blockDataEnsureCapacity(pResBlock, numOfTables);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return NULL;
}
pResBlock->info.rows = numOfTables;
int32_t numOfCols = taosArrayGetSize(pResBlock->pDataBlock);
for (int32_t i = 0; i < numOfTables; i++) {
STUidTagInfo* p1 = taosArrayGet(pUidTagList, i);
for (int32_t j = 0; j < numOfCols; j++) {
SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j);
if (pColInfo->info.colId == -1) { // tbname
char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
if (p1->name != NULL) {
STR_TO_VARSTR(str, p1->name);
} else { // name is not retrieved during filter
metaGetTableNameByUid(metaHandle, p1->uid, str);
}
colDataAppend(pColInfo, i, str, false);
#if TAG_FILTER_DEBUG
qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2);
#endif
} else {
STagVal tagVal = {0};
tagVal.cid = pColInfo->info.colId;
if (p1->pTagVal == NULL) {
colDataAppendNULL(pColInfo, i);
continue;  // no tag payload for this table; skip value extraction
}
const char* p = metaGetTableTagVal(p1->pTagVal, pColInfo->info.type, &tagVal);
if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) {
colDataAppendNULL(pColInfo, i);
} else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) {
colDataAppend(pColInfo, i, p, false);
} else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) {
char* tmp = alloca(tagVal.nData + VARSTR_HEADER_SIZE + 1);
varDataSetLen(tmp, tagVal.nData);
memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData);
colDataAppend(pColInfo, i, tmp, false);
#if TAG_FILTER_DEBUG
qDebug("tagfilter varch:%s", tmp + 2);
#endif
} else {
colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false);
#if TAG_FILTER_DEBUG
if (pColInfo->info.type == TSDB_DATA_TYPE_INT) {
qDebug("tagfilter int:%d", *(int*)(&tagVal.i64));
} else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) {
qDebug("tagfilter double:%f", *(double*)(&tagVal.i64));
}
#endif
}
}
}
}
return pResBlock;
}
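// keeps only the uids whose entry in pResultList (the boolean output of the tag filter) is true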
static void doSetQualifiedUid(SArray* pUidList, const SArray* pUidTagList, bool* pResultList) {
taosArrayClear(pUidList);
int32_t numOfTables = taosArrayGetSize(pUidTagList);
for (int32_t i = 0; i < numOfTables; ++i) {
uint64_t uid = ((STUidTagInfo*)taosArrayGet(pUidTagList, i))->uid;
qDebug("tagfilter get uid:%" PRId64 ", res:%d", uid, pResultList[i]);
if (pResultList[i]) {
taosArrayPush(pUidList, &uid);
}
}
}
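// seeds the uid+tag list with uids that were already collected, e.g. by the tag index filter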
static void copyExistedUids(SArray* pUidTagList, const SArray* pUidList) {
int32_t numOfExisted = taosArrayGetSize(pUidList);
if (numOfExisted == 0) {
return;
}
for (int32_t i = 0; i < numOfExisted; ++i) {
uint64_t* uid = taosArrayGet(pUidList, i);
STUidTagInfo info = {.uid = *uid};
taosArrayPush(pUidTagList, &info);
}
}
static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SNode* pTagCond, void* metaHandle,
SIdxFltStatus status) {
if (pTagCond == NULL) {
return TSDB_CODE_SUCCESS;
}
terrno = TDB_CODE_SUCCESS;
SColumnInfoData* pColInfoData = getColInfoResult(metaHandle, pListInfo->suid, res, pTagCond, status);
if (terrno != TDB_CODE_SUCCESS) {
colDataDestroy(pColInfoData);
taosMemoryFreeClear(pColInfoData);
taosArrayDestroy(res);
qError("failed to getColInfoResult, code: %s", tstrerror(terrno));
return terrno;
int32_t code = TSDB_CODE_SUCCESS;
SArray* pBlockList = NULL;
SSDataBlock* pResBlock = NULL;
SScalarParam output = {0};
tagFilterAssist ctx = {0};
ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK);
if (ctx.colHash == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
int32_t i = 0;
int32_t len = taosArrayGetSize(res);
ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo));
if (ctx.cInfoList == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
if (pColInfoData != NULL) {
bool* pResult = (bool*)pColInfoData->pData;
SArray* p = taosArrayInit(taosArrayGetSize(res), sizeof(uint64_t));
nodesRewriteExprPostOrder(&pTagCond, getColumn, (void*)&ctx);
while (i < len && pColInfoData) {
int64_t* uid = taosArrayGet(res, i);
qDebug("tagfilter get uid:%" PRId64 ", res:%d", *uid, pResult[i]);
SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)};
if (pResult[i]) {
taosArrayPush(p, uid);
}
i += 1;
// int64_t stt = taosGetTimestampUs();
SArray* pUidTagList = taosArrayInit(10, sizeof(STUidTagInfo));
copyExistedUids(pUidTagList, pUidList);
FilterCondType condType = checkTagCond(pTagCond);
int32_t filter = optimizeTbnameInCond(metaHandle, pListInfo->suid, pUidTagList, pTagCond);
if (filter == 0) {  // the tbname IN (...) filter produced the complete result; copy the qualified uids and return
taosArrayClear(pUidList);
int32_t numOfRows = taosArrayGetSize(pUidTagList);
taosArrayEnsureCap(pUidList, numOfRows);
for (int32_t i = 0; i < numOfRows; ++i) {
STUidTagInfo* pInfo = taosArrayGet(pUidTagList, i);
taosArrayPush(pUidList, &pInfo->uid);
}
taosArraySwap(res, p);
taosArrayDestroy(p);
terrno = 0;
goto end;
} else {
if ((condType == FILTER_NO_LOGIC || condType == FILTER_AND) && status != SFLT_NOT_INDEX) {
code = metaGetTableTagsByUids(metaHandle, pListInfo->suid, pUidTagList);
} else {
code = metaGetTableTags(metaHandle, pListInfo->suid, pUidTagList);
}
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), pListInfo->suid);
terrno = code;
goto end;
}
}
colDataDestroy(pColInfoData);
taosMemoryFreeClear(pColInfoData);
int32_t numOfTables = taosArrayGetSize(pUidTagList);
if (numOfTables == 0) {
goto end;
}
return TSDB_CODE_SUCCESS;
pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, pUidTagList, metaHandle);
if (pResBlock == NULL) {
code = terrno;
goto end;
}
// int64_t st1 = taosGetTimestampUs();
// qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st);
pBlockList = taosArrayInit(2, POINTER_BYTES);
taosArrayPush(pBlockList, &pResBlock);
code = createResultData(&type, numOfTables, &output);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
goto end;
}
code = scalarCalculate(pTagCond, pBlockList, &output);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to calculate scalar, reason:%s", tstrerror(code));
terrno = code;
goto end;
}
doSetQualifiedUid(pUidList, pUidTagList, (bool*)output.columnData->pData);
end:
taosHashCleanup(ctx.colHash);
taosArrayDestroy(ctx.cInfoList);
blockDataDestroy(pResBlock);
taosArrayDestroy(pBlockList);
taosArrayDestroyEx(pUidTagList, freeItem);
colDataDestroy(output.columnData);
taosMemoryFreeClear(output.columnData);
return code;
}
int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond,
STableListInfo* pListInfo) {
STableListInfo* pListInfo, const char* idstr) {
int32_t code = TSDB_CODE_SUCCESS;
size_t numOfTables = 0;
uint64_t tableUid = pScanNode->uid;
pListInfo->suid = pScanNode->suid;
SArray* res = taosArrayInit(8, sizeof(uint64_t));
SArray* pUidList = taosArrayInit(8, sizeof(uint64_t));
SIdxFltStatus status = SFLT_NOT_INDEX;
if (pScanNode->tableType != TSDB_SUPER_TABLE) {
if (metaIsTableExist(metaHandle, tableUid)) {
taosArrayPush(res, &tableUid);
if (metaIsTableExist(metaHandle, pScanNode->uid)) {
taosArrayPush(pUidList, &pScanNode->uid);
}
code = doFilterByTagCond(pListInfo, res, pTagCond, metaHandle, status);
code = doFilterByTagCond(pListInfo, pUidList, pTagCond, metaHandle, status);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
} else {
// try to retrieve the result from meta cache
T_MD5_CTX context = {0};
genTagFilterDigest(pTagCond, &context);
bool acquired = false;
metaGetCachedTableUidList(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), res, &acquired);
if (acquired) {
qDebug("retrieve table uid list from cache, numOfTables:%d", (int32_t)taosArrayGetSize(res));
goto _end;
}
} else {
T_MD5_CTX context = {0};
if (!pTagCond) { // no tag condition exists, let's fetch all tables of this super table
if (tsTagFilterCache) {
// try to retrieve the result from meta cache
genTagFilterDigest(pTagCond, &context);
bool acquired = false;
metaGetCachedTableUidList(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pUidList,
&acquired);
if (acquired) {
qDebug("retrieve table uid list from cache, numOfTables:%d", (int32_t)taosArrayGetSize(pUidList));
goto _end;
}
}
if (!pTagCond) { // no tag filter condition exists, let's fetch all tables of this super table
ASSERT(pTagIndexCond == NULL);
vnodeGetCtbIdList(pVnode, pScanNode->suid, res);
vnodeGetCtbIdList(pVnode, pScanNode->suid, pUidList);
} else {
// failed to find the result in the cache, let's try to calculate the results
if (pTagIndexCond) {
void* pIndex = tsdbGetIvtIdx(metaHandle);
SIndexMetaArg metaArg = {
.metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid};
.metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = pIndex, .suid = pScanNode->uid};
code = doFilterTag(pTagIndexCond, &metaArg, res, &status);
if (code != 0 || status == SFLT_NOT_INDEX) {
qError("failed to get tableIds from index, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid);
SIdxFltStatus status = SFLT_NOT_INDEX;
code = doFilterTag(pTagIndexCond, &metaArg, pUidList, &status);
if (code != 0 || status == SFLT_NOT_INDEX) {  // error logging temporarily disabled for performance's sake
// qError("failed to get tableIds from index, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid);
code = TDB_CODE_SUCCESS;
} else {
qInfo("succ to get filter result, table num: %d", (int)taosArrayGetSize(res));
qInfo("succ to get filter result, table num: %d", (int)taosArrayGetSize(pUidList));
}
}
}
code = doFilterByTagCond(pListInfo, res, pTagCond, metaHandle, status);
code = doFilterByTagCond(pListInfo, pUidList, pTagCond, metaHandle, status);
if (code != TSDB_CODE_SUCCESS) {
return code;
goto _end;
}
// let's add the filter results into meta-cache
numOfTables = taosArrayGetSize(res);
size_t size = numOfTables * sizeof(uint64_t) + sizeof(int32_t);
char* pPayload = taosMemoryMalloc(size);
*(int32_t*)pPayload = numOfTables;
numOfTables = taosArrayGetSize(pUidList);
if (numOfTables > 0) {
memcpy(pPayload + sizeof(int32_t), taosArrayGet(res, 0), numOfTables * sizeof(uint64_t));
if (tsTagFilterCache) {
size_t size = numOfTables * sizeof(uint64_t) + sizeof(int32_t);
char* pPayload = taosMemoryMalloc(size);
*(int32_t*)pPayload = numOfTables;
if (numOfTables > 0) {
memcpy(pPayload + sizeof(int32_t), taosArrayGet(pUidList, 0), numOfTables * sizeof(uint64_t));
}
metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1);
}
metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1);
}
_end:
numOfTables = taosArrayGetSize(res);
numOfTables = taosArrayGetSize(pUidList);
for (int i = 0; i < numOfTables; i++) {
STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0};
STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(pUidList, i), .groupId = 0};
void* p = taosArrayPush(pListInfo->pTableList, &info);
if (p == NULL) {
taosArrayDestroy(res);
taosArrayDestroy(pUidList);
return TSDB_CODE_OUT_OF_MEMORY;
}
qTrace("tagfilter get uid:%" PRIu64 "", info.uid);
qTrace("tagfilter get uid:%" PRIu64 ", %s", info.uid, idstr);
}
taosArrayDestroy(res);
taosArrayDestroy(pUidList);
return code;
}
@ -1546,6 +1522,8 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) {
SFuncExecEnv env = {0};
pCtx->functionId = pExpr->pExpr->_function.pFunctNode->funcId;
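// cache the pseudo-column/not-null properties once per context; they are consulted for every row batch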
pCtx->isPseudoFunc = fmIsWindowPseudoColumnFunc(pCtx->functionId);
pCtx->isNotNullFunc = fmIsNotNullOutputFunc(pCtx->functionId);
if (fmIsAggFunc(pCtx->functionId) || fmIsIndefiniteRowsFunc(pCtx->functionId)) {
bool isUdaf = fmIsUserDefinedFunc(pCtx->functionId);
@ -1553,7 +1531,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
fmGetFuncExecFuncs(pCtx->functionId, &pCtx->fpSet);
} else {
char* udfName = pExpr->pExpr->_function.pFunctNode->functionName;
tstrncpy(pCtx->udfName, udfName, TSDB_FUNC_NAME_LEN);
pCtx->udfName = strdup(udfName);
fmGetUdafExecFuncs(pCtx->functionId, &pCtx->fpSet);
}
pCtx->fpSet.getEnv(pExpr->pExpr->_function.pFunctNode, &env);
@ -1859,7 +1837,7 @@ int32_t tableListAddTableInfo(STableListInfo* pTableList, uint64_t uid, uint64_t
int32_t tableListGetGroupList(const STableListInfo* pTableList, int32_t ordinalGroupIndex, STableKeyInfo** pKeyInfo,
int32_t* size) {
int32_t totalGroups = tableListGetOutputGroups(pTableList);
int32_t numOfTables = tableListGetSize(pTableList);
int32_t numOfTables = tableListGetSize(pTableList);
if (ordinalGroupIndex < 0 || ordinalGroupIndex >= totalGroups) {
return TSDB_CODE_INVALID_PARA;
@ -2042,7 +2020,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
return TSDB_CODE_INVALID_PARA;
}
int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo);
int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo, idStr);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to getTableList, code: %s", tstrerror(code));
return code;

View File

@ -176,10 +176,12 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, i
// set the number of rows in current disk page
SResultRow* pResultRow = (SResultRow*)((char*)pData + pData->num);
memset((char*) pResultRow, 0, interBufSize);
pResultRow->pageId = pageId;
pResultRow->offset = (int32_t)pData->num;
*currentPageId = pageId;
*currentPageId = pageId;
pData->num += interBufSize;
return pResultRow;
}
@ -363,7 +365,7 @@ void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pC
pCtx[k].input.colDataSMAIsSet = false;
}
if (fmIsWindowPseudoColumnFunc(pCtx[k].functionId)) {
if (pCtx[k].isPseudoFunc) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(&pCtx[k]);
char* p = GET_ROWCELL_INTERBUF(pEntryInfo);
@ -395,19 +397,20 @@ void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pC
}
}
static void doSetInputDataBlockInfo(SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t order) {
static void doSetInputDataBlockInfo(SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t order, int32_t scanFlag) {
SqlFunctionCtx* pCtx = pExprSup->pCtx;
for (int32_t i = 0; i < pExprSup->numOfExprs; ++i) {
pCtx[i].order = order;
pCtx[i].input.numOfRows = pBlock->info.rows;
setBlockSMAInfo(&pCtx[i], &pExprSup->pExprInfo[i], pBlock);
pCtx[i].pSrcBlock = pBlock;
pCtx[i].scanFlag = scanFlag;
}
}
void setInputDataBlock(SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t order, int32_t scanFlag, bool createDummyCol) {
if (pBlock->pBlockAgg != NULL) {
doSetInputDataBlockInfo(pExprSup, pBlock, order);
doSetInputDataBlockInfo(pExprSup, pBlock, order, scanFlag);
} else {
doSetInputDataBlock(pExprSup, pBlock, order, scanFlag, createDummyCol);
}
@ -537,7 +540,7 @@ bool functionNeedToExecute(SqlFunctionCtx* pCtx) {
return false;
}
if (pCtx->scanFlag == REPEAT_SCAN) {
if (pCtx->scanFlag == PRE_SCAN) {
return fmIsRepeatScanFunc(pCtx->functionId);
}
@ -817,7 +820,7 @@ void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numO
continue;
}
if (fmIsWindowPseudoColumnFunc(pCtx[i].functionId)) {
if (pCtx[i].isPseudoFunc) {
continue;
}
@ -1075,7 +1078,7 @@ void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t numOfExpr
pRow->numOfRows = pResInfo->numOfRes;
}
if (fmIsNotNullOutputFunc(pCtx[j].functionId)) {
if (pCtx[j].isNotNullFunc) {
returnNotNull = true;
}
}
@ -1199,9 +1202,15 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS
}
if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) {
ASSERT(pBlock->info.rows > 0);
releaseBufPage(pBuf, page);
break;
if (pBlock->info.rows <= 0 || pRow->numOfRows > pBlock->info.capacity) {
qError("error in copy data to ssdatablock, existed rows in block:%d, rows in pRow:%d, capacity:%d, %s",
pBlock->info.rows, pRow->numOfRows, pBlock->info.capacity, GET_TASKID(pTaskInfo));
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_APP_ERROR);
} else {
break;
}
}
pGroupResInfo->index += 1;
@ -1742,12 +1751,12 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul
int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize,
const char* pKey) {
int32_t code = 0;
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
// _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
pAggSup->currentPageId = -1;
pAggSup->resultRowSize = getResultRowSize(pCtx, numOfOutput);
pAggSup->keyBuf = taosMemoryCalloc(1, keyBufSize + POINTER_BYTES + sizeof(int64_t));
pAggSup->pResultRowHashTable = tSimpleHashInit(100, hashFn);
pAggSup->pResultRowHashTable = tSimpleHashInit(100, taosFastHash);
if (pAggSup->keyBuf == NULL || pAggSup->pResultRowHashTable == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
@ -1832,6 +1841,10 @@ void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
taosMemoryFreeClear(pCtx[i].subsidiaries.buf);
taosMemoryFree(pCtx[i].input.pData);
taosMemoryFree(pCtx[i].input.pColumnDataAgg);
if (pCtx[i].udfName != NULL) {
taosMemoryFree(pCtx[i].udfName);
}
}
taosMemoryFreeClear(pCtx);
@ -1962,6 +1975,22 @@ void destroyAggOperatorInfo(void* param) {
taosMemoryFreeClear(param);
}
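// renders "TID:0x... QID:0x..." by hand, which is cheaper than the snprintf call it replaces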
static char* buildTaskId(uint64_t taskId, uint64_t queryId) {
char* p = taosMemoryMalloc(64);
int32_t offset = 6;
memcpy(p, "TID:0x", offset);
offset += tintToHex(taskId, &p[offset]);
memcpy(&p[offset], " QID:0x", 7);
offset += 7;
offset += tintToHex(queryId, &p[offset]);
p[offset] = 0;
return p;
}
static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPTR_EXEC_MODEL model, char* dbFName) {
SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo));
if (pTaskInfo == NULL) {
@ -1972,16 +2001,13 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
pTaskInfo->schemaInfo.dbname = strdup(dbFName);
pTaskInfo->id.queryId = queryId;
pTaskInfo->execModel = model;
pTaskInfo->pTableInfoList = tableListCreate();
pTaskInfo->stopInfo.pStopInfo = taosArrayInit(4, sizeof(SExchangeOpStopInfo));
pTaskInfo->pResultBlockList = taosArrayInit(128, POINTER_BYTES);
char* p = taosMemoryCalloc(1, 128);
snprintf(p, 128, "TID:0x%" PRIx64 " QID:0x%" PRIx64, taskId, queryId);
pTaskInfo->id.str = p;
pTaskInfo->id.queryId = queryId;
pTaskInfo->id.str = buildTaskId(taskId, queryId);
return pTaskInfo;
}

View File

@ -832,10 +832,13 @@ static bool checkResult(SStreamFillSupporter* pFillSup, TSKEY ts, uint64_t group
return true;
}
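// returns false when the result block is already full, so the caller can keep the fill position and retry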
static void buildFillResult(SResultRowData* pResRow, SStreamFillSupporter* pFillSup, TSKEY ts, SSDataBlock* pBlock) {
static bool buildFillResult(SResultRowData* pResRow, SStreamFillSupporter* pFillSup, TSKEY ts, SSDataBlock* pBlock) {
if (pBlock->info.rows >= pBlock->info.capacity) {
return false;
}
uint64_t groupId = pBlock->info.id.groupId;
if (pFillSup->hasDelete && !checkResult(pFillSup, ts, groupId)) {
return;
return true;
}
for (int32_t i = 0; i < pFillSup->numOfAllCols; ++i) {
SFillColInfo* pFillCol = pFillSup->pAllColInfo + i;
@ -853,6 +856,7 @@ static void buildFillResult(SResultRowData* pResRow, SStreamFillSupporter* pFill
}
}
pBlock->info.rows++;
return true;
}
static bool hasRemainCalc(SStreamFillInfo* pFillInfo) {
@ -932,7 +936,9 @@ static void doStreamFillRange(SStreamFillInfo* pFillInfo, SStreamFillSupporter*
}
if (pFillInfo->pos == FILL_POS_START) {
buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes);
if (buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes)) {
pFillInfo->pos = FILL_POS_INVALID;
}
}
if (pFillInfo->type != TSDB_FILL_LINEAR) {
doStreamFillNormal(pFillSup, pFillInfo, pRes);
@ -940,7 +946,9 @@ static void doStreamFillRange(SStreamFillInfo* pFillInfo, SStreamFillSupporter*
doStreamFillLinear(pFillSup, pFillInfo, pRes);
if (pFillInfo->pos == FILL_POS_MID) {
buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes);
if (buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes)) {
pFillInfo->pos = FILL_POS_INVALID;
}
}
if (pFillInfo->current > pFillInfo->end && pFillInfo->pLinearInfo->hasNext) {
@ -954,7 +962,9 @@ static void doStreamFillRange(SStreamFillInfo* pFillInfo, SStreamFillSupporter*
}
}
if (pFillInfo->pos == FILL_POS_END) {
buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes);
if (buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes)) {
pFillInfo->pos = FILL_POS_INVALID;
}
}
}
@ -989,10 +999,6 @@ static void doStreamFillImpl(SOperatorInfo* pOperator) {
uint64_t groupId = pBlock->info.id.groupId;
SSDataBlock* pRes = pInfo->pRes;
pRes->info.id.groupId = groupId;
if (hasRemainCalc(pFillInfo)) {
doStreamFillRange(pFillInfo, pFillSup, pRes);
}
SColumnInfoData* pTsCol = taosArrayGet(pInfo->pSrcBlock->pDataBlock, pInfo->primaryTsCol);
TSKEY* tsCol = (TSKEY*)pTsCol->pData;
@ -1204,13 +1210,14 @@ static SSDataBlock* doStreamFill(SOperatorInfo* pOperator) {
return NULL;
}
blockDataCleanup(pInfo->pRes);
if (pOperator->status == OP_RES_TO_RETURN) {
if (hasRemainCalc(pInfo->pFillInfo)) {
doStreamFillRange(pInfo->pFillInfo, pInfo->pFillSup, pInfo->pRes);
if (pInfo->pRes->info.rows > 0) {
return pInfo->pRes;
}
if (hasRemainCalc(pInfo->pFillInfo) || (pInfo->pFillInfo->pos != FILL_POS_INVALID && pInfo->pFillInfo->needFill)) {
doStreamFillRange(pInfo->pFillInfo, pInfo->pFillSup, pInfo->pRes);
if (pInfo->pRes->info.rows > 0) {
printDataBlock(pInfo->pRes, "stream fill");
return pInfo->pRes;
}
}
if (pOperator->status == OP_RES_TO_RETURN) {
doDeleteFillFinalize(pOperator);
if (pInfo->pRes->info.rows > 0) {
printDataBlock(pInfo->pRes, "stream fill");

View File

@ -703,7 +703,8 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) {
if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) {
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
pTableScanInfo->base.scanFlag = REPEAT_SCAN;
pTableScanInfo->base.scanFlag = MAIN_SCAN;
pTableScanInfo->base.dataBlockLoadFlag = FUNC_DATA_REQUIRED_DATA_LOAD;
qDebug("start to repeat ascending order scan data blocks due to query func required, %s", GET_TASKID(pTaskInfo));
// do prepare for the next round table scan operation
@ -729,7 +730,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) {
if (pTableScanInfo->scanTimes < total) {
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
pTableScanInfo->base.scanFlag = REPEAT_SCAN;
pTableScanInfo->base.scanFlag = MAIN_SCAN;
qDebug("%s start to repeat descending order scan data blocks", GET_TASKID(pTaskInfo));
tsdbReaderReset(pTableScanInfo->base.dataReader, &pTableScanInfo->base.cond);
@ -785,6 +786,10 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}
if (pInfo->pResBlock->info.capacity > pOperator->resultInfo.capacity) {
pOperator->resultInfo.capacity = pInfo->pResBlock->info.capacity;
}
}
SSDataBlock* result = doGroupedTableScan(pOperator);
@ -877,8 +882,8 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
}
pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]};
pInfo->base.scanFlag = (pInfo->scanInfo.numOfAsc > 1) ? PRE_SCAN : MAIN_SCAN;
pInfo->base.scanFlag = MAIN_SCAN;
pInfo->base.pdInfo.interval = extractIntervalInfo(pTableScanNode);
pInfo->base.readHandle = *readHandle;
pInfo->base.dataBlockLoadFlag = pTableScanNode->dataRequired;
@ -888,7 +893,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
initResultSizeInfo(&pOperator->resultInfo, 4096);
pInfo->pResBlock = createDataBlockFromDescNode(pDescNode);
blockDataEnsureCapacity(pInfo->pResBlock, pOperator->resultInfo.capacity);
// blockDataEnsureCapacity(pInfo->pResBlock, pOperator->resultInfo.capacity);
code = filterInitFromNode((SNode*)pTableScanNode->scan.node.pConditions, &pOperator->exprSupp.pFilterInfo, 0);
if (code != TSDB_CODE_SUCCESS) {
@ -1175,6 +1180,20 @@ static SSDataBlock* doRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32
}
}
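// finds the session window immediately preceding [startTs, endTs] for the group; used as a fallback
// when the current window has already been closed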
static int32_t getPreSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endTs, uint64_t groupId,
SSessionKey* pKey) {
pKey->win.skey = startTs;
pKey->win.ekey = endTs;
pKey->groupId = groupId;
SStreamStateCur* pCur = streamStateSessionSeekKeyCurrentPrev(pAggSup->pState, pKey);
int32_t code = streamStateSessionGetKVByCur(pCur, pKey, NULL, 0);
if (code != TSDB_CODE_SUCCESS) {
SET_SESSION_WIN_KEY_INVALID(pKey);
}
return code;
}
static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, SSDataBlock* pDestBlock) {
blockDataCleanup(pDestBlock);
if (pSrcBlock->info.rows == 0) {
@ -1210,7 +1229,14 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr
}
SSessionKey endWin = {0};
getCurSessionWindow(pInfo->windowSup.pStreamAggSup, endData[i], endData[i], groupId, &endWin);
ASSERT(!IS_INVALID_SESSION_WIN_KEY(endWin));
if (IS_INVALID_SESSION_WIN_KEY(endWin)) {
getPreSessionWindow(pInfo->windowSup.pStreamAggSup, endData[i], endData[i], groupId, &endWin);
}
if (IS_INVALID_SESSION_WIN_KEY(endWin)) {
// window has been closed.
qError("generate session scan range failed. range start:%" PRIx64 ", end:%" PRIx64, startData[i], endData[i]);
continue;
}
colDataAppend(pDestStartCol, i, (const char*)&startWin.win.skey, false);
colDataAppend(pDestEndCol, i, (const char*)&endWin.win.ekey, false);
@ -1433,7 +1459,7 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock
dumyInfo.cur.pageId = -1;
bool isClosed = false;
STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
bool overDue = isOverdue(tsCol[rowId], &pInfo->twAggSup);
bool overDue = isOverdue(tsCol[rowId], &pInfo->twAggSup);
if (pInfo->igExpired && overDue) {
continue;
}
@ -1607,19 +1633,20 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
while (1) {
SFetchRet ret = {0};
tqNextBlock(pInfo->tqReader, &ret);
if (tqNextBlock(pInfo->tqReader, &ret) < 0) {
qError("failed to get next log block since %s", terrstr());
}
if (ret.fetchType == FETCH_TYPE__DATA) {
blockDataCleanup(pInfo->pRes);
if (setBlockIntoRes(pInfo, &ret.data, true) < 0) {
ASSERT(0);
}
setBlockIntoRes(pInfo, &ret.data, true);
if (pInfo->pRes->info.rows > 0) {
pOperator->status = OP_EXEC_RECV;
qDebug("queue scan log return %d rows", pInfo->pRes->info.rows);
return pInfo->pRes;
}
} else if (ret.fetchType == FETCH_TYPE__META) {
ASSERT(0);
qError("unexpected ret.fetchType:%d", ret.fetchType);
continue;
// pTaskInfo->streamInfo.lastStatus = ret.offset;
// pTaskInfo->streamInfo.metaBlk = ret.meta;
// return NULL;
@ -1646,7 +1673,7 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
return NULL;
#endif
} else {
ASSERT(0);
qError("unexpected streamInfo prepare type: %d", pTaskInfo->streamInfo.prepareStatus.type);
return NULL;
}
}
@ -2305,13 +2332,6 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pTSInfo->scanMode = TABLE_SCAN__TABLE_ORDER;
pTSInfo->base.dataReader = NULL;
pTaskInfo->streamInfo.lastStatus.uid = -1;
// code = tsdbReaderOpen(pHandle->vnode, &pTSInfo->base.cond, pList, num, pTSInfo->pResBlock,
// &pTSInfo->base.dataReader, NULL);
// if (code != 0) {
// terrno = code;
// destroyTableScanOperatorInfo(pTableScanOp);
// goto _error;
// }
}
if (pHandle->initTqReader) {
@ -2786,6 +2806,10 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
SSDataBlock* pBlock = NULL;
while (pInfo->tableStartIndex < tableListSize) {
if (isTaskKilled(pTaskInfo)) {
T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
}
pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pInfo->pResBlock, pOperator->resultInfo.capacity,
pOperator);
if (pBlock != NULL) {

View File

@ -2254,7 +2254,7 @@ SOperatorInfo* createDataBlockInfoScanOperator(SReadHandle* readHandle, SBlockDi
}
pInfo->readHandle = *readHandle;
pInfo->uid = pBlockScanNode->suid;
pInfo->uid = (pBlockScanNode->suid != 0)? pBlockScanNode->suid:pBlockScanNode->uid;
int32_t numOfCols = 0;
SExprInfo* pExprInfo = createExprInfo(pBlockScanNode->pScanPseudoCols, NULL, &numOfCols);

View File

@ -119,8 +119,8 @@ static void doKeepNewWindowStartInfo(SWindowRowsSup* pRowSup, const int64_t* tsL
pRowSup->groupId = groupId;
}
FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey,
int32_t pos, int32_t order, int64_t* pData) {
FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey, int32_t pos,
int32_t order, int64_t* pData) {
int32_t forwardRows = 0;
if (order == TSDB_ORDER_ASC) {
@ -2853,6 +2853,8 @@ int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo*
void initDummyFunction(SqlFunctionCtx* pDummy, SqlFunctionCtx* pCtx, int32_t nums) {
for (int i = 0; i < nums; i++) {
pDummy[i].functionId = pCtx[i].functionId;
pDummy[i].isNotNullFunc = pCtx[i].isNotNullFunc;
pDummy[i].isPseudoFunc = pCtx[i].isPseudoFunc;
}
}
@ -3377,9 +3379,11 @@ static void copyDeleteWindowInfo(SArray* pResWins, SSHashObj* pStDeleted) {
}
}
// the allocated memory comes from the outer function.
void initGroupResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList) {
pGroupResInfo->pRows = pArrayList;
pGroupResInfo->index = 0;
pGroupResInfo->pBuf = NULL;
}
void doBuildSessionResult(SOperatorInfo* pOperator, SStreamState* pState, SGroupResInfo* pGroupResInfo,
@ -3390,8 +3394,7 @@ void doBuildSessionResult(SOperatorInfo* pOperator, SStreamState* pState, SGroup
blockDataCleanup(pBlock);
if (!hasRemainResults(pGroupResInfo)) {
taosArrayDestroy(pGroupResInfo->pRows);
pGroupResInfo->pRows = NULL;
cleanupGroupResInfo(pGroupResInfo);
return;
}
@ -4826,6 +4829,12 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
tSimpleHashCleanup(pInfo->pUpdatedMap);
pInfo->pUpdatedMap = NULL;
#if 0
char* pBuf = streamStateIntervalDump(pInfo->pState);
qDebug("===stream===interval state%s", pBuf);
taosMemoryFree(pBuf);
#endif
doBuildDeleteResult(pInfo, pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
if (pInfo->pDelRes->info.rows > 0) {
printDataBlock(pInfo->pDelRes, "single interval delete");

View File

@ -123,8 +123,6 @@ static int32_t doAddToBucket(SLHashObj* pHashObj, SLHashBucket* pBucket, int32_t
}
static void doRemoveFromBucket(SFilePage* pPage, SLHashNode* pNode, SLHashBucket* pBucket) {
ASSERT(pPage != NULL && pNode != NULL && pBucket->size >= 1);
int32_t len = GET_LHASH_NODE_LEN(pNode);
char* p = (char*)pNode + len;
@ -301,8 +299,6 @@ void* tHashCleanup(SLHashObj* pHashObj) {
}
int32_t tHashPut(SLHashObj* pHashObj, const void* key, size_t keyLen, void* data, size_t size) {
ASSERT(pHashObj != NULL && key != NULL);
if (pHashObj->bits == 0) {
SLHashBucket* pBucket = pHashObj->pBucket[0];
doAddToBucket(pHashObj, pBucket, 0, key, keyLen, data, size);
@ -363,14 +359,12 @@ int32_t tHashPut(SLHashObj* pHashObj, const void* key, size_t keyLen, void* data
if (v1 != splitBucketId) { // place it into the new bucket
ASSERT(v1 == newBucketId);
// printf("move key:%d to 0x%x bucket, remain items:%d\n", *(int32_t*)k, v1, pBucket->size - 1);
SLHashBucket* pNewBucket = pHashObj->pBucket[newBucketId];
doAddToBucket(pHashObj, pNewBucket, newBucketId, (void*)GET_LHASH_NODE_KEY(pNode), pNode->keyLen,
GET_LHASH_NODE_KEY(pNode), pNode->dataLen);
doRemoveFromBucket(p, pNode, pBucket);
} else {
// printf("check key:%d, located into: %d, skip it\n", *(int*) k, v1);
int32_t nodeSize = GET_LHASH_NODE_LEN(pStart);
pStart += nodeSize;
}
@ -385,7 +379,6 @@ int32_t tHashPut(SLHashObj* pHashObj, const void* key, size_t keyLen, void* data
}
char* tHashGet(SLHashObj* pHashObj, const void* key, size_t keyLen) {
ASSERT(pHashObj != NULL && key != NULL && keyLen > 0);
int32_t hashv = pHashObj->hashFn(key, keyLen);
int32_t bucketId = doGetBucketIdFromHashVal(hashv, pHashObj->bits);

View File

@ -504,27 +504,45 @@ static int32_t translateTimezone(SFunctionNode* pFunc, char* pErrBuf, int32_t le
}
static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (2 != LIST_LENGTH(pFunc->pParameterList)) {
int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
if (numOfParams < 2 || numOfParams > 11) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
// param1
SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1);
if (pValue->datum.i < 0 || pValue->datum.i > 100) {
return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
}
pValue->notReserved = true;
uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
if (!IS_NUMERIC_TYPE(para1Type) || (!IS_SIGNED_NUMERIC_TYPE(para2Type) && !IS_UNSIGNED_NUMERIC_TYPE(para2Type))) {
if (!IS_NUMERIC_TYPE(para1Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
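// every percentile parameter must be a numeric constant within [0, 100]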
for (int32_t i = 1; i < numOfParams; ++i) {
SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, i);
pValue->notReserved = true;
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
if (!IS_NUMERIC_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
double v = 0;
if (IS_INTEGER_TYPE(paraType)) {
v = (double)pValue->datum.i;
} else {
v = pValue->datum.d;
}
if (v < 0 || v > 100) {
return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
}
}
// set result type
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE};
if (numOfParams > 2) {
pFunc->node.resType = (SDataType){.bytes = 512, .type = TSDB_DATA_TYPE_VARCHAR};
} else {
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE};
}
return TSDB_CODE_SUCCESS;
}
@ -2273,8 +2291,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "percentile",
.type = FUNCTION_TYPE_PERCENTILE,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_REPEAT_SCAN_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_REPEAT_SCAN_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_FORBID_STREAM_FUNC,
.translateFunc = translatePercentile,
.dataRequiredFunc = statisDataRequired,
.getEnvFunc = getPercentileFuncEnv,
.initFunc = percentileFunctionSetup,
.processFunc = percentileFunction,

View File

@ -789,17 +789,46 @@ int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
pEntryInfo->isNullRes = (pEntryInfo->numOfRes == 0) ? 1 : 0;
if (pCol->info.type == TSDB_DATA_TYPE_FLOAT) {
float v = GET_FLOAT_VAL(&pRes->v);
colDataAppend(pCol, currentRow, (const char*)&v, pEntryInfo->isNullRes);
// NOTE: do not change this; it is written this way for performance reasons
if (!pEntryInfo->isNullRes) {
switch (pCol->info.type) {
case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_BIGINT:
((int64_t*)pCol->pData)[currentRow] = pRes->v;
// colDataAppendInt64(pCol, currentRow, &pRes->v);
break;
case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_INT:
colDataAppendInt32(pCol, currentRow, (int32_t*)&pRes->v);
break;
case TSDB_DATA_TYPE_USMALLINT:
case TSDB_DATA_TYPE_SMALLINT:
colDataAppendInt16(pCol, currentRow, (int16_t*)&pRes->v);
break;
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_UTINYINT:
case TSDB_DATA_TYPE_TINYINT:
colDataAppendInt8(pCol, currentRow, (int8_t*)&pRes->v);
break;
case TSDB_DATA_TYPE_DOUBLE:
colDataAppendDouble(pCol, currentRow, (double*)&pRes->v);
break;
case TSDB_DATA_TYPE_FLOAT: {
float v = GET_FLOAT_VAL(&pRes->v);
colDataAppendFloat(pCol, currentRow, &v);
break;
}
}
} else {
colDataAppend(pCol, currentRow, (const char*)&pRes->v, pEntryInfo->isNullRes);
colDataAppendNULL(pCol, currentRow);
}
if (pEntryInfo->numOfRes > 0) {
code = setSelectivityValue(pCtx, pBlock, &pRes->tuplePos, currentRow);
} else {
code = setSelectivityValue(pCtx, pBlock, &pRes->nullTuplePos, currentRow);
if (pCtx->subsidiaries.num > 0) {
if (pEntryInfo->numOfRes > 0) {
code = setSelectivityValue(pCtx, pBlock, &pRes->tuplePos, currentRow);
} else {
code = setSelectivityValue(pCtx, pBlock, &pRes->nullTuplePos, currentRow);
}
}
return code;
@ -1570,7 +1599,7 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) {
int32_t type = pCol->info.type;
SPercentileInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
if (pCtx->scanFlag == REPEAT_SCAN && pInfo->stage == 0) {
if (pCtx->scanFlag == MAIN_SCAN && pInfo->stage == 0) {
pInfo->stage += 1;
// all data are null, set it completed
@ -1653,26 +1682,67 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) {
}
int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SVariant* pVal = &pCtx->param[1].param;
int32_t code = 0;
double v = 0;
GET_TYPED_DATA(v, double, pVal->nType, &pVal->i);
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo* ppInfo = (SPercentileInfo*)GET_ROWCELL_INTERBUF(pResInfo);
int32_t code = 0;
double v = 0;
tMemBucket* pMemBucket = ppInfo->pMemBucket;
if (pMemBucket != NULL && pMemBucket->total > 0) { // check for null
code = getPercentile(pMemBucket, v, &ppInfo->result);
if (pMemBucket == NULL || pMemBucket->total == 0) { // check for null
code = TSDB_CODE_FAILED;
goto _fin_error;
}
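// more than one percentile requested: emit all results in a single JSON-array-style varchar, e.g. "[10.000000, 20.000000]"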
if (pCtx->numOfParams > 2) {
char buf[512] = {0};
size_t len = 1;
varDataVal(buf)[0] = '[';
for (int32_t i = 1; i < pCtx->numOfParams; ++i) {
SVariant* pVal = &pCtx->param[i].param;
GET_TYPED_DATA(v, double, pVal->nType, &pVal->i);
int32_t code = getPercentile(pMemBucket, v, &ppInfo->result);
if (code != TSDB_CODE_SUCCESS) {
goto _fin_error;
}
if (i == pCtx->numOfParams - 1) {
len += snprintf(varDataVal(buf) + len, sizeof(buf) - VARSTR_HEADER_SIZE - len, "%.6lf]", ppInfo->result);
} else {
len += snprintf(varDataVal(buf) + len, sizeof(buf) - VARSTR_HEADER_SIZE - len, "%.6lf, ", ppInfo->result);
}
}
int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
varDataSetLen(buf, len);
colDataAppend(pCol, pBlock->info.rows, buf, false);
tMemBucketDestroy(pMemBucket);
return pResInfo->numOfRes;
} else {
SVariant* pVal = &pCtx->param[1].param;
GET_TYPED_DATA(v, double, pVal->nType, &pVal->i);
code = getPercentile(pMemBucket, v, &ppInfo->result);
if (code != TSDB_CODE_SUCCESS) {
goto _fin_error;
}
tMemBucketDestroy(pMemBucket);
return functionFinalize(pCtx, pBlock);
}
_fin_error:
tMemBucketDestroy(pMemBucket);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
return code;
return functionFinalize(pCtx, pBlock);
}
bool getApercentileFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {

View File

@ -61,6 +61,8 @@
} \
}
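// number of elements of the given byte width that fit into a SIMD register of _bits bits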
#define GET_INVOKE_INTRINSIC_THRESHOLD(_bits, _bytes) ((_bits) / ((_bytes) << 3u))
static void calculateRounds(int32_t numOfRows, int32_t bytes, int32_t* remainder, int32_t* rounds, int32_t* width) {
const int32_t bitWidth = 256;
@ -372,7 +374,7 @@ static void handleInt8Col(const void* data, int32_t start, int32_t numOfRows, SM
pBuf->v = i8VectorCmpAVX2(data, numOfRows, isMinFunc, signVal);
} else {
if (!pBuf->assign) {
pBuf->v = ((int8_t*)data)[0];
pBuf->v = ((int8_t*)data)[start];
}
if (signVal) {
@ -406,7 +408,7 @@ static void handleInt16Col(const void* data, int32_t start, int32_t numOfRows, S
pBuf->v = i16VectorCmpAVX2(data, numOfRows, isMinFunc, signVal);
} else {
if (!pBuf->assign) {
pBuf->v = ((int16_t*)data)[0];
pBuf->v = ((int16_t*)data)[start];
}
if (signVal) {
@ -440,7 +442,7 @@ static void handleInt32Col(const void* data, int32_t start, int32_t numOfRows, S
pBuf->v = i32VectorCmpAVX2(data, numOfRows, isMinFunc, signVal);
} else {
if (!pBuf->assign) {
pBuf->v = ((int32_t*)data)[0];
pBuf->v = ((int32_t*)data)[start];
}
if (signVal) {
@ -470,7 +472,7 @@ static void handleInt32Col(const void* data, int32_t start, int32_t numOfRows, S
static void handleInt64Col(const void* data, int32_t start, int32_t numOfRows, SMinmaxResInfo* pBuf, bool isMinFunc,
bool signVal) {
if (!pBuf->assign) {
pBuf->v = ((int64_t*)data)[0];
pBuf->v = ((int64_t*)data)[start];
}
if (signVal) {
@ -504,7 +506,7 @@ static void handleFloatCol(SColumnInfoData* pCol, int32_t start, int32_t numOfRo
*val = floatVectorCmpAVX(pData, numOfRows, isMinFunc);
} else {
if (!pBuf->assign) {
*val = pData[0];
*val = pData[start];
}
if (isMinFunc) { // min
@ -535,7 +537,7 @@ static void handleDoubleCol(SColumnInfoData* pCol, int32_t start, int32_t numOfR
*val = (double)doubleVectorCmpAVX(pData, numOfRows, isMinFunc);
} else {
if (!pBuf->assign) {
*val = pData[0];
*val = pData[start];
}
if (isMinFunc) { // min
@ -700,8 +702,29 @@ static void doExtractVal(SColumnInfoData* pCol, int32_t i, int32_t end, SqlFunct
}
}
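// records the source row of the current min/max so that selectivity (subsidiary) columns can be populated later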
static int32_t saveRelatedTuple(SqlFunctionCtx* pCtx, SInputColumnInfoData* pInput, int32_t index, void* tval) {
SColumnInfoData* pCol = pInput->pData[0];
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SMinmaxResInfo* pBuf = GET_ROWCELL_INTERBUF(pResInfo);
int32_t code = 0;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
if (index >= 0) {
code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
}
return code;
}
int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems) {
int32_t numOfElems = 0;
int32_t code = TSDB_CODE_SUCCESS;
SInputColumnInfoData* pInput = &pCtx->input;
SColumnDataAgg* pAgg = pInput->pColumnDataAgg[0];
@ -719,6 +742,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
// data in current data block are qualified to the query
if (pInput->colDataSMAIsSet) {
numOfElems = pInput->numOfRows - pAgg->numOfNull;
if (numOfElems == 0) {
goto _over;
@ -734,15 +758,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
pBuf->v = GET_INT64_VAL(tval);
}
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
if (index >= 0) {
int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
}
code = saveRelatedTuple(pCtx, pInput, index, tval);
} else {
if (IS_SIGNED_NUMERIC_TYPE(type)) {
int64_t prev = 0;
@ -751,15 +767,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
int64_t val = GET_INT64_VAL(tval);
if ((prev < val) ^ isMinFunc) {
GET_INT64_VAL(&pBuf->v) = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
if (index >= 0) {
int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
}
code = saveRelatedTuple(pCtx, pInput, index, tval);
}
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
uint64_t prev = 0;
@ -768,15 +776,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
uint64_t val = GET_UINT64_VAL(tval);
if ((prev < val) ^ isMinFunc) {
GET_UINT64_VAL(&pBuf->v) = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
if (index >= 0) {
int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
}
code = saveRelatedTuple(pCtx, pInput, index, tval);
}
} else if (type == TSDB_DATA_TYPE_DOUBLE) {
double prev = 0;
@ -785,15 +785,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
double val = GET_DOUBLE_VAL(tval);
if ((prev < val) ^ isMinFunc) {
GET_DOUBLE_VAL(&pBuf->v) = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
if (index >= 0) {
int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
}
code = saveRelatedTuple(pCtx, pInput, index, tval);
}
} else if (type == TSDB_DATA_TYPE_FLOAT) {
float prev = 0;
@ -802,16 +794,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
float val = GET_DOUBLE_VAL(tval);
if ((prev < val) ^ isMinFunc) {
GET_FLOAT_VAL(&pBuf->v) = val;
}
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
if (index >= 0) {
int32_t code = saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
code = saveRelatedTuple(pCtx, pInput, index, tval);
}
}
}
@ -825,14 +808,51 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
int32_t numOfRows = pInput->numOfRows;
int32_t end = start + numOfRows;
if (pCol->hasNull || numOfRows < 32 || pCtx->subsidiaries.num > 0) {
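// per-type row-count thresholds below which the plain scalar loop is preferred over the SIMD path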
// clang-format off
int32_t threshold[] = {
//NULL, BOOL, TINYINT, SMALLINT, INT, BIGINT, FLOAT, DOUBLE, VARCHAR, TIMESTAMP, NCHAR,
INT32_MAX, INT32_MAX, 32, 16, 8, 4, 8, 4, INT32_MAX, INT32_MAX, INT32_MAX,
// UTINYINT,USMALLINT, UINT, UBIGINT, JSON, VARBINARY, DECIMAL, BLOB, MEDIUMBLOB, BINARY
32, 16, 8, 4, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX, INT32_MAX,
};
// clang-format on
if (pCol->hasNull || numOfRows < threshold[pCol->info.type] || pCtx->subsidiaries.num > 0) {
int32_t i = findFirstValPosition(pCol, start, numOfRows);
if ((i < end) && (!pBuf->assign)) {
memcpy(&pBuf->v, pCol->pData + (pCol->info.bytes * i), pCol->info.bytes);
char* p = pCol->pData + pCol->info.bytes * i;
switch (type) {
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_BIGINT:
pBuf->v = *(int64_t*)p;
break;
case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_INT:
pBuf->v = *(int32_t*)p;
break;
case TSDB_DATA_TYPE_USMALLINT:
case TSDB_DATA_TYPE_SMALLINT:
pBuf->v = *(int16_t*)p;
break;
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_UTINYINT:
case TSDB_DATA_TYPE_TINYINT:
pBuf->v = *(int8_t*)p;
break;
case TSDB_DATA_TYPE_FLOAT: {
*(float*)&pBuf->v = *(float*)p;
break;
}
default:
memcpy(&pBuf->v, p, pCol->info.bytes);
break;
}
if (pCtx->subsidiaries.num > 0) {
int32_t code = saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
code = saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -849,7 +869,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
} else {
numOfElems = numOfRows;
switch (pCol->info.type) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: {
handleInt8Col(pCol->pData, start, numOfRows, pBuf, isMinFunc, true);
@ -898,13 +918,14 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
_over:
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved) {
int32_t code = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos);
code = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
pBuf->nullTupleSaved = true;
}
*nElems = numOfElems;
return TSDB_CODE_SUCCESS;
return code;
}

View File

@ -354,8 +354,6 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, const char *data, int32_t dataT
* in memory bucket, we only accept data array list
*/
int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
ASSERT(pBucket != NULL && data != NULL && size > 0);
int32_t count = 0;
int32_t bytes = pBucket->bytes;
for (int32_t i = 0; i < size; ++i) {

View File

@ -812,7 +812,7 @@ int32_t convertUdfColumnToDataBlock(SUdfColumn *udfCol, SSDataBlock *block) {
block->info.hasVarCol = IS_VAR_DATA_TYPE(udfCol->colMeta.type);
block->pDataBlock = taosArrayInit(1, sizeof(SColumnInfoData));
taosArraySetSize(block->pDataBlock, 1);
taosArrayPush(block->pDataBlock, &(SColumnInfoData){0});
SColumnInfoData *col = taosArrayGet(block->pDataBlock, 0);
SUdfColumnMeta *meta = &udfCol->colMeta;
col->info.precision = meta->precision;
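The change swaps `taosArraySetSize` for `taosArrayPush` of a zero-initialized `SColumnInfoData`: merely bumping the array's length leaves the new slot's bytes indeterminate, while pushing a `(T){0}` value copies a fully defined element in before `taosArrayGet` reads it back. A minimal growable-array sketch of why the push form is safe; the names are illustrative, not the taosArray API:

```c
#include <stdlib.h>
#include <string.h>

typedef struct { char *data; size_t elemSize, size, cap; } Vec;

/* Sketch: appending copies the caller's element in, so the slot returned
 * by a later get() is fully initialized; a bare size bump would not be. */
static int vecPush(Vec *v, const void *elem) {
  if (v->size == v->cap) {
    size_t cap = v->cap ? v->cap * 2 : 4;
    char  *p   = realloc(v->data, cap * v->elemSize);
    if (p == NULL) {
      return -1;
    }
    v->data = p;
    v->cap  = cap;
  }
  memcpy(v->data + v->size * v->elemSize, elem, v->elemSize);
  v->size++;
  return 0;
}
```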

View File

@@ -315,6 +315,11 @@ static int32_t tlvDecodeImpl(STlv* pTlv, void* pValue, int32_t len) {
}
static int32_t tlvDecodeValueImpl(STlvDecoder* pDecoder, void* pValue, int32_t len) {
// compatible with lower version messages
if (pDecoder->bufSize == pDecoder->offset) {
memset(pValue, 0, len);
return TSDB_CODE_SUCCESS;
}
if (len > pDecoder->bufSize - pDecoder->offset) {
return TSDB_CODE_FAILED;
}
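The added branch makes the TLV decoder tolerant of messages produced by older encoders: when the read offset has already consumed the whole buffer, a requested field is zero-filled and decoding succeeds, while a field that is only partially present still fails. A self-contained sketch of the same idea, with illustrative names:

```c
#include <stdint.h>
#include <string.h>

typedef struct { const uint8_t *buf; int32_t bufSize, offset; } Decoder;

/* Sketch: absent trailing fields decode as all-zero (an old peer never
 * wrote them); truncated fields are still rejected as malformed. */
static int32_t decodeValue(Decoder *d, void *value, int32_t len) {
  if (d->offset == d->bufSize) {
    memset(value, 0, len);        /* field absent: default-initialize */
    return 0;
  }
  if (len > d->bufSize - d->offset) {
    return -1;                    /* field truncated: malformed message */
  }
  memcpy(value, d->buf + d->offset, len);
  d->offset += len;
  return 0;
}
```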

View File

@@ -568,6 +568,7 @@ stream_options(A) ::= stream_options(B) TRIGGER MAX_DELAY duration_literal(C).
stream_options(A) ::= stream_options(B) WATERMARK duration_literal(C). { ((SStreamOptions*)B)->pWatermark = releaseRawExprNode(pCxt, C); A = B; }
stream_options(A) ::= stream_options(B) IGNORE EXPIRED NK_INTEGER(C). { ((SStreamOptions*)B)->ignoreExpired = taosStr2Int8(C.z, NULL, 10); A = B; }
stream_options(A) ::= stream_options(B) FILL_HISTORY NK_INTEGER(C). { ((SStreamOptions*)B)->fillHistory = taosStr2Int8(C.z, NULL, 10); A = B; }
stream_options(A) ::= stream_options(B) DELETE_MARK duration_literal(C). { ((SStreamOptions*)B)->pDeleteMark = releaseRawExprNode(pCxt, C); A = B; }
stream_options(A) ::= stream_options(B) IGNORE UPDATE NK_INTEGER(C). { ((SStreamOptions*)B)->ignoreUpdate = taosStr2Int8(C.z, NULL, 10); A = B; }
subtable_opt(A) ::= . { A = NULL; }

View File

@@ -666,6 +666,9 @@ static uint8_t getPrecisionFromCurrStmt(SNode* pCurrStmt, uint8_t defaultVal) {
if (isSetOperator(pCurrStmt)) {
return ((SSetOperator*)pCurrStmt)->precision;
}
if (NULL != pCurrStmt && QUERY_NODE_CREATE_STREAM_STMT == nodeType(pCurrStmt)) {
return getPrecisionFromCurrStmt(((SCreateStreamStmt*)pCurrStmt)->pQuery, defaultVal);
}
return defaultVal;
}
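A CREATE STREAM statement only wraps the query that actually carries a timestamp precision, so the lookup now unwraps the stream node and recurses into its `pQuery`. A sketch of the unwrap-and-recurse pattern over a statement tree; the node kinds and fields below are stand-ins for the real parser types:

```c
#include <stdint.h>

typedef enum { NODE_SELECT, NODE_SET_OP, NODE_CREATE_STREAM } NodeKind;

typedef struct Node {
  NodeKind     kind;
  uint8_t      precision;   /* meaningful for SELECT / set operators */
  struct Node *query;       /* wrapped query for CREATE STREAM */
} Node;

/* Sketch: resolve precision by unwrapping wrapper statements until a
 * query node that owns a precision is reached. */
static uint8_t precisionOf(const Node *stmt, uint8_t defaultVal) {
  if (stmt == NULL) return defaultVal;
  switch (stmt->kind) {
    case NODE_SELECT:
    case NODE_SET_OP:
      return stmt->precision;
    case NODE_CREATE_STREAM:
      return precisionOf(stmt->query, defaultVal);  /* unwrap the stream */
  }
  return defaultVal;
}
```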
@@ -1464,6 +1467,15 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
}
SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt;
SNode* pTable = pSelect->pFromTable;
if ((NULL != pTable && (QUERY_NODE_REAL_TABLE != nodeType(pTable) ||
(TSDB_CHILD_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType &&
TSDB_NORMAL_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType)))) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE,
"%s is only supported in single table query", pFunc->functionName);
}
if (pSelect->hasAggFuncs || pSelect->hasMultiRowsFunc || pSelect->hasIndefiniteRowsFunc) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
}
@@ -5634,16 +5646,6 @@ static bool crossTableWithUdaf(SSelectStmt* pSelect) {
}
static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pStmt) {
if (NULL != pStmt->pOptions->pWatermark &&
(DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pStmt->pOptions->pWatermark))) {
return pCxt->errCode;
}
if (NULL != pStmt->pOptions->pDelay &&
(DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pStmt->pOptions->pDelay))) {
return pCxt->errCode;
}
if (NULL == pStmt->pQuery) {
return TSDB_CODE_SUCCESS;
}
@@ -5951,6 +5953,7 @@ static int32_t adjustOrderOfProjections(STranslateContext* pCxt, SNodeList* pCol
}
int32_t code = TSDB_CODE_SUCCESS;
bool hasPrimaryKey = false;
SNode* pCol = NULL;
SNode* pProj = NULL;
FORBOTH(pCol, pCols, pProj, *pProjections) {
@@ -5964,6 +5967,14 @@
if (TSDB_CODE_SUCCESS != code) {
break;
}
if (PRIMARYKEY_TIMESTAMP_COL_ID == pSchema->colId) {
hasPrimaryKey = true;
}
}
if (TSDB_CODE_SUCCESS == code && !hasPrimaryKey) {
code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMNS_NUM,
"primary timestamp column can not be null");
}
SNodeList* pNewProjections = NULL;
@@ -6006,7 +6017,15 @@ static int32_t adjustProjectionsForExistTable(STranslateContext* pCxt, SCreateSt
return adjustOrderOfProjections(pCxt, pStmt->pCols, pMeta, &pSelect->pProjectionList, pReq);
}
static bool isGroupIdTagStream(const STableMeta* pMeta, SNodeList* pTags) {
return (NULL == pTags && 1 == pMeta->tableInfo.numOfTags && TSDB_DATA_TYPE_UBIGINT == getTableTagSchema(pMeta)->type);
}
static int32_t adjustDataTypeOfTags(STranslateContext* pCxt, const STableMeta* pMeta, SNodeList* pTags) {
if (isGroupIdTagStream(pMeta, pTags)) {
return TSDB_CODE_SUCCESS;
}
if (getNumOfTags(pMeta) != LIST_LENGTH(pTags)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMNS_NUM, "Illegal number of tags");
}
@@ -6207,6 +6226,17 @@ static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt
return code;
}
static int32_t translateStreamOptions(STranslateContext* pCxt, SCreateStreamStmt* pStmt) {
pCxt->pCurrStmt = (SNode*)pStmt;
SStreamOptions* pOptions = pStmt->pOptions;
if ((NULL != pOptions->pWatermark && (DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pOptions->pWatermark))) ||
(NULL != pOptions->pDeleteMark && (DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pOptions->pDeleteMark))) ||
(NULL != pOptions->pDelay && (DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pOptions->pDelay)))) {
return pCxt->errCode;
}
return TSDB_CODE_SUCCESS;
}
static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq) {
pReq->igExists = pStmt->ignoreExists;
@@ -6228,10 +6258,16 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt*
}
}
if (TSDB_CODE_SUCCESS == code) {
code = translateStreamOptions(pCxt, pStmt);
}
if (TSDB_CODE_SUCCESS == code) {
pReq->triggerType = pStmt->pOptions->triggerType;
pReq->maxDelay = (NULL != pStmt->pOptions->pDelay ? ((SValueNode*)pStmt->pOptions->pDelay)->datum.i : 0);
pReq->watermark = (NULL != pStmt->pOptions->pWatermark ? ((SValueNode*)pStmt->pOptions->pWatermark)->datum.i : 0);
pReq->deleteMark =
(NULL != pStmt->pOptions->pDeleteMark ? ((SValueNode*)pStmt->pOptions->pDeleteMark)->datum.i : 0);
pReq->fillHistory = pStmt->pOptions->fillHistory;
pReq->igExpired = pStmt->pOptions->ignoreExpired;
if (pReq->createStb) {

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff