Merge branch '3.0' of https://github.com/taosdata/TDengine into refact/tsdb_optimize

This commit is contained in:
Hongze Cheng 2022-09-01 09:51:33 +08:00
commit 4271911b50
167 changed files with 5160 additions and 5637 deletions

View File

@ -88,4 +88,3 @@ Standard: Auto
TabWidth: 8
UseTab: Never
...

1
.gitattributes vendored Normal file
View File

@ -0,0 +1 @@
*.py linguist-detectable=false

1
.gitignore vendored
View File

@ -1,5 +1,6 @@
build/
compile_commands.json
CMakeSettings.json
.cache
.ycm_extra_conf.py
.tasks

View File

@ -1,25 +0,0 @@
{
"configurations": [
{
"name": "WSL-GCC-Debug",
"generator": "Unix Makefiles",
"configurationType": "Debug",
"buildRoot": "${projectDir}\\build\\",
"installRoot": "${projectDir}\\build\\",
"cmakeExecutable": "/usr/bin/cmake",
"cmakeCommandArgs": "",
"buildCommandArgs": "",
"ctestCommandArgs": "",
"inheritEnvironments": [ "linux_x64" ],
"wslPath": "${defaultWSLPath}",
"addressSanitizerRuntimeFlags": "detect_leaks=0",
"variables": [
{
"name": "CMAKE_INSTALL_PREFIX",
"value": "/mnt/d/TDengine/TDengine/build",
"type": "PATH"
}
]
}
]
}

Binary file not shown (removed binary file; previously 19 KiB).

View File

@ -46,7 +46,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
MESSAGE("Current system processor is ${CMAKE_SYSTEM_PROCESSOR}.")
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86_64")
MESSAGE("Current system arch is arm64")
MESSAGE("Current system arch is 64")
SET(TD_DARWIN_64 TRUE)
ADD_DEFINITIONS("-D_TD_DARWIN_64")
ENDIF ()

View File

@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG aa45ad4
GIT_TAG 9cb965f
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -16,7 +16,7 @@ To achieve high performance writing, there are a few aspects to consider. In the
From the perspective of application program, you need to consider:
1. The data size of each single write, also known as the batch size. Generally speaking, a larger batch size gives better write performance, but once the batch size exceeds a certain value, no additional benefit is gained. When using SQL to write into TDengine, it's better to put as much data as possible into a single SQL statement. The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB. It can be configured with the client-side parameter `maxSQLLength`, whose default value is 65,480.
1. The data size of each single write, also known as the batch size. Generally speaking, a larger batch size gives better write performance, but once the batch size exceeds a certain value, no additional benefit is gained. When using SQL to write into TDengine, it's better to put as much data as possible into a single SQL statement. The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB (a minimal batching sketch follows this list).
2. The number of concurrent connections. Normally, more connections give better results, but once the number of connections exceeds the processing capacity of the server, performance may degrade.
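To make the batch-size point concrete, the following sketch packs many rows into one INSERT through taospy. It is not taken from this commit; the host, credentials, and the database `test` with a table `meters(ts TIMESTAMP, current FLOAT)` are assumptions for a default local install.

```python
# Hypothetical batching sketch: one multi-row INSERT instead of many single-row ones.
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")

rows = [(1626006833600 + i, 10.0 + i * 0.1) for i in range(500)]
values = " ".join(f"({ts}, {current})" for ts, current in rows)
sql = f"INSERT INTO test.meters VALUES {values}"   # test.meters is a hypothetical table

assert len(sql) < 1_048_576   # stay under the 1 MB SQL length limit
conn.execute(sql)             # returns the number of affected rows
conn.close()
```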
@ -356,7 +356,7 @@ Writing process tries to read as much as possible data from message queue and wr
<details>
The SQLWriter class encapsulates the logic of composing SQL and writing data. Note that the tables are not created before writing; they are created automatically when the "table does not exist" exception is caught. For any other exception, the SQL that caused it is logged for debugging. This class also checks the SQL length: if the SQL is close to `maxSQLLength`, it is executed immediately. To improve writing efficiency, it's better to increase `maxSQLLength` appropriately.
The SQLWriter class encapsulates the logic of composing SQL and writing data. Note that the tables are not created before writing; they are created automatically when the "table does not exist" exception is caught. For any other exception, the SQL that caused it is logged for debugging. This class also checks the SQL length, and the maximum SQL length is passed in through the maxSQLLength parameter according to the actual TDengine limit (a simplified sketch of this buffering logic is shown below).
<summary>SQLWriter</summary>
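As an illustration only, a simplified, hypothetical sketch of the length-aware buffering described above (not the SQLWriter class itself) might look like this; the fragment format and the 1,024-byte safety margin are assumptions.

```python
# Simplified illustration of length-aware SQL buffering; NOT the actual SQLWriter class.
class TinySQLWriter:
    def __init__(self, conn, table, max_sql_length=1_048_576):
        self.conn = conn
        self.table = table
        self.max_sql_length = max_sql_length
        self.values = []          # buffered "(ts, value)" fragments
        self.buffered_len = len(f"INSERT INTO {table} VALUES ")

    def add_row(self, ts, value):
        fragment = f"({ts}, {value})"
        # Flush before the statement would approach the length limit.
        if self.buffered_len + len(fragment) > self.max_sql_length - 1024:
            self.flush()
        self.values.append(fragment)
        self.buffered_len += len(fragment) + 1

    def flush(self):
        if not self.values:
            return
        sql = f"INSERT INTO {self.table} VALUES " + " ".join(self.values)
        self.conn.execute(sql)    # a real writer would also auto-create missing tables here
        self.values.clear()
        self.buffered_len = len(f"INSERT INTO {self.table} VALUES ")
```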

View File

@ -245,3 +245,35 @@ Provides dnode configuration information.
| 1 | dnode_id | INT | Dnode ID |
| 2 | name | BINARY(32) | Parameter |
| 3 | value | BINARY(64) | Value |
## INS_TOPICS
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | ------------------------------ |
| 1 | topic_name | BINARY(192) | Topic name |
| 2 | db_name | BINARY(64) | Database for the topic |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
## INS_SUBSCRIPTIONS
| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------ | ------------------------ |
| 1 | topic_name | BINARY(204) | Subscribed topic |
| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
| 3 | vgroup_id | INT | Vgroup ID for the consumer |
| 4 | consumer_id | BIGINT | Consumer ID |
## INS_STREAMS
| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------ | --------------------------------------- |
| 1 | stream_name | BINARY(64) | Stream name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
| 4 | status | BINARY(20) | Current status |
| 5 | source_db | BINARY(64) | Source database |
| 6 | target_db | BINARY(64) | Target database |
| 7 | target_table | BINARY(192) | Target table |
| 8 | watermark | BIGINT | Watermark (see stream processing documentation) |
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) |
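These system tables can be read with ordinary SQL. A minimal taospy sketch (host and credentials are assumptions for a default local install) might look like:

```python
# Read the topic list from information_schema; connection parameters are assumptions.
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
result = conn.query("SELECT topic_name, db_name, create_time FROM information_schema.ins_topics")
for row in result.fetch_all():
    print(row)
conn.close()
```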

View File

@ -61,15 +61,6 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 12 | sub_status | BINARY(1000) | Subquery status |
| 13 | sql | BINARY(1024) | SQL statement |
## PERF_TOPICS
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | ------------------------------ |
| 1 | topic_name | BINARY(192) | Topic name |
| 2 | db_name | BINARY(64) | Database for the topic |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
## PERF_CONSUMERS
| # | **Column** | **Data Type** | **Description** |
@ -83,15 +74,6 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 7 | subscribe_time | TIMESTAMP | Time of first subscription |
| 8 | rebalance_time | TIMESTAMP | Time of first rebalance triggering |
## PERF_SUBSCRIPTIONS
| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------ | ------------------------ |
| 1 | topic_name | BINARY(204) | Subscribed topic |
| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
| 3 | vgroup_id | INT | Vgroup ID for the consumer |
| 4 | consumer_id | BIGINT | Consumer ID |
## PERF_TRANS
| # | **Column** | **Data Type** | **Description** |
@ -113,17 +95,3 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | stable_name | BINARY(192) | Supertable name |
| 4 | vgroup_id | INT | Dedicated vgroup name |
## PERF_STREAMS
| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------ | --------------------------------------- |
| 1 | stream_name | BINARY(64) | Stream name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
| 4 | status | BINARY(20) | Current status |
| 5 | source_db | BINARY(64) | Source database |
| 6 | target_db | BINARY(64) | Target database |
| 7 | target_table | BINARY(192) | Target table |
| 8 | watermark | BIGINT | Watermark (see stream processing documentation) |
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) |

View File

@ -7,7 +7,7 @@ description: "taospy is the official Python connector for TDengine. taospy provi
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
`taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
`taospy` is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection".
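As a quick illustration of the two connection styles, the sketch below uses the PEP 249 cursor interface mentioned above; the host, port, credentials, and printed value are assumptions for a default local installation with taosAdapter running.

```python
# Hypothetical sketch: open a native connection and a REST connection, run the same query.
import taos      # native connection (requires the client driver)
import taosrest  # REST connection (goes through taosAdapter)

for conn in (
    taos.connect(host="localhost", user="root", password="taosdata"),
    taosrest.connect(url="http://localhost:6041", user="root", password="taosdata"),
):
    cur = conn.cursor()
    cur.execute("SELECT SERVER_VERSION()")
    print(cur.fetchall())   # e.g. [('3.0.0.1',)]
    cur.close()
    conn.close()
```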

View File

@ -1,10 +0,0 @@
- The client driver has been installed (required for a native connection; not required for a REST connection)
:::info
Because the TDengine client driver is written in C, a native connection needs to load the locally installed client driver shared library, which is usually included in the TDengine installation package. The TDengine Linux server package ships with the TDengine client, and the [Linux client](/get-started/) can also be installed separately. For development on Windows, install the corresponding [Windows client](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client).
- libtaos.so: After TDengine is installed successfully on Linux, the Linux client driver file libtaos.so is automatically copied to /usr/lib/libtaos.so, which is on the standard Linux library search path, so it does not need to be specified separately.
- taos.dll: After the client is installed on Windows, the Windows client driver file taos.dll is automatically copied to the default system search path C:/Windows/System32, so it likewise does not need to be specified separately.
:::

View File

@ -1,7 +1,7 @@
---
sidebar_label: taosKeeper
title: taosKeeper
description: Instructions and tips for using taosKeeper
description: exports TDengine monitoring metrics.
---
## Introduction
@ -22,26 +22,35 @@ You can compile taosKeeper separately and install it. Please refer to the [taosK
### Configuration and running methods
<!-- taosKeeper needs to be executed on the terminal of the operating system, it supports two configuration methods: [Command-line arguments](#command-line-arguments-in-detail) and [configuration file](#configuration-file-parameters-in-detail). Command-line arguments take precedence over values in the configuration file. -->
taosKeeper needs to be executed on the terminal of the operating system. To run taosKeeper, see [configuration file](#configuration-file-parameters-in-detail).
taosKeeper must be executed in a terminal on the operating system. It supports three configuration methods: [command-line arguments](#command-line-arguments-in-detail), [environment variables](#environment-variable-in-detail), and a [configuration file](#configuration-file-parameters-in-detail). Command-line arguments take precedence over environment variables, which take precedence over the configuration file.
**Make sure that the TDengine cluster is running correctly before running taosKeeper.** Ensure that the monitoring service in TDengine has been started. For more information, see [TDengine Monitoring Configuration](../config/#monitoring).
<!--
### Command-Line Parameters
You can use command-line parameters to run taosBenchmark and control its behavior:
You can use command-line parameters to run taosKeeper and control its behavior:
```shell
taosKeeper
$ taosKeeper
```
-->
### Environment variable
You can use environment variables to run taosKeeper and control its behavior:
```shell
$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
$ taoskeeper
```
You can run `taoskeeper -h` for more details.
### Configuration File
You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/keeper.toml` is used by default. If this file does not specify configurations, the default values are used.
```shell
taoskeeper -c <keeper config file>
$ taoskeeper -c <keeper config file>
```
**Sample configuration files**
@ -110,7 +119,7 @@ Query OK, 1 rows in database (0.036162s)
#### Export Monitoring Metrics
```shell
curl http://127.0.0.1:6043/metrics
$ curl http://127.0.0.1:6043/metrics
```
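The same endpoint can also be scraped programmatically. A minimal sketch using only the Python standard library (assuming taosKeeper listens on its default port 6043):

```python
# Hypothetical sketch: fetch taosKeeper metrics and drop comment lines.
from urllib.request import urlopen

with urlopen("http://127.0.0.1:6043/metrics", timeout=5) as resp:
    text = resp.read().decode("utf-8")

for line in text.splitlines():
    if line and not line.startswith("#"):
        print(line)
```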
Sample result set (excerpt):

View File

@ -0,0 +1,36 @@
---
sidebar_label: Google Data Studio
title: Use Google Data Studio to access TDengine
---
Data Studio is a powerful tool for reporting and visualization, offering a wide variety of charts and connectors and making it easy to generate reports based on predefined templates. Its ease of use and robust ecosystem have made it one of the first choices for people working in data analysis.
TDengine is a high-performance, scalable time-series database that supports SQL. Many businesses and developers in fields spanning from IoT and Industry Internet to IT and finance are using TDengine as their time-series database management solution.
The TDengine team immediately saw the benefits of using TDengine to process time-series data and Data Studio to analyze it, and they got to work on a connector for Data Studio.
With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for “TDengine”.
![02](gds/gds-02.png.webp)
Select the TDengine connector and click Authorize.
![03](gds/gds-03.png.webp)
Then sign in to your Google Account and click Allow to enable the connection to TDengine.
![04](gds/gds-04.png.webp)
In the Enter URL field, type the hostname and port of the server running the TDengine REST service. In the following fields, type your username, password, database name, table name, and the start and end times of your query range. Then, click Connect.
![05](gds/gds-05.png.webp)
After the connection is established, you can use Data Studio to process your data and create reports.
![06](gds/gds-06.png.webp)
In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data; some examples are shown below.
![07](gds/gds-07.png.webp)
With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we're sure you'll be able to gain new insights and obtain even more value from your data.

BIN
docs/en/20-third-party/gds/gds-01.webp vendored Normal file, plus ten further new binary image files (not shown; 4.6 to 17 KiB each)

View File

@ -26,7 +26,7 @@ The main features of TDengine are as follows:
- [Icinga2](../third-party/icinga2)
- [TCollector](../third-party/tcollector)
- [EMQX](../third-party/emq-broker)
- [HiveMQ](../third-party/hive-mq-broker)
- [HiveMQ](../third-party/hive-mq-broker)
2. Query data, with support for
- [standard SQL](../taos-sql), including nested queries
- [time-series extension functions](../taos-sql/function/#time-series-extensions)
@ -85,14 +85,14 @@ The main features of TDengine are as follows:
![TDengine Database technical ecosystem](eco_system.webp)
<center><figcaption>Figure 1. TDengine technical ecosystem</figcaption></center>
</figure>
In the figure above, the left side shows various data collection agents and message queues, including OPC-UA, MQTT, Telegraf, and Kafka, whose data is continuously written into TDengine. The right side shows visualization tools, BI tools, SCADA software, and applications. The bottom shows the command-line program (CLI) and visual management tools provided by TDengine itself.
## Typical Use Cases
As a high-performance, distributed, SQL-capable time-series database, TDengine's typical use cases include, but are not limited to, IoT, Industrial Internet, connected vehicles, IT operations, energy, and financial securities. Note that TDengine is a purpose-built database and big-data processing tool designed for time-series scenarios; because it takes full advantage of the characteristics of time-series big data, it cannot be used for general-purpose data such as web crawlers, Weibo, WeChat, e-commerce, ERP, or CRM. The following sections analyze the applicable scenarios in more detail.
### Characteristics and Requirements of Data Sources

View File

@ -11,7 +11,7 @@ import TabItem from "@theme/TabItem";
From the perspective of the client application, efficient writing depends on the following factors:
1. The amount of data per write, i.e. the batch size. Generally, the larger each batch, the more efficient the write (although the benefit disappears beyond a certain threshold). When writing to TDengine with SQL, try to pack more data into a single SQL statement. The maximum SQL length currently supported by TDengine is 1,048,576 (1 MB) characters. It can be changed through the client parameter maxSQLLength (default 65,480).
1. The amount of data per write, i.e. the batch size. Generally, the larger each batch, the more efficient the write (although the benefit disappears beyond a certain threshold). When writing to TDengine with SQL, try to pack more data into a single SQL statement. The maximum SQL length currently supported by TDengine is 1,048,576 (1 MB) characters.
2. The number of concurrent connections. Generally, the more concurrent connections writing at the same time, the more efficient the writes (although performance drops again beyond a certain threshold, depending on the server's processing capacity).
3. The distribution of data across different tables (or subtables), i.e. the adjacency of the data being written. Generally, writing each batch to only one table (or subtable) is more efficient than writing to multiple tables (or subtables);
4. The writing method. Generally:
@ -348,7 +348,7 @@ The main function accepts five startup arguments, in order:
<details>
The SQLWriter class encapsulates the logic of composing SQL and writing data. None of the tables are created in advance; instead, when a "table does not exist" error occurs, the tables are created in batches using the supertable as a template, and the INSERT statement is then re-executed. For other errors, the SQL being executed at the time is logged to help with troubleshooting and recovery. This class also checks whether the SQL exceeds the maximum length limit; if the SQL is close to the maximum length (maxSQLLength), it is executed immediately. To reduce the number of SQL executions, it is recommended to increase maxSQLLength appropriately.
The SQLWriter class encapsulates the logic of composing SQL and writing data. None of the tables are created in advance; instead, when a "table does not exist" error occurs, the tables are created in batches using the supertable as a template, and the INSERT statement is then re-executed. For other errors, the SQL being executed at the time is logged to help with troubleshooting and recovery. This class also checks whether the SQL exceeds the maximum length limit; based on the TDengine 3.0 limit, the maximum supported SQL length of 1,048,576 is passed in through the maxSQLLength input parameter.
<summary>SQLWriter</summary>

View File

@ -246,3 +246,35 @@ Note: Because SHOW statements are already familiar to developers and widely used, they
| 1 | dnode_id | INT | Dnode ID |
| 2 | name | BINARY(32) | Parameter name |
| 3 | value | BINARY(64) | Parameter value |
## INS_TOPICS
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | ------------------------------ |
| 1 | topic_name | BINARY(192) | Topic name |
| 2 | db_name | BINARY(64) | Database for the topic |
| 3 | create_time | TIMESTAMP | Creation time of the topic |
| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
## INS_SUBSCRIPTIONS
| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------ | ------------------------ |
| 1 | topic_name | BINARY(204) | Subscribed topic |
| 2 | consumer_group | BINARY(193) | Consumer group of the subscriber |
| 3 | vgroup_id | INT | Vgroup ID assigned to the consumer |
| 4 | consumer_id | BIGINT | Unique ID of the consumer |
## INS_STREAMS
| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------ | --------------------------------------- |
| 1 | stream_name | BINARY(64) | Stream name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | sql | BINARY(1024) | SQL statement provided when the stream was created |
| 4 | status | BINARY(20) | Current status of the stream |
| 5 | source_db | BINARY(64) | Source database |
| 6 | target_db | BINARY(64) | Target database |
| 7 | target_table | BINARY(192) | Target table the stream writes to |
| 8 | watermark | BIGINT | Watermark (see stream processing in the SQL manual) |
| 9 | trigger | INT | Method of pushing computation results (see stream processing in the SQL manual) |

View File

@ -62,15 +62,6 @@ Starting with TDengine 3.0, a built-in database `performance_schema` is provided, whose
| 12 | sub_status | BINARY(1000) | Subquery status |
| 13 | sql | BINARY(1024) | SQL statement |
## PERF_TOPICS
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | ------------------------------ |
| 1 | topic_name | BINARY(192) | Topic name |
| 2 | db_name | BINARY(64) | Database for the topic |
| 3 | create_time | TIMESTAMP | Creation time of the topic |
| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
## PERF_CONSUMERS
| # | **Column** | **Data Type** | **Description** |
@ -84,15 +75,6 @@ Starting with TDengine 3.0, a built-in database `performance_schema` is provided, whose
| 7 | subscribe_time | TIMESTAMP | Time of the most recent subscription |
| 8 | rebalance_time | TIMESTAMP | Time of the most recent rebalance trigger |
## PERF_SUBSCRIPTIONS
| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------ | ------------------------ |
| 1 | topic_name | BINARY(204) | Subscribed topic |
| 2 | consumer_group | BINARY(193) | Consumer group of the subscriber |
| 3 | vgroup_id | INT | Vgroup ID assigned to the consumer |
| 4 | consumer_id | BIGINT | Unique ID of the consumer |
## PERF_TRANS
| # | **Column** | **Data Type** | **Description** |
@ -114,17 +96,3 @@ Starting with TDengine 3.0, a built-in database `performance_schema` is provided, whose
| 2 | create_time | TIMESTAMP | SMA creation time |
| 3 | stable_name | BINARY(192) | Name of the supertable the SMA belongs to |
| 4 | vgroup_id | INT | Name of the vgroup dedicated to the SMA |
## PERF_STREAMS
| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------ | --------------------------------------- |
| 1 | stream_name | BINARY(64) | Stream name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | sql | BINARY(1024) | SQL statement provided when the stream was created |
| 4 | status | BINARY(20) | Current status of the stream |
| 5 | source_db | BINARY(64) | Source database |
| 6 | target_db | BINARY(64) | Target database |
| 7 | target_table | BINARY(192) | Target table the stream writes to |
| 8 | watermark | BIGINT | Watermark (see stream processing in the SQL manual) |
| 9 | trigger | INT | Method of pushing computation results (see stream processing in the SQL manual) |

View File

@ -1,7 +1,7 @@
---
sidebar_label: taosKeeper
title: taosKeeper
description: Instructions for using TDengine taosKeeper
description: A tool that exports monitoring metrics for TDengine 3.0
---
## Introduction
@ -22,26 +22,36 @@ taosKeeper installation methods:
### Configuration and Running Methods
<!-- taosKeeper must be executed in a terminal on the operating system. It supports two configuration methods: [command-line arguments](#命令行参数启动) and a [configuration file](#配置文件启动). Command-line arguments take precedence over configuration file parameters. -->
taosKeeper must be executed in a terminal on the operating system. The tool supports [starting with a configuration file](#配置文件启动).
taosKeeper must be executed in a terminal on the operating system. The tool supports three configuration methods: [command-line arguments](#命令行参数启动), [environment variables](#环境变量启动), and a [configuration file](#配置文件启动). Command-line arguments take precedence over environment variables, which take precedence over configuration file parameters.
**Make sure that the TDengine cluster and taosAdapter are running correctly before running taosKeeper.** Also make sure that monitoring is enabled in TDengine; see [TDengine monitoring configuration](../config/#监控相关) for details.
<!--
### Starting with Command-Line Arguments
Run taosBenchmark with command-line arguments to control its behavior.
Run taosKeeper with command-line arguments to control its behavior.
```shell
taosKeeper
$ taosKeeper
```
-->
### Starting with Environment Variables
Startup parameters can be controlled by setting environment variables; this is typically used when running in a container.
```shell
$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
$ taoskeeper
```
See the output of `taoskeeper -h` for the full list of parameters.
### Starting with a Configuration File
Run the following command to quickly try out taosKeeper. If no taosKeeper configuration file is specified, the `/etc/taos/keeper.toml` configuration is used by default; otherwise, the default configuration is used.
```shell
taoskeeper -c <keeper config file>
$ taoskeeper -c <keeper config file>
```
**The following is a sample configuration file:**
@ -110,7 +120,7 @@ Query OK, 1 rows in database (0.036162s)
#### Exporting Monitoring Metrics
```shell
curl http://127.0.0.1:6043/metrics
$ curl http://127.0.0.1:6043/metrics
```
Partial result set:

View File

@ -0,0 +1,39 @@
---
sidebar_label: Google Data Studio
title: TDengine Google Data Studio Connector
description: A detailed guide to accessing TDengine data with Google Data Studio
---
Google Data Studio is a powerful reporting and visualization tool that offers a rich set of charts and data connectors, making it easy to generate reports from predefined templates. Its ease of use and rich ecosystem have made it a favorite of data scientists working in data analysis.
Data Studio supports many data sources. Besides Google's own services such as Google Analytics, Google AdWords, Search Console, and BigQuery, users can upload offline files to Google Cloud Storage or connect other data sources through connectors.
![01](gds/gds-01.webp)
The TDengine connector has been published to the Google Data Studio gallery; you can search for TDengine directly on the "Connect to Data" page and select it as a data source.
![02](gds/gds-02.png.webp)
Next, click the AUTHORIZE button.
![03](gds/gds-03.png.webp)
Allow your own account to connect to the external service.
![04](gds/gds-04.png.webp)
On the next page, enter the URL of the server running the TDengine REST service, then enter the username, password, database name, table name, and the start and end times of the query range, and click the CONNECT button in the upper right corner.
![05](gds/gds-05.png.webp)
Once the connection succeeds, you can use GDS to conveniently process your data and create reports.
![06](gds/gds-06.png.webp)
The current rule for dimensions and metrics is that timestamp fields and tag fields are defined by the connector as dimensions, while fields of all other types are metrics. You can also create different charts according to your needs.
![07](gds/gds-07.png.webp)
![08](gds/gds-08.png.webp)
![09](gds/gds-09.png.webp)
![10](gds/gds-10.png.webp)
![11](gds/gds-11.png.webp)

BIN
docs/zh/20-third-party/gds/gds-01.webp vendored Normal file, plus ten further new binary image files (not shown; 4.6 to 17 KiB each)

View File

@ -13,15 +13,9 @@ IF (TD_LINUX)
#TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua)
add_executable(tmq "")
add_executable(tmq_taosx "")
add_executable(stream_demo "")
add_executable(demoapi "")
target_sources(tmq_taosx
PRIVATE
"tmq_taosx.c"
)
target_sources(tmq
PRIVATE
"tmq.c"
@ -41,10 +35,6 @@ IF (TD_LINUX)
taos_static
)
target_link_libraries(tmq_taosx
taos_static
)
target_link_libraries(stream_demo
taos_static
)
@ -57,10 +47,6 @@ IF (TD_LINUX)
PUBLIC "${TD_SOURCE_DIR}/include/os"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
target_include_directories(tmq_taosx
PUBLIC "${TD_SOURCE_DIR}/include/os"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
target_include_directories(stream_demo
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
@ -73,7 +59,6 @@ IF (TD_LINUX)
)
SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
SET_TARGET_PROPERTIES(tmq_taosx PROPERTIES OUTPUT_NAME tmq_taosx)
SET_TARGET_PROPERTIES(stream_demo PROPERTIES OUTPUT_NAME stream_demo)
SET_TARGET_PROPERTIES(demoapi PROPERTIES OUTPUT_NAME demoapi)
ENDIF ()

View File

@ -1,489 +0,0 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "taos.h"
static int running = 1;
static TAOS* use_db(){
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
if (pConn == NULL) {
return NULL;
}
TAOS_RES* pRes = taos_query(pConn, "use db_taosx");
if (taos_errno(pRes) != 0) {
printf("error in use db_taosx, reason:%s\n", taos_errstr(pRes));
return NULL;
}
taos_free_result(pRes);
return pConn;
}
static void msg_process(TAOS_RES* msg) {
/*memset(buf, 0, 1024);*/
printf("-----------topic-------------: %s\n", tmq_get_topic_name(msg));
printf("db: %s\n", tmq_get_db_name(msg));
printf("vg: %d\n", tmq_get_vgroup_id(msg));
TAOS *pConn = use_db();
if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) {
char* result = tmq_get_json_meta(msg);
if (result) {
printf("meta result: %s\n", result);
}
tmq_free_json_meta(result);
}
tmq_raw_data raw = {0};
tmq_get_raw(msg, &raw);
int32_t ret = tmq_write_raw(pConn, raw);
printf("write raw data: %s\n", tmq_err2str(ret));
// else{
// while(1){
// int numOfRows = 0;
// void *pData = NULL;
// taos_fetch_raw_block(msg, &numOfRows, &pData);
// if(numOfRows == 0) break;
// printf("write data: tbname:%s, numOfRows:%d\n", tmq_get_table_name(msg), numOfRows);
// int ret = taos_write_raw_block(pConn, numOfRows, pData, tmq_get_table_name(msg));
// printf("write raw data: %s\n", tmq_err2str(ret));
// }
// }
taos_close(pConn);
}
int32_t init_env() {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
if (pConn == NULL) {
return -1;
}
TAOS_RES* pRes = taos_query(pConn, "drop database if exists db_taosx");
if (taos_errno(pRes) != 0) {
printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create database if not exists db_taosx vgroups 4");
if (taos_errno(pRes) != 0) {
printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "drop database if exists abc1");
if (taos_errno(pRes) != 0) {
printf("error in drop db, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create database if not exists abc1 vgroups 3");
if (taos_errno(pRes) != 0) {
printf("error in create db, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "use abc1");
if (taos_errno(pRes) != 0) {
printf("error in use db, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn,
"create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
"nchar(8), t4 bool)");
if (taos_errno(pRes) != 0) {
printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table if not exists ct0 using st1 tags(1000, \"ttt\", true)");
if (taos_errno(pRes) != 0) {
printf("failed to create child table tu1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into ct0 values(1626006833600, 1, 2, 'a')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table if not exists ct1 using st1(t1) tags(2000)");
if (taos_errno(pRes) != 0) {
printf("failed to create child table ct1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table if not exists ct2 using st1(t1) tags(NULL)");
if (taos_errno(pRes) != 0) {
printf("failed to create child table ct2, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into ct1 values(1626006833600, 3, 4, 'b')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table if not exists ct3 using st1(t1) tags(3000)");
if (taos_errno(pRes) != 0) {
printf("failed to create child table ct3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table if not exists ct4 using st1(t3) tags('ct4')");
if (taos_errno(pRes) != 0) {
printf("failed to create child table ct4, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833602, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "alter table st1 add column c4 bigint");
if (taos_errno(pRes) != 0) {
printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "alter table st1 modify column c3 binary(64)");
if (taos_errno(pRes) != 0) {
printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into ct3 values(1626006833605, 53, 63, 'cffffffffffffffffffffffffffff', 8989898899999) (1626006833609, 51, 62, 'c333', 940)");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into ct3 select * from ct1");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "alter table st1 add tag t2 binary(64)");
if (taos_errno(pRes) != 0) {
printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "alter table ct3 set tag t1=5000");
if (taos_errno(pRes) != 0) {
printf("failed to slter child table ct3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "delete from abc1 .ct3 where ts < 1626006833606");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "drop table ct3 ct1");
if (taos_errno(pRes) != 0) {
printf("failed to drop child table ct3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "drop table st1");
if (taos_errno(pRes) != 0) {
printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table if not exists n1(ts timestamp, c1 int, c2 nchar(4))");
if (taos_errno(pRes) != 0) {
printf("failed to create normal table n1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "alter table n1 add column c3 bigint");
if (taos_errno(pRes) != 0) {
printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "alter table n1 modify column c2 nchar(8)");
if (taos_errno(pRes) != 0) {
printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "alter table n1 rename column c3 cc3");
if (taos_errno(pRes) != 0) {
printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "alter table n1 comment 'hello'");
if (taos_errno(pRes) != 0) {
printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "alter table n1 drop column c1");
if (taos_errno(pRes) != 0) {
printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into n1 values(now, 'eeee', 8989898899999) (now+9s, 'c333', 940)");
if (taos_errno(pRes) != 0) {
printf("failed to insert into n1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "drop table n1");
if (taos_errno(pRes) != 0) {
printf("failed to drop normal table n1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table jt(ts timestamp, i int) tags(t json)");
if (taos_errno(pRes) != 0) {
printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table jt1 using jt tags('{\"k1\":1, \"k2\":\"hello\"}')");
if (taos_errno(pRes) != 0) {
printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table jt2 using jt tags('')");
if (taos_errno(pRes) != 0) {
printf("failed to create super table jt2, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn,
"create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
"nchar(8), t4 bool)");
if (taos_errno(pRes) != 0) {
printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "drop table st1");
if (taos_errno(pRes) != 0) {
printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
taos_close(pConn);
return 0;
}
int32_t create_topic() {
printf("create topic\n");
TAOS_RES* pRes;
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
if (pConn == NULL) {
return -1;
}
pRes = taos_query(pConn, "use abc1");
if (taos_errno(pRes) != 0) {
printf("error in use db, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
if (taos_errno(pRes) != 0) {
printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
taos_close(pConn);
return 0;
}
void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
printf("commit %d tmq %p param %p\n", code, tmq, param);
}
tmq_t* build_consumer() {
#if 0
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
assert(pConn != NULL);
TAOS_RES* pRes = taos_query(pConn, "use abc1");
if (taos_errno(pRes) != 0) {
printf("error in use db, reason:%s\n", taos_errstr(pRes));
}
taos_free_result(pRes);
#endif
tmq_conf_t* conf = tmq_conf_new();
tmq_conf_set(conf, "group.id", "tg2");
tmq_conf_set(conf, "client.id", "my app 1");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set(conf, "enable.auto.commit", "true");
tmq_conf_set(conf, "experimental.snapshot.enable", "true");
/*tmq_conf_set(conf, "experimental.snapshot.enable", "true");*/
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
assert(tmq);
tmq_conf_destroy(conf);
return tmq;
}
tmq_list_t* build_topic_list() {
tmq_list_t* topic_list = tmq_list_new();
tmq_list_append(topic_list, "topic_ctb_column");
/*tmq_list_append(topic_list, "tmq_test_db_multi_insert_topic");*/
return topic_list;
}
void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
int32_t code;
if ((code = tmq_subscribe(tmq, topics))) {
fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
printf("subscribe err\n");
return;
}
int32_t cnt = 0;
while (running) {
TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000);
if (tmqmessage) {
cnt++;
msg_process(tmqmessage);
/*if (cnt >= 2) break;*/
/*printf("get data\n");*/
taos_free_result(tmqmessage);
/*} else {*/
/*break;*/
/*tmq_commit_sync(tmq, NULL);*/
}
}
code = tmq_consumer_close(tmq);
if (code)
fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
else
fprintf(stderr, "%% Consumer closed\n");
}
void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
static const int MIN_COMMIT_COUNT = 1;
int msg_count = 0;
int32_t code;
if ((code = tmq_subscribe(tmq, topics))) {
fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
return;
}
tmq_list_t* subList = NULL;
tmq_subscription(tmq, &subList);
char** subTopics = tmq_list_to_c_array(subList);
int32_t sz = tmq_list_get_size(subList);
printf("subscribed topics: ");
for (int32_t i = 0; i < sz; i++) {
printf("%s, ", subTopics[i]);
}
printf("\n");
tmq_list_destroy(subList);
while (running) {
TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000);
if (tmqmessage) {
msg_process(tmqmessage);
taos_free_result(tmqmessage);
/*tmq_commit_sync(tmq, NULL);*/
/*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/
}
}
code = tmq_consumer_close(tmq);
if (code)
fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
else
fprintf(stderr, "%% Consumer closed\n");
}
int main(int argc, char* argv[]) {
printf("env init\n");
if (init_env() < 0) {
return -1;
}
create_topic();
tmq_t* tmq = build_consumer();
tmq_list_t* topic_list = build_topic_list();
basic_consume_loop(tmq, topic_list);
/*sync_consume_loop(tmq, topic_list);*/
}

View File

@ -254,6 +254,7 @@ enum tmq_res_t {
TMQ_RES_INVALID = -1,
TMQ_RES_DATA = 1,
TMQ_RES_TABLE_META = 2,
TMQ_RES_TAOSX = 3,
};
typedef struct tmq_raw_data {

View File

@ -43,17 +43,17 @@ extern "C" {
#define TSDB_INS_TABLE_VNODES "ins_vnodes"
#define TSDB_INS_TABLE_CONFIGS "ins_configs"
#define TSDB_INS_TABLE_DNODE_VARIABLES "ins_dnode_variables"
#define TSDB_INS_TABLE_SUBSCRIPTIONS "ins_subscriptions"
#define TSDB_INS_TABLE_TOPICS "ins_topics"
#define TSDB_INS_TABLE_STREAMS "ins_streams"
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
#define TSDB_PERFS_TABLE_SMAS "perf_smas"
#define TSDB_PERFS_TABLE_CONNECTIONS "perf_connections"
#define TSDB_PERFS_TABLE_QUERIES "perf_queries"
#define TSDB_PERFS_TABLE_TOPICS "perf_topics"
#define TSDB_PERFS_TABLE_CONSUMERS "perf_consumers"
#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "perf_subscriptions"
#define TSDB_PERFS_TABLE_OFFSETS "perf_offsets"
#define TSDB_PERFS_TABLE_TRANS "perf_trans"
#define TSDB_PERFS_TABLE_STREAMS "perf_streams"
#define TSDB_PERFS_TABLE_APPS "perf_apps"
typedef struct SSysDbTableSchema {

View File

@ -73,6 +73,7 @@ enum {
TMQ_MSG_TYPE__POLL_RSP,
TMQ_MSG_TYPE__POLL_META_RSP,
TMQ_MSG_TYPE__EP_RSP,
TMQ_MSG_TYPE__TAOSX_RSP,
TMQ_MSG_TYPE__END_RSP,
};
@ -129,7 +130,6 @@ typedef struct SDataBlockInfo {
uint32_t capacity;
// TODO: optimize and remove following
int64_t version; // used for stream, and need serialization
int64_t ts; // used for stream, and need serialization
int32_t childId; // used for stream, do not serialize
EStreamType type; // used for stream, do not serialize
STimeWindow calWin; // used for stream, do not serialize

View File

@ -144,8 +144,8 @@ void taosCfgDynamicOptions(const char *option, const char *value);
struct SConfig *taosGetCfg();
void taosSetAllDebugFlag(int32_t flag);
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal);
void taosSetAllDebugFlag(int32_t flag, bool rewrite);
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite);
int32_t taosSetCfg(SConfig *pCfg, char *name);
void taosLocalCfgForbiddenToChange(char* name, bool* forbidden);

View File

@ -276,7 +276,6 @@ struct SSchema {
char name[TSDB_COL_NAME_LEN];
};
typedef struct {
char tbName[TSDB_TABLE_NAME_LEN];
char stbName[TSDB_TABLE_NAME_LEN];
@ -295,17 +294,15 @@ typedef struct {
SSchema* pSchemas;
} STableMetaRsp;
typedef struct {
int32_t code;
int8_t hashMeta;
int64_t uid;
char* tblFName;
int32_t numOfRows;
int32_t affectedRows;
int64_t sver;
STableMetaRsp* pMeta;
int32_t code;
int8_t hashMeta;
int64_t uid;
char* tblFName;
int32_t numOfRows;
int32_t affectedRows;
int64_t sver;
STableMetaRsp* pMeta;
} SSubmitBlkRsp;
typedef struct {
@ -320,7 +317,7 @@ typedef struct {
int32_t tEncodeSSubmitRsp(SEncoder* pEncoder, const SSubmitRsp* pRsp);
int32_t tDecodeSSubmitRsp(SDecoder* pDecoder, SSubmitRsp* pRsp);
void tFreeSSubmitBlkRsp(void* param);
void tFreeSSubmitBlkRsp(void* param);
void tFreeSSubmitRsp(SSubmitRsp* pRsp);
#define COL_SMA_ON ((int8_t)0x1)
@ -2049,8 +2046,8 @@ typedef struct {
STableMetaRsp* pMeta;
} SVCreateTbRsp, SVUpdateTbRsp;
int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp);
int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp);
int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp);
int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp);
void tFreeSVCreateTbRsp(void* param);
int32_t tSerializeSVCreateTbReq(void** buf, SVCreateTbReq* pReq);
@ -2073,6 +2070,7 @@ int32_t tDeserializeSVCreateTbBatchRsp(void* buf, int32_t bufLen, SVCreateTbBatc
// TDMT_VND_DROP_TABLE =================
typedef struct {
char* name;
uint64_t suid; // for tmq in wal format
int8_t igNotExists;
} SVDropTbReq;
@ -2629,6 +2627,22 @@ typedef struct {
};
} STqOffsetVal;
static FORCE_INLINE void tqOffsetResetToData(STqOffsetVal* pOffsetVal, int64_t uid, int64_t ts) {
pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_DATA;
pOffsetVal->uid = uid;
pOffsetVal->ts = ts;
}
static FORCE_INLINE void tqOffsetResetToMeta(STqOffsetVal* pOffsetVal, int64_t uid) {
pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_META;
pOffsetVal->uid = uid;
}
static FORCE_INLINE void tqOffsetResetToLog(STqOffsetVal* pOffsetVal, int64_t ver) {
pOffsetVal->type = TMQ_OFFSET__LOG;
pOffsetVal->version = ver;
}
int32_t tEncodeSTqOffsetVal(SEncoder* pEncoder, const STqOffsetVal* pOffsetVal);
int32_t tDecodeSTqOffsetVal(SDecoder* pDecoder, STqOffsetVal* pOffsetVal);
int32_t tFormatOffset(char* buf, int32_t maxLen, const STqOffsetVal* pVal);
@ -2960,6 +2974,26 @@ typedef struct {
int32_t tEncodeSMqDataRsp(SEncoder* pEncoder, const SMqDataRsp* pRsp);
int32_t tDecodeSMqDataRsp(SDecoder* pDecoder, SMqDataRsp* pRsp);
void tDeleteSMqDataRsp(SMqDataRsp* pRsp);
typedef struct {
SMqRspHead head;
STqOffsetVal reqOffset;
STqOffsetVal rspOffset;
int32_t blockNum;
int8_t withTbName;
int8_t withSchema;
SArray* blockDataLen;
SArray* blockData;
SArray* blockTbName;
SArray* blockSchema;
int32_t createTableNum;
SArray* createTableLen;
SArray* createTableReq;
} STaosxRsp;
int32_t tEncodeSTaosxRsp(SEncoder* pEncoder, const STaosxRsp* pRsp);
int32_t tDecodeSTaosxRsp(SDecoder* pDecoder, STaosxRsp* pRsp);
typedef struct {
SMqRspHead head;

View File

@ -92,6 +92,8 @@ struct SResultRowEntryInfo;
//for selectivity query, the corresponding tag value is assigned if the data is qualified
typedef struct SSubsidiaryResInfo {
int16_t num;
int32_t rowLen;
char* buf; // serialize data buffer
struct SqlFunctionCtx **pCtx;
} SSubsidiaryResInfo;
@ -118,6 +120,11 @@ typedef struct SInputColumnInfoData {
uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions.
} SInputColumnInfoData;
typedef struct SSerializeDataHandle {
struct SDiskbasedBuf* pBuf;
int32_t currentPage;
} SSerializeDataHandle;
// sql function runtime context
typedef struct SqlFunctionCtx {
SInputColumnInfoData input;
@ -137,10 +144,9 @@ typedef struct SqlFunctionCtx {
SFuncExecFuncs fpSet;
SScalarFuncExecFuncs sfp;
struct SExprInfo *pExpr;
struct SDiskbasedBuf *pBuf;
struct SSDataBlock *pSrcBlock;
struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity
int32_t curBufPage;
SSerializeDataHandle saveHandle;
bool isStream;
char udfName[TSDB_FUNC_NAME_LEN];

View File

@ -176,7 +176,8 @@ int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen);
EFuncReturnRows fmGetFuncReturnRows(SFunctionNode* pFunc);
bool fmIsBuiltinFunc(const char* pFunc);
bool fmIsBuiltinFunc(const char* pFunc);
EFunctionType fmGetFuncType(const char* pFunc);
bool fmIsAggFunc(int32_t funcId);
bool fmIsScalarFunc(int32_t funcId);

View File

@ -264,12 +264,14 @@ static FORCE_INLINE int32_t tEncodeDouble(SEncoder* pCoder, double val) {
static FORCE_INLINE int32_t tEncodeBinary(SEncoder* pCoder, const uint8_t* val, uint32_t len) {
if (tEncodeU32v(pCoder, len) < 0) return -1;
if (pCoder->data) {
if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, len)) return -1;
memcpy(TD_CODER_CURRENT(pCoder), val, len);
}
if (len) {
if (pCoder->data) {
if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, len)) return -1;
memcpy(TD_CODER_CURRENT(pCoder), val, len);
}
TD_CODER_MOVE_POS(pCoder, len);
TD_CODER_MOVE_POS(pCoder, len);
}
return 0;
}
@ -414,14 +416,18 @@ static int32_t tDecodeCStrTo(SDecoder* pCoder, char* val) {
static FORCE_INLINE int32_t tDecodeBinaryAlloc(SDecoder* pCoder, void** val, uint64_t* len) {
uint64_t length = 0;
if (tDecodeU64v(pCoder, &length) < 0) return -1;
if (len) *len = length;
if (length) {
if (len) *len = length;
if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1;
*val = taosMemoryMalloc(length);
if (*val == NULL) return -1;
memcpy(*val, TD_CODER_CURRENT(pCoder), length);
if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1;
*val = taosMemoryMalloc(length);
if (*val == NULL) return -1;
memcpy(*val, TD_CODER_CURRENT(pCoder), length);
TD_CODER_MOVE_POS(pCoder, length);
TD_CODER_MOVE_POS(pCoder, length);
} else {
*val = NULL;
}
return 0;
}

View File

@ -58,11 +58,10 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
/**
*
* @param pBuf
* @param groupId
* @param pageId
* @return
*/
void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId);
void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId);
/**
*

View File

@ -64,6 +64,11 @@ pipeline {
defaultValue:'2.1.2',
description: 'This number of baseVersion is generally not modified. Now it is 3.0.0.1'
)
string (
name:'nasPassword',
defaultValue:'password',
description: 'the password of the NAS server which has installPackage-192.168.1.131'
)
}
environment{
WORK_DIR = '/var/lib/jenkins/workspace'
@ -111,17 +116,17 @@ pipeline {
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
}
@ -134,17 +139,22 @@ pipeline {
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_DEB} ${baseVersion} server
bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_CLIENT_TAR} ${version} ${BASE_TD_CLIENT_TAR} ${baseVersion} client ${nasPassword}
python3 checkPackageRuning.py
'''
}
@ -157,17 +167,17 @@ pipeline {
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
}
@ -179,19 +189,42 @@ pipeline {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
python3 checkPackageRuning.py
rmtaos
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_CLIENT_LITE_TAR} ${version} ${BASE_TD_CLIENT_LITE_TAR} ${baseVersion} client ${nasPassword}
python3 checkPackageRuning.py
'''
}
}
}
stage('arm64') {
agent{label 'linux_arm64'}
steps {
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_ARM_TAR} ${version} ${BASE_TD_SERVER_ARM_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_CLIENT_ARM_TAR} ${version} ${BASE_TD_CLIENT_ARM_TAR} ${baseVersion} client ${nasPassword}
python3 checkPackageRuning.py
'''
}

13
packaging/debRpmAutoInstall.sh Executable file
View File

@ -0,0 +1,13 @@
#!/usr/bin/expect
set packgeName [lindex $argv 0]
set packageSuffix [lindex $argv 1]
set timeout 3
if { ${packageSuffix} == "deb" } {
spawn dpkg -i ${packgeName}
} elseif { ${packageSuffix} == "rpm"} {
spawn rpm -ivh ${packgeName}
}
expect "*one:"
send "\r"
expect "*skip:"
send "\r"

View File

@ -1,12 +1,24 @@
#!/bin/sh
#parameter
scriptDir=$(dirname $(readlink -f $0))
packgeName=$1
version=$2
originPackageName=$3
originversion=$4
testFile=$5
subFile="taos.tar.gz"
password=$6
# Color setting
RED='\033[41;30m'
GREEN='\033[1;32m'
YELLOW='\033[1;33m'
BLUE='\033[1;34m'
GREEN_DARK='\033[0;32m'
YELLOW_DARK='\033[0;33m'
BLUE_DARK='\033[0;34m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
if [ ${testFile} = "server" ];then
tdPath="TDengine-server-${version}"
@ -23,111 +35,211 @@ elif [ ${testFile} = "tools" ];then
fi
function cmdInstall {
comd=$1
if command -v ${comd} ;then
echo "${comd} is already installed"
command=$1
if command -v ${command} ;then
echoColor YD "${command} is already installed"
else
if command -v apt ;then
apt-get install ${comd} -y
apt-get install ${command} -y
elif command -v yum ;then
yum -y install ${comd}
echo "you should install ${comd} manually"
yum -y install ${command}
echoColor YD "you should install ${command} manually"
fi
fi
}
function echoColor {
color=$1
command=$2
echo "Uninstall all components of TDeingne"
if command -v rmtaos ;then
echo "uninstall all components of TDeingne:rmtaos"
rmtaos
else
echo "os doesn't include TDengine "
if [ ${color} = 'Y' ];then
echo -e "${YELLOW}${command}${NC}"
elif [ ${color} = 'YD' ];then
echo -e "${YELLOW_DARK}${command}${NC}"
elif [ ${color} = 'R' ];then
echo -e "${RED}${command}${NC}"
elif [ ${color} = 'G' ];then
echo -e "${GREEN}${command}${NC}\r\n"
elif [ ${color} = 'B' ];then
echo -e "${BLUE}${command}${NC}"
elif [ ${color} = 'BD' ];then
echo -e "${BLUE_DARK}${command}${NC}"
fi
}
if command -v rmtaostools ;then
echo "uninstall all components of TDeingne:rmtaostools"
rmtaostools
else
echo "os doesn't include rmtaostools "
fi
echoColor G "===== install basesoft ====="
cmdInstall tree
cmdInstall wget
cmdInstall sshpass
echo "new workroom path"
echoColor G "===== Uninstall all components of TDeingne ====="
if command -v rmtaos ;then
echoColor YD "uninstall all components of TDeingne:rmtaos"
rmtaos
else
echoColor YD "os doesn't include TDengine"
fi
if command -v rmtaostools ;then
echoColor YD "uninstall all components of TDeingne:rmtaostools"
rmtaostools
else
echoColor YD "os doesn't include rmtaostools "
fi
echoColor G "===== new workroom path ====="
installPath="/usr/local/src/packageTest"
oriInstallPath="/usr/local/src/packageTest/3.1"
if [ ! -d ${installPath} ] ;then
echoColor BD "mkdir -p ${installPath}"
mkdir -p ${installPath}
else
echo "${installPath} already exists"
echoColor YD "${installPath} already exists"
fi
if [ -d ${installPath}/${tdPath} ] ;then
echoColor BD "rm -rf ${installPath}/${tdPath}/*"
rm -rf ${installPath}/${tdPath}/*
fi
if [ ! -d ${oriInstallPath} ] ;then
echoColor BD "mkdir -p ${oriInstallPath}"
mkdir -p ${oriInstallPath}
else
echo "${oriInstallPath} already exists"
echoColor YD "${oriInstallPath} already exists"
fi
echo "decompress installPackage"
if [ -d ${oriInstallPath}/${originTdpPath} ] ;then
echoColor BD "rm -rf ${oriInstallPath}/${originTdpPath}/*"
rm -rf ${oriInstallPath}/${originTdpPath}/*
fi
echoColor G "===== download installPackage ====="
# cd ${installPath}
# wget https://www.taosdata.com/assets-download/3.0/${packgeName}
# cd ${oriInstallPath}
# wget https://www.taosdata.com/assets-download/3.0/${originPackageName}
cd ${installPath}
wget https://www.taosdata.com/assets-download/3.0/${packgeName}
cd ${oriInstallPath}
wget https://www.taosdata.com/assets-download/3.0/${originPackageName}
cp -r ${scriptDir}/debRpmAutoInstall.sh .
if [ ! -f ${packgeName} ];then
echoColor BD "sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} ."
sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} .
fi
packageSuffix=$(echo ${packgeName} | awk -F '.' '{print $NF}')
if [ ! -f debRpmAutoInstall.sh ];then
echo '#!/usr/bin/expect ' > debRpmAutoInstall.sh
echo 'set packgeName [lindex $argv 0]' >> debRpmAutoInstall.sh
echo 'set packageSuffix [lindex $argv 1]' >> debRpmAutoInstall.sh
echo 'set timeout 3 ' >> debRpmAutoInstall.sh
echo 'if { ${packageSuffix} == "deb" } {' >> debRpmAutoInstall.sh
echo ' spawn dpkg -i ${packgeName} ' >> debRpmAutoInstall.sh
echo '} elseif { ${packageSuffix} == "rpm"} {' >> debRpmAutoInstall.sh
echo ' spawn rpm -ivh ${packgeName}' >> debRpmAutoInstall.sh
echo '}' >> debRpmAutoInstall.sh
echo 'expect "*one:"' >> debRpmAutoInstall.sh
echo 'send "\r"' >> debRpmAutoInstall.sh
echo 'expect "*skip:"' >> debRpmAutoInstall.sh
echo 'send "\r" ' >> debRpmAutoInstall.sh
fi
echoColor G "===== instal Package ====="
if [[ ${packgeName} =~ "deb" ]];then
cd ${installPath}
echo "dpkg ${packgeName}" && dpkg -i ${packgeName}
dpkg -r taostools
dpkg -r tdengine
if [[ ${packgeName} =~ "TDengine" ]];then
echoColor BD "./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}
else
echoColor BD "dpkg -i ${packgeName}" && dpkg -i ${packgeName}
fi
elif [[ ${packgeName} =~ "rpm" ]];then
cd ${installPath}
echo "rpm ${packgeName}" && rpm -ivh ${packgeName}
sudo rpm -e tdengine
sudo rpm -e taostools
if [[ ${packgeName} =~ "TDengine" ]];then
echoColor BD "./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}
else
echoColor BD "rpm -ivh ${packgeName}" && rpm -ivh ${packgeName}
fi
elif [[ ${packgeName} =~ "tar" ]];then
echo "tar ${packgeName}" && tar -xvf ${packgeName}
cd ${oriInstallPath}
echo "tar -xvf ${originPackageName}" && tar -xvf ${originPackageName}
echoColor G "===== check installPackage File of tar ====="
cd ${oriInstallPath}
if [ ! -f ${originPackageName} ];then
echoColor YD "download base installPackage"
echoColor BD "sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} ."
sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} .
fi
echoColor YD "unzip the base installation package"
echoColor BD "tar -xf ${originPackageName}" && tar -xf ${originPackageName}
cd ${installPath}
echo "tar -xvf ${packgeName}" && tar -xvf ${packgeName}
echoColor YD "unzip the new installation package"
echoColor BD "tar -xf ${packgeName}" && tar -xf ${packgeName}
if [ ${testFile} != "tools" ] ;then
cd ${installPath}/${tdPath} && tar vxf ${subFile}
cd ${oriInstallPath}/${originTdpPath} && tar vxf ${subFile}
cd ${installPath}/${tdPath} && tar xf ${subFile}
cd ${oriInstallPath}/${originTdpPath} && tar xf ${subFile}
fi
echo "check installPackage File"
cd ${oriInstallPath}/${originTdpPath} && tree > ${installPath}/base_${originversion}_checkfile
cd ${installPath}/${tdPath} && tree > ${installPath}/now_${version}_checkfile
cd ${installPath}
tree ${oriInstallPath}/${originTdpPath} > ${originPackageName}_checkfile
tree ${installPath}/${tdPath} > ${packgeName}_checkfile
diff ${packgeName}_checkfile ${originPackageName}_checkfile > ${installPath}/diffFile.log
diff ${installPath}/base_${originversion}_checkfile ${installPath}/now_${version}_checkfile > ${installPath}/diffFile.log
diffNumbers=`cat ${installPath}/diffFile.log |wc -l `
if [ ${diffNumbers} != 0 ];then
echo "The number and names of files have changed from the previous installation package"
echo `cat ${installPath}/diffFile.log`
exit -1
fi
if [ ${diffNumbers} != 0 ];then
echoColor R "The number and names of files is different from the previous installation package"
echoColor Y `cat ${installPath}/diffFile.log`
exit -1
else
echoColor G "The number and names of files are the same as previous installation packages"
fi
echoColor YD "===== install Package of tar ====="
cd ${installPath}/${tdPath}
if [ ${testFile} = "server" ];then
echoColor BD "bash ${installCmd} -e no "
bash ${installCmd} -e no
else
echoColor BD "bash ${installCmd} "
bash ${installCmd}
fi
if [[ ${packgeName} =~ "Lite" ]];then
cd ${installPath}
wget https://www.taosdata.com/assets-download/3.0/taosTools-2.1.2-Linux-x64.tar.gz
tar xvf taosTools-2.1.2-Linux-x64.tar.gz
cd taosTools-2.1.2 && bash install-taostools.sh
fi
fi
cd ${installPath}
if ([[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]]) || [[ ${packgeName} =~ "client" ]] ;then
echoColor G "===== install taos-tools when package is lite or client ====="
cd ${installPath}
sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz .
# wget https://www.taosdata.com/assets-download/3.0/taosTools-2.1.2-Linux-x64.tar.gz
tar xf taosTools-2.1.2-Linux-x64.tar.gz
cd taosTools-2.1.2 && bash install-taostools.sh
elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "deb" ]] ;then
echoColor G "===== install taos-tools when package is lite or client ====="
cd ${installPath}
sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz .
tar xf taosTools-2.1.2-Linux-x64.tar.gz
cd taosTools-2.1.2 && bash install-taostools.sh
elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "rpm" ]] ;then
echoColor G "===== install taos-tools when package is lite or client ====="
cd ${installPath}
sshpass -p ${password} scp -oStrictHostKeyChecking=no -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz .
tar xf taosTools-2.1.2-Linux-x64.tar.gz
cd taosTools-2.1.2 && bash install-taostools.sh
fi

View File

@ -52,15 +52,17 @@ enum {
RES_TYPE__QUERY = 1,
RES_TYPE__TMQ,
RES_TYPE__TMQ_META,
RES_TYPE__TAOSX,
};
#define SHOW_VARIABLES_RESULT_COLS 2
#define SHOW_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ)
#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ || *(int8_t*)res == RES_TYPE__TAOSX)
#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
#define TD_RES_TMQ_TAOSX(res) (*(int8_t*)res == RES_TYPE__TAOSX)
typedef struct SAppInstInfo SAppInstInfo;
@ -102,6 +104,7 @@ typedef struct SQueryExecMetric {
int64_t ctgEnd; // end to parse, us
int64_t semanticEnd;
int64_t planEnd;
int64_t resultReady;
int64_t execEnd;
int64_t send; // start to send to server, us
int64_t rsp; // receive response from server, us
@ -199,8 +202,8 @@ typedef struct {
int32_t vgId;
SSchemaWrapper schema;
int32_t resIter;
SMqDataRsp rsp;
SReqResultInfo resInfo;
SMqDataRsp rsp;
} SMqRspObj;
typedef struct {
@ -211,6 +214,17 @@ typedef struct {
SMqMetaRsp metaRsp;
} SMqMetaRspObj;
typedef struct {
int8_t resType;
char topic[TSDB_TOPIC_FNAME_LEN];
char db[TSDB_DB_FNAME_LEN];
int32_t vgId;
SSchemaWrapper schema;
int32_t resIter;
SReqResultInfo resInfo;
STaosxRsp rsp;
} SMqTaosxRspObj;
typedef struct SRequestObj {
int8_t resType; // query or tmq
uint64_t requestId;
@ -370,7 +384,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData*
int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest);
int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList);
void doAsyncQuery(SRequestObj* pRequest, bool forceUpdateMeta);
int32_t removeMeta(STscObj* pTscObj, SArray* tbList);
int32_t removeMeta(STscObj* pTscObj, SArray* tbList);
int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog);
int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog);
bool qnodeRequired(SRequestObj* pRequest);
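Illustrative sketch, not part of this patch: each result object above begins with an int8_t resType tag, and the TD_RES_* macros classify a generic TAOS_RES pointer by dereferencing that first byte. The enum values and the TMQ/TAOSX grouping mirror the definitions shown here; the struct and function names below are made-up stand-ins.

#include <stdint.h>
#include <stdio.h>

enum { RES_TYPE__QUERY = 1, RES_TYPE__TMQ, RES_TYPE__TMQ_META, RES_TYPE__TAOSX };

/* Stand-in for the real rsp objects: only the leading tag matters for dispatch. */
typedef struct { int8_t resType; } FakeRspObj;

static const char *classify(const void *res) {
  int8_t t = *(const int8_t *)res;                     /* peek at the leading type tag */
  if (t == RES_TYPE__QUERY) return "query";
  if (t == RES_TYPE__TMQ || t == RES_TYPE__TAOSX) return "tmq data"; /* TD_RES_TMQ accepts both */
  if (t == RES_TYPE__TMQ_META) return "tmq meta";
  return "invalid";
}

int main(void) {
  FakeRspObj r = {.resType = RES_TYPE__TAOSX};
  printf("%s\n", classify(&r));                        /* prints "tmq data" */
  return 0;
}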

View File

@ -42,6 +42,7 @@ void commit_cb(tmq_t *tmq, int32_t code, void *param) {
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqConfNewImp(JNIEnv *env, jobject jobj) {
tmq_conf_t *conf = tmq_conf_new();
jniGetGlobalMethod(env);
return (jlong)conf;
}

View File

@ -76,19 +76,19 @@ static void deregisterRequest(SRequestObj *pRequest) {
pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst);
if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) {
tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 "us, exec:%" PRId64 "us",
duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
pRequest->metric.ctgEnd - pRequest->metric.ctgStart,
pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
"us, exec:%" PRId64 "us",
duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
pRequest->metric.execEnd - pRequest->metric.semanticEnd);
atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 "us, planner:%" PRId64 "us, exec:%" PRId64 "us",
duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
pRequest->metric.ctgEnd - pRequest->metric.ctgStart,
pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
"us, planner:%" PRId64 "us, exec:%" PRId64 "us",
duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
pRequest->metric.planEnd - pRequest->metric.semanticEnd,
nowUs - pRequest->metric.semanticEnd);
pRequest->metric.resultReady - pRequest->metric.planEnd);
atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration);
}

View File

@ -728,7 +728,7 @@ int32_t handleSubmitExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog
tFreeSTableMetaRsp(blk->pMeta);
taosMemoryFreeClear(blk->pMeta);
}
if (NULL == blk->tblFName || 0 == blk->tblFName[0]) {
continue;
}
@ -851,6 +851,8 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
SRequestObj* pRequest = (SRequestObj*)param;
pRequest->code = code;
pRequest->metric.resultReady = taosGetTimestampUs();
if (pResult) {
memcpy(&pRequest->body.resInfo.execRes, pResult, sizeof(*pResult));
}

View File

@ -184,6 +184,19 @@ void taos_free_result(TAOS_RES *res) {
SRequestObj *pRequest = (SRequestObj *)res;
tscDebug("0x%" PRIx64 " taos_free_result start to free query", pRequest->requestId);
destroyRequest(pRequest);
} else if (TD_RES_TMQ_TAOSX(res)) {
SMqTaosxRspObj *pRsp = (SMqTaosxRspObj *)res;
if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
if (pRsp->rsp.blockDataLen) taosArrayDestroy(pRsp->rsp.blockDataLen);
if (pRsp->rsp.withTbName) taosArrayDestroyP(pRsp->rsp.blockTbName, taosMemoryFree);
if (pRsp->rsp.withSchema) taosArrayDestroyP(pRsp->rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
// taosx
taosArrayDestroy(pRsp->rsp.createTableLen);
taosArrayDestroyP(pRsp->rsp.createTableReq, taosMemoryFree);
pRsp->resInfo.pRspMsg = NULL;
doFreeReqResultInfo(&pRsp->resInfo);
taosMemoryFree(pRsp);
} else if (TD_RES_TMQ(res)) {
SMqRspObj *pRsp = (SMqRspObj *)res;
if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);

View File

@ -356,6 +356,7 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray* pColumns,
goto end;
}
pRequest->syncQuery = true;
if (!pRequest->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
goto end;
@ -456,7 +457,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
code = smlSendMetaMsg(info, &pName, pColumns, pTags, NULL, SCHEMA_ACTION_CREATE_STABLE);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable);
uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname);
goto end;
}
info->cost.numOfCreateSTables++;
@ -492,7 +493,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable);
uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname);
goto end;
}
}

View File

@ -164,6 +164,7 @@ typedef struct {
union {
SMqDataRsp dataRsp;
SMqMetaRsp metaRsp;
STaosxRsp taosxRsp;
};
} SMqPollRspWrapper;
@ -810,8 +811,19 @@ int32_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) {
}
int32_t tmq_unsubscribe(tmq_t* tmq) {
int32_t rsp;
int32_t retryCnt = 0;
tmq_list_t* lst = tmq_list_new();
int32_t rsp = tmq_subscribe(tmq, lst);
while (1) {
rsp = tmq_subscribe(tmq, lst);
if (rsp != TSDB_CODE_MND_CONSUMER_NOT_READY || retryCnt > 5) {
break;
} else {
retryCnt++;
taosMsleep(500);
}
}
tmq_list_destroy(lst);
return rsp;
}
@ -1130,21 +1142,29 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tDecodeSMqDataRsp(&decoder, &pRspWrapper->dataRsp);
tDecoderClear(&decoder);
memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead));
} else {
ASSERT(rspType == TMQ_MSG_TYPE__POLL_META_RSP);
tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d",
tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version,
rspType);
} else if (rspType == TMQ_MSG_TYPE__POLL_META_RSP) {
SDecoder decoder;
tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
tDecodeSMqMetaRsp(&decoder, &pRspWrapper->metaRsp);
tDecoderClear(&decoder);
memcpy(&pRspWrapper->metaRsp, pMsg->pData, sizeof(SMqRspHead));
} else if (rspType == TMQ_MSG_TYPE__TAOSX_RSP) {
SDecoder decoder;
tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
tDecodeSTaosxRsp(&decoder, &pRspWrapper->taosxRsp);
tDecoderClear(&decoder);
memcpy(&pRspWrapper->taosxRsp, pMsg->pData, sizeof(SMqRspHead));
} else {
ASSERT(0);
}
taosMemoryFree(pMsg->pData);
tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d",
tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version,
rspType);
taosWriteQitem(tmq->mqueue, pRspWrapper);
tsem_post(&tmq->rspSem);
@ -1443,6 +1463,24 @@ SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper) {
return pRspObj;
}
SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) {
SMqTaosxRspObj* pRspObj = taosMemoryCalloc(1, sizeof(SMqTaosxRspObj));
pRspObj->resType = RES_TYPE__TAOSX;
tstrncpy(pRspObj->topic, pWrapper->topicHandle->topicName, TSDB_TOPIC_FNAME_LEN);
tstrncpy(pRspObj->db, pWrapper->topicHandle->db, TSDB_DB_FNAME_LEN);
pRspObj->vgId = pWrapper->vgHandle->vgId;
pRspObj->resIter = -1;
memcpy(&pRspObj->rsp, &pWrapper->dataRsp, sizeof(SMqTaosxRspObj));
pRspObj->resInfo.totalRows = 0;
pRspObj->resInfo.precision = TSDB_TIME_PRECISION_MILLI;
if (!pWrapper->dataRsp.withSchema) {
setResSchemaInfo(&pRspObj->resInfo, pWrapper->topicHandle->schema.pSchema, pWrapper->topicHandle->schema.nCols);
}
return pRspObj;
}
int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
/*tscDebug("call poll");*/
for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
@ -1595,6 +1633,30 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
pollRspWrapper->metaRsp.head.epoch, consumerEpoch);
taosFreeQitem(pollRspWrapper);
}
} else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__TAOSX_RSP) {
SMqPollRspWrapper* pollRspWrapper = (SMqPollRspWrapper*)rspWrapper;
/*atomic_sub_fetch_32(&tmq->readyRequest, 1);*/
int32_t consumerEpoch = atomic_load_32(&tmq->epoch);
if (pollRspWrapper->taosxRsp.head.epoch == consumerEpoch) {
SMqClientVg* pVg = pollRspWrapper->vgHandle;
/*printf("vgId:%d, offset %" PRId64 " up to %" PRId64 "\n", pVg->vgId, pVg->currentOffset,
* rspMsg->msg.rspOffset);*/
pVg->currentOffset = pollRspWrapper->taosxRsp.rspOffset;
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
if (pollRspWrapper->taosxRsp.blockNum == 0) {
taosFreeQitem(pollRspWrapper);
rspWrapper = NULL;
continue;
}
// build rsp
SMqRspObj* pRsp = tmqBuildRspFromWrapper(pollRspWrapper);
taosFreeQitem(pollRspWrapper);
return pRsp;
} else {
tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d\n",
pollRspWrapper->taosxRsp.head.epoch, consumerEpoch);
taosFreeQitem(pollRspWrapper);
}
} else {
/*printf("handle ep rsp %d\n", rspMsg->head.mqMsgType);*/
bool reset = false;
@ -1707,9 +1769,11 @@ tmq_res_t tmq_get_res_type(TAOS_RES* res) {
} else if (TD_RES_TMQ_META(res)) {
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DELETE) {
return TMQ_RES_DATA;
return TMQ_RES_TAOSX;
}
return TMQ_RES_TABLE_META;
} else if (TD_RES_TMQ_TAOSX(res)) {
return TMQ_RES_TAOSX;
} else {
return TMQ_RES_INVALID;
}

View File

@ -240,6 +240,22 @@ static const SSysDbTableSchema variablesSchema[] = {
{.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
};
static const SSysDbTableSchema topicSchema[] = {
{.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
// TODO config
};
static const SSysDbTableSchema subscriptionSchema[] = {
{.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
};
static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true},
{TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true},
@ -260,6 +276,9 @@ static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema), true},
{TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema), true},
{TSDB_INS_TABLE_DNODE_VARIABLES, variablesSchema, tListLen(variablesSchema), true},
{TSDB_INS_TABLE_TOPICS, topicSchema, tListLen(topicSchema), false},
{TSDB_INS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema), false},
{TSDB_INS_TABLE_STREAMS, streamSchema, tListLen(streamSchema), false},
};
static const SSysDbTableSchema connectionsSchema[] = {
@ -272,13 +291,6 @@ static const SSysDbTableSchema connectionsSchema[] = {
{.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
static const SSysDbTableSchema topicSchema[] = {
{.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
// TODO config
};
static const SSysDbTableSchema consumerSchema[] = {
{.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
@ -292,13 +304,6 @@ static const SSysDbTableSchema consumerSchema[] = {
{.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
static const SSysDbTableSchema subscriptionSchema[] = {
{.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
};
static const SSysDbTableSchema offsetSchema[] = {
{.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
@ -345,13 +350,10 @@ static const SSysDbTableSchema appSchema[] = {
static const SSysTableMeta perfsMeta[] = {
{TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema), false},
{TSDB_PERFS_TABLE_QUERIES, querySchema, tListLen(querySchema), false},
{TSDB_PERFS_TABLE_TOPICS, topicSchema, tListLen(topicSchema), false},
{TSDB_PERFS_TABLE_CONSUMERS, consumerSchema, tListLen(consumerSchema), false},
{TSDB_PERFS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema), false},
// {TSDB_PERFS_TABLE_OFFSETS, offsetSchema, tListLen(offsetSchema)},
{TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema), false},
// {TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema), false},
{TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema), false},
{TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema), false}};
// clang-format on

View File

@ -427,6 +427,152 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
return 0;
}
static int32_t taosUpdateServerCfg(SConfig *pCfg) {
SConfigItem *pItem;
ECfgSrcType stype;
int32_t numOfCores;
int64_t totalMemoryKB;
pItem = cfgGetItem(tsCfg, "numOfCores");
if (pItem == NULL) {
return -1;
} else {
stype = pItem->stype;
numOfCores = pItem->fval;
}
pItem = cfgGetItem(tsCfg, "supportVnodes");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfSupportVnodes = numOfCores * 2;
tsNumOfSupportVnodes = TMAX(tsNumOfSupportVnodes, 2);
pItem->i32 = tsNumOfSupportVnodes;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "numOfRpcThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfRpcThreads = numOfCores / 2;
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, 4);
pItem->i32 = tsNumOfRpcThreads;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "numOfCommitThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfCommitThreads = numOfCores / 2;
tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4);
pItem->i32 = tsNumOfCommitThreads;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "numOfMnodeReadThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfMnodeReadThreads = numOfCores / 8;
tsNumOfMnodeReadThreads = TRANGE(tsNumOfMnodeReadThreads, 1, 4);
pItem->i32 = tsNumOfMnodeReadThreads;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "numOfVnodeQueryThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfVnodeQueryThreads = numOfCores * 2;
tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4);
pItem->i32 = tsNumOfVnodeQueryThreads;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "numOfVnodeStreamThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfVnodeStreamThreads = numOfCores / 4;
tsNumOfVnodeStreamThreads = TMAX(tsNumOfVnodeStreamThreads, 4);
pItem->i32 = tsNumOfVnodeStreamThreads;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "numOfVnodeFetchThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfVnodeFetchThreads = numOfCores / 4;
tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4);
pItem->i32 = tsNumOfVnodeFetchThreads;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "numOfVnodeWriteThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfVnodeWriteThreads = numOfCores;
tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1);
pItem->i32 = tsNumOfVnodeWriteThreads;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "numOfVnodeSyncThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfVnodeSyncThreads = numOfCores * 2;
tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16);
pItem->i32 = tsNumOfVnodeSyncThreads;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "numOfVnodeRsmaThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfVnodeRsmaThreads = numOfCores;
tsNumOfVnodeRsmaThreads = TMAX(tsNumOfVnodeRsmaThreads, 4);
pItem->i32 = tsNumOfVnodeRsmaThreads;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "numOfQnodeQueryThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfQnodeQueryThreads = numOfCores * 2;
tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4);
pItem->i32 = tsNumOfQnodeQueryThreads;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "numOfQnodeFetchThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfQnodeFetchThreads = numOfCores / 2;
tsNumOfQnodeFetchThreads = TMAX(tsNumOfQnodeFetchThreads, 4);
pItem->i32 = tsNumOfQnodeFetchThreads;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "numOfSnodeSharedThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfSnodeSharedThreads = numOfCores / 4;
tsNumOfSnodeSharedThreads = TRANGE(tsNumOfSnodeSharedThreads, 2, 4);
pItem->i32 = tsNumOfSnodeSharedThreads;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "numOfSnodeUniqueThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfSnodeUniqueThreads = numOfCores / 4;
tsNumOfSnodeUniqueThreads = TRANGE(tsNumOfSnodeUniqueThreads, 2, 4);
pItem->i32 = tsNumOfSnodeUniqueThreads;
pItem->stype = stype;
}
pItem = cfgGetItem(tsCfg, "totalMemoryKB");
if (pItem == NULL) {
return -1;
} else {
stype = pItem->stype;
totalMemoryKB = pItem->i64;
}
pItem = cfgGetItem(tsCfg, "rpcQueueMemoryAllowed");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsRpcQueueMemoryAllowed = totalMemoryKB * 1024 * 0.1;
tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL);
pItem->i64 = tsRpcQueueMemoryAllowed;
pItem->stype = stype;
}
return 0;
}
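Illustrative sketch, not part of this patch: a standalone program showing roughly how the taosUpdateServerCfg defaults above come out on a hypothetical 8-core, 16 GB host. TMAX and TRANGE are redefined locally as simple max/clamp stand-ins for the real macros, and the real code additionally clamps rpcQueueMemoryAllowed to a range derived from TSDB_MAX_MSG_SIZE.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TMAX(a, b)        ((a) > (b) ? (a) : (b))
#define TRANGE(x, lo, hi) ((x) < (lo) ? (lo) : ((x) > (hi) ? (hi) : (x)))

int main(void) {
  int     numOfCores    = 8;                                /* hypothetical host */
  int64_t totalMemoryKB = 16LL * 1024 * 1024;               /* 16 GB expressed in KB */

  int supportVnodes     = TMAX(numOfCores * 2, 2);          /* -> 16 */
  int rpcThreads        = TRANGE(numOfCores / 2, 1, 4);     /* -> 4  */
  int commitThreads     = TRANGE(numOfCores / 2, 2, 4);     /* -> 4  */
  int mnodeReadThreads  = TRANGE(numOfCores / 8, 1, 4);     /* -> 1  */
  int vnodeQueryThreads = TMAX(numOfCores * 2, 4);          /* -> 16 */
  int64_t rpcQueueMemoryAllowed = (int64_t)(totalMemoryKB * 1024 * 0.1); /* 10% of RAM, in bytes */

  printf("vnodes:%d rpc:%d commit:%d mnodeRead:%d vnodeQuery:%d rpcQueueMB:%" PRId64 "\n",
         supportVnodes, rpcThreads, commitThreads, mnodeReadThreads, vnodeQueryThreads,
         rpcQueueMemoryAllowed / (1024 * 1024));            /* ~1638 MB before clamping */
  return 0;
}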
static void taosSetClientLogCfg(SConfig *pCfg) {
SConfigItem *pItem = cfgGetItem(pCfg, "logDir");
tstrncpy(tsLogDir, cfgGetItem(pCfg, "logDir")->str, PATH_MAX);
@ -981,7 +1127,7 @@ int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDi
taosSetServerLogCfg(pCfg);
}
taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32);
taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32, false);
if (taosMulMkDir(tsLogDir) != 0) {
uError("failed to create dir:%s since %s", tsLogDir, terrstr());
@ -1048,6 +1194,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile
if (taosSetClientCfg(tsCfg)) return -1;
} else {
if (taosSetClientCfg(tsCfg)) return -1;
if (taosUpdateServerCfg(tsCfg)) return -1;
if (taosSetServerCfg(tsCfg)) return -1;
if (taosSetTfsCfg(tsCfg) != 0) return -1;
}
@ -1072,7 +1219,7 @@ void taosCleanupCfg() {
void taosCfgDynamicOptions(const char *option, const char *value) {
if (strncasecmp(option, "debugFlag", 9) == 0) {
int32_t flag = atoi(value);
taosSetAllDebugFlag(flag);
taosSetAllDebugFlag(flag, true);
return;
}
@ -1097,11 +1244,13 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
"dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag",
"tqDebugFlag", "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag",
"tmrDebugFlag", "uDebugFlag", "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag",
"jniDebugFlag",
};
int32_t *optionVars[] = {
&dDebugFlag, &vDebugFlag, &mDebugFlag, &wDebugFlag, &sDebugFlag, &tsdbDebugFlag,
&tqDebugFlag, &fsDebugFlag, &udfDebugFlag, &smaDebugFlag, &idxDebugFlag, &tdbDebugFlag,
&tmrDebugFlag, &uDebugFlag, &smaDebugFlag, &rpcDebugFlag, &qDebugFlag, &metaDebugFlag,
&jniDebugFlag,
};
int32_t optionSize = tListLen(options);
@ -1113,41 +1262,42 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
int32_t flag = atoi(value);
uInfo("%s set from %d to %d", optName, *optionVars[d], flag);
*optionVars[d] = flag;
taosSetDebugFlag(optionVars[d], optName, flag);
taosSetDebugFlag(optionVars[d], optName, flag, true);
return;
}
uError("failed to cfg dynamic option:%s value:%s", option, value);
}
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal) {
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite) {
SConfigItem *pItem = cfgGetItem(tsCfg, flagName);
if (pItem != NULL) {
if (pItem != NULL && (rewrite || pItem->i32 == 0)) {
pItem->i32 = flagVal;
}
*pFlagPtr = flagVal;
}
void taosSetAllDebugFlag(int32_t flag) {
void taosSetAllDebugFlag(int32_t flag, bool rewrite) {
if (flag <= 0) return;
taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag);
taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag);
taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag);
taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag);
taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag);
taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag);
taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag);
taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag);
taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag);
taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag);
taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag);
taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag);
taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag);
taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag);
taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag);
taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag);
taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag);
taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag);
taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag, rewrite);
taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag, rewrite);
taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag, rewrite);
taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag, rewrite);
taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag, rewrite);
taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag, rewrite);
taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag, rewrite);
taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag, rewrite);
taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag, rewrite);
taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag, rewrite);
taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag, rewrite);
taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag, rewrite);
taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag, rewrite);
taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag, rewrite);
taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag, rewrite);
taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag, rewrite);
taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag, rewrite);
taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, rewrite);
taosSetDebugFlag(&metaDebugFlag, "tmrDebugFlag", flag, rewrite);
uInfo("all debug flag are set to %d", flag);
}

View File

@ -3330,7 +3330,7 @@ int32_t tDeserializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) {
return 0;
}
void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp*)pRsp)->pSchemas); }
void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp *)pRsp)->pSchemas); }
void tFreeSTableIndexRsp(void *info) {
if (NULL == info) {
@ -5119,17 +5119,17 @@ int tDecodeSVCreateTbRsp(SDecoder *pCoder, SVCreateTbRsp *pRsp) {
} else {
pRsp->pMeta = NULL;
}
tEndDecode(pCoder);
return 0;
}
void tFreeSVCreateTbRsp(void* param) {
void tFreeSVCreateTbRsp(void *param) {
if (NULL == param) {
return;
}
SVCreateTbRsp* pRsp = (SVCreateTbRsp*)param;
SVCreateTbRsp *pRsp = (SVCreateTbRsp *)param;
if (pRsp->pMeta) {
taosMemoryFree(pRsp->pMeta->pSchemas);
taosMemoryFree(pRsp->pMeta);
@ -5141,6 +5141,7 @@ static int32_t tEncodeSVDropTbReq(SEncoder *pCoder, const SVDropTbReq *pReq) {
if (tStartEncode(pCoder) < 0) return -1;
if (tEncodeCStr(pCoder, pReq->name) < 0) return -1;
if (tEncodeU64(pCoder, pReq->suid) < 0) return -1;
if (tEncodeI8(pCoder, pReq->igNotExists) < 0) return -1;
tEndEncode(pCoder);
@ -5151,6 +5152,7 @@ static int32_t tDecodeSVDropTbReq(SDecoder *pCoder, SVDropTbReq *pReq) {
if (tStartDecode(pCoder) < 0) return -1;
if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1;
if (tDecodeU64(pCoder, &pReq->suid) < 0) return -1;
if (tDecodeI8(pCoder, &pReq->igNotExists) < 0) return -1;
tEndDecode(pCoder);
@ -5345,7 +5347,7 @@ static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) {
if (tDecodeI32v(pDecoder, &pBlock->numOfRows) < 0) return -1;
if (tDecodeI32v(pDecoder, &pBlock->affectedRows) < 0) return -1;
if (tDecodeI64v(pDecoder, &pBlock->sver) < 0) return -1;
int32_t meta = 0;
if (tDecodeI32(pDecoder, &meta) < 0) return -1;
if (meta) {
@ -5393,12 +5395,12 @@ int32_t tDecodeSSubmitRsp(SDecoder *pDecoder, SSubmitRsp *pRsp) {
return 0;
}
void tFreeSSubmitBlkRsp(void* param) {
void tFreeSSubmitBlkRsp(void *param) {
if (NULL == param) {
return;
}
SSubmitBlkRsp* pRsp = (SSubmitBlkRsp*)param;
SSubmitBlkRsp *pRsp = (SSubmitBlkRsp *)param;
taosMemoryFree(pRsp->tblFName);
if (pRsp->pMeta) {
@ -5407,7 +5409,6 @@ void tFreeSSubmitBlkRsp(void* param) {
}
}
void tFreeSSubmitRsp(SSubmitRsp *pRsp) {
if (NULL == pRsp) return;
@ -5619,7 +5620,6 @@ void tFreeSMAlterStbRsp(SMAlterStbRsp *pRsp) {
}
}
int32_t tEncodeSMCreateStbRsp(SEncoder *pEncoder, const SMCreateStbRsp *pRsp) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->pMeta->pSchemas ? 1 : 0) < 0) return -1;
@ -5671,8 +5671,6 @@ void tFreeSMCreateStbRsp(SMCreateStbRsp *pRsp) {
}
}
int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal) {
if (tEncodeI8(pEncoder, pOffsetVal->type) < 0) return -1;
if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
@ -5690,7 +5688,7 @@ int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal)
int32_t tDecodeSTqOffsetVal(SDecoder *pDecoder, STqOffsetVal *pOffsetVal) {
if (tDecodeI8(pDecoder, &pOffsetVal->type) < 0) return -1;
if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
if (tDecodeI64(pDecoder, &pOffsetVal->uid) < 0) return -1;
if (tDecodeI64(pDecoder, &pOffsetVal->ts) < 0) return -1;
} else if (pOffsetVal->type == TMQ_OFFSET__LOG) {
@ -5712,7 +5710,7 @@ int32_t tFormatOffset(char *buf, int32_t maxLen, const STqOffsetVal *pVal) {
snprintf(buf, maxLen, "offset(reset to latest)");
} else if (pVal->type == TMQ_OFFSET__LOG) {
snprintf(buf, maxLen, "offset(log) ver:%" PRId64, pVal->version);
} else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
} else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
snprintf(buf, maxLen, "offset(ss data) uid:%" PRId64 ", ts:%" PRId64, pVal->uid, pVal->ts);
} else {
ASSERT(0);
@ -5813,17 +5811,17 @@ int32_t tDecodeDeleteRes(SDecoder *pCoder, SDeleteRes *pRes) {
return 0;
}
int32_t tEncodeSMqMetaRsp(SEncoder* pEncoder, const SMqMetaRsp* pRsp) {
int32_t tEncodeSMqMetaRsp(SEncoder *pEncoder, const SMqMetaRsp *pRsp) {
if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
if(tEncodeI16(pEncoder, pRsp->resMsgType)) return -1;
if(tEncodeBinary(pEncoder, pRsp->metaRsp, pRsp->metaRspLen)) return -1;
if (tEncodeI16(pEncoder, pRsp->resMsgType)) return -1;
if (tEncodeBinary(pEncoder, pRsp->metaRsp, pRsp->metaRspLen)) return -1;
return 0;
}
int32_t tDecodeSMqMetaRsp(SDecoder* pDecoder, SMqMetaRsp* pRsp) {
int32_t tDecodeSMqMetaRsp(SDecoder *pDecoder, SMqMetaRsp *pRsp) {
if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1;
if (tDecodeI16(pDecoder, &pRsp->resMsgType) < 0) return -1;
if (tDecodeBinaryAlloc(pDecoder, &pRsp->metaRsp, (uint64_t*)&pRsp->metaRspLen) < 0) return -1;
if (tDecodeBinaryAlloc(pDecoder, &pRsp->metaRsp, (uint64_t *)&pRsp->metaRspLen) < 0) return -1;
return 0;
}
@ -5893,6 +5891,99 @@ int32_t tDecodeSMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) {
return 0;
}
void tDeleteSMqDataRsp(SMqDataRsp *pRsp) {
taosArrayDestroy(pRsp->blockDataLen);
taosArrayDestroyP(pRsp->blockData, (FDelete)taosMemoryFree);
taosArrayDestroyP(pRsp->blockSchema, (FDelete)tDeleteSSchemaWrapper);
taosArrayDestroyP(pRsp->blockTbName, (FDelete)taosMemoryFree);
}
int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const STaosxRsp *pRsp) {
if (tEncodeSTqOffsetVal(pEncoder, &pRsp->reqOffset) < 0) return -1;
if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->blockNum) < 0) return -1;
if (pRsp->blockNum != 0) {
if (tEncodeI8(pEncoder, pRsp->withTbName) < 0) return -1;
if (tEncodeI8(pEncoder, pRsp->withSchema) < 0) return -1;
for (int32_t i = 0; i < pRsp->blockNum; i++) {
int32_t bLen = *(int32_t *)taosArrayGet(pRsp->blockDataLen, i);
void *data = taosArrayGetP(pRsp->blockData, i);
if (tEncodeBinary(pEncoder, (const uint8_t *)data, bLen) < 0) return -1;
if (pRsp->withSchema) {
SSchemaWrapper *pSW = (SSchemaWrapper *)taosArrayGetP(pRsp->blockSchema, i);
if (tEncodeSSchemaWrapper(pEncoder, pSW) < 0) return -1;
}
if (pRsp->withTbName) {
char *tbName = (char *)taosArrayGetP(pRsp->blockTbName, i);
if (tEncodeCStr(pEncoder, tbName) < 0) return -1;
}
}
}
if (tEncodeI32(pEncoder, pRsp->createTableNum) < 0) return -1;
if (pRsp->createTableNum) {
for (int32_t i = 0; i < pRsp->createTableNum; i++) {
void *createTableReq = taosArrayGetP(pRsp->createTableReq, i);
int32_t createTableLen = *(int32_t *)taosArrayGet(pRsp->createTableLen, i);
if (tEncodeBinary(pEncoder, createTableReq, createTableLen) < 0) return -1;
}
}
return 0;
}
int32_t tDecodeSTaosxRsp(SDecoder *pDecoder, STaosxRsp *pRsp) {
if (tDecodeSTqOffsetVal(pDecoder, &pRsp->reqOffset) < 0) return -1;
if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1;
if (tDecodeI32(pDecoder, &pRsp->blockNum) < 0) return -1;
if (pRsp->blockNum != 0) {
pRsp->blockData = taosArrayInit(pRsp->blockNum, sizeof(void *));
pRsp->blockDataLen = taosArrayInit(pRsp->blockNum, sizeof(int32_t));
if (tDecodeI8(pDecoder, &pRsp->withTbName) < 0) return -1;
if (tDecodeI8(pDecoder, &pRsp->withSchema) < 0) return -1;
if (pRsp->withTbName) {
pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void *));
}
if (pRsp->withSchema) {
pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void *));
}
for (int32_t i = 0; i < pRsp->blockNum; i++) {
void *data;
uint64_t bLen;
if (tDecodeBinaryAlloc(pDecoder, &data, &bLen) < 0) return -1;
taosArrayPush(pRsp->blockData, &data);
int32_t len = bLen;
taosArrayPush(pRsp->blockDataLen, &len);
if (pRsp->withSchema) {
SSchemaWrapper *pSW = (SSchemaWrapper *)taosMemoryCalloc(1, sizeof(SSchemaWrapper));
if (pSW == NULL) return -1;
if (tDecodeSSchemaWrapper(pDecoder, pSW) < 0) return -1;
taosArrayPush(pRsp->blockSchema, &pSW);
}
if (pRsp->withTbName) {
char *tbName;
if (tDecodeCStrAlloc(pDecoder, &tbName) < 0) return -1;
taosArrayPush(pRsp->blockTbName, &tbName);
}
}
}
if (tDecodeI32(pDecoder, &pRsp->createTableNum) < 0) return -1;
if (pRsp->createTableNum) {
pRsp->createTableLen = taosArrayInit(pRsp->createTableNum, sizeof(int32_t));
pRsp->createTableReq = taosArrayInit(pRsp->createTableNum, sizeof(void *));
for (int32_t i = 0; i < pRsp->createTableNum; i++) {
void *pCreate = NULL;
uint64_t len;
if (tDecodeBinaryAlloc(pDecoder, &pCreate, &len) < 0) return -1;
int32_t l = (int32_t)len;
taosArrayPush(pRsp->createTableLen, &l);
taosArrayPush(pRsp->createTableReq, &pCreate);
}
}
return 0;
}
int32_t tEncodeSSingleDeleteReq(SEncoder *pEncoder, const SSingleDeleteReq *pReq) {
if (tEncodeI64(pEncoder, pReq->uid) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->ts) < 0) return -1;

View File

@ -301,7 +301,7 @@ int32_t dmInitServer(SDnode *pDnode) {
SDnodeTrans *pTrans = &pDnode->trans;
SRpcInit rpcInit = {0};
strncpy(rpcInit.localFqdn, tsLocalFqdn, strlen(tsLocalFqdn));
strncpy(rpcInit.localFqdn, tsLocalFqdn, TSDB_FQDN_LEN);
rpcInit.localPort = tsServerPort;
rpcInit.label = "DND-S";
rpcInit.numOfThreads = tsNumOfRpcThreads;

View File

@ -88,7 +88,7 @@ static int32_t convertToRetrieveType(char *name, int32_t len) {
type = TSDB_MGMT_TABLE_VGROUP;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_CONSUMERS, len) == 0) {
type = TSDB_MGMT_TABLE_CONSUMERS;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_SUBSCRIPTIONS, len) == 0) {
} else if (strncasecmp(name, TSDB_INS_TABLE_SUBSCRIPTIONS, len) == 0) {
type = TSDB_MGMT_TABLE_SUBSCRIPTIONS;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_TRANS, len) == 0) {
type = TSDB_MGMT_TABLE_TRANS;
@ -102,9 +102,9 @@ static int32_t convertToRetrieveType(char *name, int32_t len) {
type = TSDB_MGMT_TABLE_QUERIES;
} else if (strncasecmp(name, TSDB_INS_TABLE_VNODES, len) == 0) {
type = TSDB_MGMT_TABLE_VNODES;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_TOPICS, len) == 0) {
} else if (strncasecmp(name, TSDB_INS_TABLE_TOPICS, len) == 0) {
type = TSDB_MGMT_TABLE_TOPICS;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_STREAMS, len) == 0) {
} else if (strncasecmp(name, TSDB_INS_TABLE_STREAMS, len) == 0) {
type = TSDB_MGMT_TABLE_STREAMS;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_APPS, len) == 0) {
type = TSDB_MGMT_TABLE_APPS;

View File

@ -287,6 +287,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
if (consumerVgNum > minVgCnt) {
if (imbCnt < imbConsumerNum) {
if (consumerVgNum == minVgCnt + 1) {
imbCnt++;
continue;
} else {
// pop until equal minVg + 1

View File

@ -763,8 +763,9 @@ static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
int32_t cols = 0;
char topicName[TSDB_TOPIC_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
tNameFromString(&n, pTopic->name, T_NAME_ACCT | T_NAME_DB);
tNameGetDbName(&n, varDataVal(topicName));
strcpy(varDataVal(topicName), mndGetDbStr(pTopic->name));
/*tNameFromString(&n, pTopic->name, T_NAME_ACCT | T_NAME_DB);*/
/*tNameGetDbName(&n, varDataVal(topicName));*/
varDataSetLen(topicName, strlen(varDataVal(topicName)));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)topicName, false);

View File

@ -95,6 +95,7 @@ struct SRSmaStat {
int64_t refId; // shared by fetch tasks
volatile int64_t nBufItems; // number of items in queue buffer
SRWLatch lock; // r/w lock for rsma fs(e.g. qtaskinfo)
volatile int32_t nFetchAll; // active number of fetch all
int8_t triggerStat; // shared by fetch tasks
int8_t commitStat; // 0 not in committing, 1 in committing
SArray *aTaskFile; // qTaskFiles committed recently(for recovery/snapshot r/w)

View File

@ -67,21 +67,21 @@ typedef struct {
// tqExec
typedef struct {
char* qmsg;
char* qmsg;
} STqExecCol;
typedef struct {
int64_t suid;
int64_t suid;
} STqExecTb;
typedef struct {
SHashObj* pFilterOutTbUid;
SHashObj* pFilterOutTbUid;
} STqExecDb;
typedef struct {
int8_t subType;
STqReader* pExecReader;
STqReader* pExecReader;
qTaskInfo_t task;
union {
STqExecCol execCol;
@ -144,7 +144,7 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp*
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum);
// tqExec
int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataRsp* pRsp);
int32_t tqLogScanExec(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, SMqDataRsp* pRsp);
int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataRsp* pRsp);
// tqMeta
@ -175,22 +175,6 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
char* tqOffsetBuildFName(const char* path, int32_t ver);
int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname);
static FORCE_INLINE void tqOffsetResetToData(STqOffsetVal* pOffsetVal, int64_t uid, int64_t ts) {
pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_DATA;
pOffsetVal->uid = uid;
pOffsetVal->ts = ts;
}
static FORCE_INLINE void tqOffsetResetToMeta(STqOffsetVal* pOffsetVal, int64_t uid) {
pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_META;
pOffsetVal->uid = uid;
}
static FORCE_INLINE void tqOffsetResetToLog(STqOffsetVal* pOffsetVal, int64_t ver) {
pOffsetVal->type = TMQ_OFFSET__LOG;
pOffsetVal->version = ver;
}
// tqStream
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask);

View File

@ -172,7 +172,7 @@ static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat) {
TdDirPtr pDir = NULL;
TdDirEntryPtr pDirEntry = NULL;
char dir[TSDB_FILENAME_LEN];
const char *pattern = "v[0-9]+qtaskinfo\\.ver([0-9]+)?$";
const char *pattern = "v[0-9]+qinf\\.v([0-9]+)?$";
regex_t regex;
int code = 0;
@ -312,7 +312,7 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
SSmaStat *pStat = SMA_ENV_STAT(pEnv);
SRSmaStat *pRSmaStat = SMA_STAT_RSMA(pStat);
int32_t nLoops = 0;
int32_t nLoops = 0;
// step 1: set rsma stat
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
@ -327,7 +327,7 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
ASSERT(pRSmaStat->commitAppliedVer > 0);
// step 2: wait for all triggered fetch tasks to finish
nLoops = 0;
while (1) {
if (T_REF_VAL_GET(pStat) == 0) {
smaDebug("vgId:%d, rsma commit, fetch tasks are all finished", SMA_VID(pSma));
@ -351,7 +351,8 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
return TSDB_CODE_FAILED;
}
smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma), (void*)taosGetSelfPthreadId());
smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma),
(void *)taosGetSelfPthreadId());
nLoops = 0;
while (atomic_load_64(&pRSmaStat->nBufItems) > 0) {
++nLoops;
@ -366,7 +367,7 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
}
smaInfo("vgId:%d, rsma commit, operator state commited, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
#if 0 // consuming task of qTaskInfo clone
#if 0 // consuming task of qTaskInfo clone
// step 4: swap queue/qall and iQueue/iQall
// lock
// taosWLockLatch(SMA_ENV_LOCK(pEnv));

View File

@ -16,17 +16,17 @@
#include "sma.h"
#include "tsdb.h"
static int32_t smaEvalDays(SRetention *r, int8_t precision);
static int32_t smaSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type);
static int32_t smaEvalDays(SVnode *pVnode, SRetention *r, int8_t level, int8_t precision, int32_t duration);
static int32_t smaSetKeepCfg(SVnode *pVnode, STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type);
static int32_t rsmaRestore(SSma *pSma);
#define SMA_SET_KEEP_CFG(l) \
#define SMA_SET_KEEP_CFG(v, l) \
do { \
SRetention *r = &pCfg->retentions[l]; \
pKeepCfg->keep2 = convertTimeFromPrecisionToUnit(r->keep, pCfg->precision, TIME_UNIT_MINUTE); \
pKeepCfg->keep0 = pKeepCfg->keep2; \
pKeepCfg->keep1 = pKeepCfg->keep2; \
pKeepCfg->days = smaEvalDays(r, pCfg->precision); \
pKeepCfg->days = smaEvalDays(v, pCfg->retentions, l, pCfg->precision, pCfg->days); \
} while (0)
#define SMA_OPEN_RSMA_IMPL(v, l) \
@ -38,51 +38,78 @@ static int32_t rsmaRestore(SSma *pSma);
} \
break; \
} \
smaSetKeepCfg(&keepCfg, pCfg, TSDB_TYPE_RSMA_L##l); \
smaSetKeepCfg(v, &keepCfg, pCfg, TSDB_TYPE_RSMA_L##l); \
if (tsdbOpen(v, &SMA_RSMA_TSDB##l(pSma), VNODE_RSMA##l##_DIR, &keepCfg) < 0) { \
goto _err; \
} \
} while (0)
#define RETENTION_DAYS_SPLIT_RATIO 10
#define RETENTION_DAYS_SPLIT_MIN 1
#define RETENTION_DAYS_SPLIT_MAX 30
/**
* @brief Evaluate days(duration) for rsma level 1/2/3.
* 1) level 1: duration from "create database"
* 2) level 2/3: duration * (freq/freqL1)
* @param pVnode
* @param r
* @param level
* @param precision
* @param duration
* @return int32_t
*/
static int32_t smaEvalDays(SVnode *pVnode, SRetention *r, int8_t level, int8_t precision, int32_t duration) {
int32_t freqDuration = convertTimeFromPrecisionToUnit((r + TSDB_RETENTION_L0)->freq, precision, TIME_UNIT_MINUTE);
int32_t keepDuration = convertTimeFromPrecisionToUnit((r + TSDB_RETENTION_L0)->keep, precision, TIME_UNIT_MINUTE);
int32_t days = duration; // min
static int32_t smaEvalDays(SRetention *r, int8_t precision) {
int32_t keepDays = convertTimeFromPrecisionToUnit(r->keep, precision, TIME_UNIT_DAY);
int32_t freqDays = convertTimeFromPrecisionToUnit(r->freq, precision, TIME_UNIT_DAY);
int32_t days = keepDays / RETENTION_DAYS_SPLIT_RATIO;
if (days <= RETENTION_DAYS_SPLIT_MIN) {
days = RETENTION_DAYS_SPLIT_MIN;
if (days < freqDays) {
days = freqDays + 1;
}
} else {
if (days > RETENTION_DAYS_SPLIT_MAX) {
days = RETENTION_DAYS_SPLIT_MAX;
}
if (days < freqDays) {
days = freqDays + 1;
}
if (days < freqDuration) {
days = freqDuration;
}
return days * 1440;
if (days > keepDuration) {
days = keepDuration;
}
if (level == TSDB_RETENTION_L0) {
goto end;
}
ASSERT(level >= TSDB_RETENTION_L1 && level <= TSDB_RETENTION_L2);
freqDuration = convertTimeFromPrecisionToUnit((r + level)->freq, precision, TIME_UNIT_MINUTE);
keepDuration = convertTimeFromPrecisionToUnit((r + level)->keep, precision, TIME_UNIT_MINUTE);
int32_t nFreqTimes = (r + level)->freq / (r + TSDB_RETENTION_L0)->freq;
days *= (nFreqTimes > 1 ? nFreqTimes : 1);
if (days > keepDuration) {
days = keepDuration;
}
if (days > TSDB_MAX_DURATION_PER_FILE) {
days = TSDB_MAX_DURATION_PER_FILE;
}
if (days < freqDuration) {
days = freqDuration;
}
end:
smaInfo("vgId:%d, evaluated duration for level %" PRIi8 " is %d, raw val:%d", TD_VID(pVnode), level + 1, days, duration);
return days;
}
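Rough numeric walk-through of the evaluation above, not part of this patch and using made-up retention values: level 1 keeps the database duration clamped to the L0 [freq, keep] window, and levels 2/3 scale that by freq(level)/freq(L0) before re-clamping. Everything below is already in minutes, matching TIME_UNIT_MINUTE, and MAX_DURATION_PER_FILE is a local stand-in for TSDB_MAX_DURATION_PER_FILE.

#include <stdio.h>

#define MAX_DURATION_PER_FILE (3650 * 1440)   /* stand-in, in minutes */

static int evalDaysMinutes(int level, int duration, const int freq[3], const int keep[3]) {
  int days = duration;
  if (days < freq[0]) days = freq[0];
  if (days > keep[0]) days = keep[0];
  if (level == 0) return days;                /* TSDB_RETENTION_L0 */
  int nFreqTimes = freq[level] / freq[0];
  days *= (nFreqTimes > 1 ? nFreqTimes : 1);
  if (days > keep[level]) days = keep[level];
  if (days > MAX_DURATION_PER_FILE) days = MAX_DURATION_PER_FILE;
  if (days < freq[level]) days = freq[level];
  return days;
}

int main(void) {
  int freq[3] = {1, 60, 1440};                     /* 1 min, 1 h, 1 day aggregation steps */
  int keep[3] = {7 * 1440, 30 * 1440, 365 * 1440}; /* 7 d, 30 d, 365 d retention */
  int duration = 1 * 1440;                         /* database duration: 1 day */
  for (int l = 0; l < 3; l++) {
    int d = evalDaysMinutes(l, duration, freq, keep);
    printf("level %d -> %d minutes (%.1f days)\n", l + 1, d, d / 1440.0);
  }
  return 0;                                        /* prints 1.0, 30.0 and 365.0 days */
}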
int smaSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type) {
int smaSetKeepCfg(SVnode *pVnode, STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type) {
pKeepCfg->precision = pCfg->precision;
switch (type) {
case TSDB_TYPE_TSMA:
ASSERT(0);
break;
case TSDB_TYPE_RSMA_L0:
SMA_SET_KEEP_CFG(0);
SMA_SET_KEEP_CFG(pVnode, 0);
break;
case TSDB_TYPE_RSMA_L1:
SMA_SET_KEEP_CFG(1);
SMA_SET_KEEP_CFG(pVnode, 1);
break;
case TSDB_TYPE_RSMA_L2:
SMA_SET_KEEP_CFG(2);
SMA_SET_KEEP_CFG(pVnode, 2);
break;
default:
ASSERT(0);
@ -148,11 +175,11 @@ int32_t smaClose(SSma *pSma) {
/**
* @brief rsma env restore
*
* @param pSma
* @param type
* @param committedVer
* @return int32_t
*
* @param pSma
* @param type
* @param committedVer
* @return int32_t
*/
int32_t tdRsmaRestore(SSma *pSma, int8_t type, int64_t committedVer) {
ASSERT(VND_IS_RSMA(pSma->pVnode));

View File

@ -28,11 +28,10 @@ SSmaMgmt smaMgmt = {
.rsetId = -1,
};
#define TD_QTASKINFO_FNAME_PREFIX "qtaskinfo.ver"
#define TD_RSMAINFO_DEL_FILE "rsmainfo.del"
#define TD_QTASKINFO_FNAME_PREFIX "qinf.v"
typedef struct SRSmaQTaskInfoItem SRSmaQTaskInfoItem;
typedef struct SRSmaQTaskInfoIter SRSmaQTaskInfoIter;
typedef struct SRSmaExecQItem SRSmaExecQItem;
static int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid);
static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids);
@ -83,11 +82,6 @@ struct SRSmaQTaskInfoIter {
int32_t nBufPos;
};
struct SRSmaExecQItem {
void *pRSmaInfo;
void *qall;
};
void tdRSmaQTaskInfoGetFileName(int32_t vgId, int64_t version, char *outputName) {
tdGetVndFileName(vgId, NULL, VNODE_RSMA_DIR, TD_QTASKINFO_FNAME_PREFIX, version, outputName);
}
@ -1084,9 +1078,6 @@ static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int8_t type, int64_t qTa
goto _err;
}
SSmaEnv *pRSmaEnv = pSma->pRSmaEnv;
SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pRSmaEnv);
SRSmaQTaskInfoIter fIter = {0};
if (tdRSmaQTaskInfoIterInit(&fIter, &tFile) < 0) {
tdRSmaQTaskInfoIterDestroy(&fIter);
@ -1714,9 +1705,14 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
smaDebug("vgId:%d, batchSize:%d, execType:%" PRIi8, SMA_VID(pSma), qallItemSize, type);
}
if (atomic_val_compare_exchange_8(RSMA_COMMIT_STAT(pRSmaStat), 0, 2) == 0) {
int8_t oldStat = atomic_val_compare_exchange_8(RSMA_COMMIT_STAT(pRSmaStat), 0, 2);
if (oldStat == 0 ||
((oldStat == 2) && atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat)) < TASK_TRIGGER_STAT_PAUSED)) {
atomic_fetch_add_32(&pRSmaStat->nFetchAll, 1);
tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr);
atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 0);
if (0 == atomic_sub_fetch_32(&pRSmaStat->nFetchAll, 1)) {
atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 0);
}
}
if (qallItemSize > 0) {
@ -1738,7 +1734,7 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
break;
}
}
ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0));
atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0);
}
}
if (type == RSMA_EXEC_COMMIT) {
@ -1767,7 +1763,7 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
}
// tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr);
ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0));
atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0);
}
}
ASSERT(taosQueueItemSize(pInfo->iQueue) == 0);

View File

@ -357,8 +357,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
TD_VID(pTq->pVnode), formatBuf);
} else {
if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
if (pReq->useSnapshot){
if (pHandle->fetchMeta){
if (pReq->useSnapshot) {
if (pHandle->fetchMeta) {
tqOffsetResetToMeta(&fetchOffsetNew, 0);
} else {
tqOffsetResetToData(&fetchOffsetNew, 0, 0);
@ -373,43 +373,47 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
code = -1;
}
goto OVER;
tDeleteSMqDataRsp(&dataRsp);
return code;
} else if (reqOffset.type == TMQ_OFFSET__RESET_NONE) {
tqError("tmq poll: subkey %s, no offset committed for consumer %" PRId64
" in vg %d, subkey %s, reset none failed",
pHandle->subKey, consumerId, TD_VID(pTq->pVnode), pReq->subKey);
terrno = TSDB_CODE_TQ_NO_COMMITTED_OFFSET;
code = -1;
goto OVER;
tDeleteSMqDataRsp(&dataRsp);
return code;
}
}
}
if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN || fetchOffsetNew.type != TMQ_OFFSET__LOG){
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN || fetchOffsetNew.type != TMQ_OFFSET__LOG) {
SMqMetaRsp metaRsp = {0};
tqScan(pTq, pHandle, &dataRsp, &metaRsp, &fetchOffsetNew);
if(metaRsp.metaRspLen > 0){
if (metaRsp.metaRspLen > 0) {
if (tqSendMetaPollRsp(pTq, pMsg, pReq, &metaRsp) < 0) {
code = -1;
}
tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send meta offset type:%d,uid:%ld,version:%ld", consumerId, pHandle->subKey,
TD_VID(pTq->pVnode), metaRsp.rspOffset.type, metaRsp.rspOffset.uid, metaRsp.rspOffset.version);
tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send meta offset type:%d,uid:%ld,version:%ld", consumerId,
pHandle->subKey, TD_VID(pTq->pVnode), metaRsp.rspOffset.type, metaRsp.rspOffset.uid,
metaRsp.rspOffset.version);
taosMemoryFree(metaRsp.metaRsp);
goto OVER;
}
if (dataRsp.blockNum > 0){
if (dataRsp.blockNum > 0) {
if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
code = -1;
}
goto OVER;
}else{
} else {
fetchOffsetNew = dataRsp.rspOffset;
}
tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send data blockNum:%d, offset type:%d,uid:%ld,version:%ld", consumerId, pHandle->subKey,
TD_VID(pTq->pVnode), dataRsp.blockNum, dataRsp.rspOffset.type, dataRsp.rspOffset.uid, dataRsp.rspOffset.version);
tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send data blockNum:%d, offset type:%d,uid:%ld,version:%ld",
consumerId, pHandle->subKey, TD_VID(pTq->pVnode), dataRsp.blockNum, dataRsp.rspOffset.type,
dataRsp.rspOffset.uid, dataRsp.rspOffset.version);
}
if (pHandle->execHandle.subType != TOPIC_SUB_TYPE__COLUMN && fetchOffsetNew.type == TMQ_OFFSET__LOG) {
@ -426,7 +430,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
consumerEpoch = atomic_load_32(&pHandle->epoch);
if (consumerEpoch > reqEpoch) {
tqWarn("tmq poll: consumer %" PRId64 " (epoch %d), subkey %s, vg %d offset %" PRId64
", found new consumer epoch %d, discard req epoch %d",
", found new consumer epoch %d, discard req epoch %d",
consumerId, pReq->epoch, pHandle->subKey, TD_VID(pTq->pVnode), fetchVer, consumerEpoch, reqEpoch);
break;
}
@ -449,7 +453,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
if (pHead->msgType == TDMT_VND_SUBMIT) {
SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
if (tqLogScanExec(pTq, &pHandle->execHandle, pCont, &dataRsp) < 0) {
if (tqLogScanExec(pTq, pHandle, pCont, &dataRsp) < 0) {
/*ASSERT(0);*/
}
// TODO batch optimization:
@ -490,18 +494,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
OVER:
if (pCkHead) taosMemoryFree(pCkHead);
// TODO wrap in destroy func
taosArrayDestroy(dataRsp.blockDataLen);
taosArrayDestroyP(dataRsp.blockData, (FDelete)taosMemoryFree);
if (dataRsp.withSchema) {
taosArrayDestroyP(dataRsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
}
if (dataRsp.withTbName) {
taosArrayDestroyP(dataRsp.blockTbName, (FDelete)taosMemoryFree);
}
tDeleteSMqDataRsp(&dataRsp);
return code;
}
@ -629,9 +622,9 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
tqReaderSetTbUidList(pHandle->execHandle.pExecReader, tbUidList);
taosArrayDestroy(tbUidList);
buildSnapContext(handle.meta, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta, (SSnapContext **)(&handle.sContext));
pHandle->execHandle.task =
qCreateQueueExecTaskInfo(NULL, &handle, NULL, NULL);
buildSnapContext(handle.meta, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta,
(SSnapContext**)(&handle.sContext));
pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, NULL, NULL);
}
taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
tqDebug("try to persist handle %s consumer %" PRId64, req.subKey, pHandle->consumerId);

View File

@ -60,6 +60,46 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp) {
return 0;
}
int64_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset) {
const STqExecHandle* pExec = &pHandle->execHandle;
ASSERT(pExec->subType == TOPIC_SUB_TYPE__COLUMN);
qTaskInfo_t task = pExec->task;
if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
tqDebug("prepare scan failed, return");
if (pOffset->type == TMQ_OFFSET__LOG) {
pRsp->rspOffset = *pOffset;
return 0;
} else {
tqOffsetResetToLog(pOffset, pHandle->snapshotVer);
if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
tqDebug("prepare scan failed, return");
pRsp->rspOffset = *pOffset;
return 0;
}
}
}
int32_t rowCnt = 0;
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
tqDebug("tmq task start to execute");
if (qExecTask(task, &pDataBlock, &ts) < 0) {
ASSERT(0);
}
tqDebug("tmq task execute end, get %p", pDataBlock);
if (pDataBlock) {
tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols);
pRsp->blockNum++;
}
}
return 0;
}
int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* pOffset) {
const STqExecHandle* pExec = &pHandle->execHandle;
qTaskInfo_t task = pExec->task;
@ -102,18 +142,18 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp*
taosArrayPush(pRsp->blockTbName, &tbName);
}
}
if(pRsp->withSchema){
if (pRsp->withSchema) {
if (pOffset->type == TMQ_OFFSET__LOG) {
tqAddBlockSchemaToRsp(pExec, pRsp);
}else{
} else {
SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task));
taosArrayPush(pRsp->blockSchema, &pSW);
}
}
if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN){
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols);
}else{
} else {
tqAddBlockDataToRsp(pDataBlock, pRsp, taosArrayGetSize(pDataBlock->pDataBlock));
}
pRsp->blockNum++;
@ -125,17 +165,9 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp*
}
}
if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN){
if (pRsp->blockNum == 0 && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
tqDebug("vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode),
pHandle->snapshotVer + 1);
tqOffsetResetToLog(pOffset, pHandle->snapshotVer);
qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType);
continue;
}
}else{
if (pDataBlock == NULL && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA){
if(qStreamExtractPrepareUid(task) != 0){
if (pHandle->execHandle.subType != TOPIC_SUB_TYPE__COLUMN) {
if (pDataBlock == NULL && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
if (qStreamExtractPrepareUid(task) != 0) {
continue;
}
tqDebug("tmqsnap vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode),
@ -143,13 +175,13 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp*
break;
}
if (pRsp->blockNum > 0){
if (pRsp->blockNum > 0) {
tqDebug("tmqsnap task exec exited, get data");
break;
}
SMqMetaRsp* tmp = qStreamExtractMetaMsg(task);
if(tmp->rspOffset.type == TMQ_OFFSET__SNAPSHOT_DATA){
if (tmp->rspOffset.type == TMQ_OFFSET__SNAPSHOT_DATA) {
tqOffsetResetToData(pOffset, tmp->rspOffset.uid, tmp->rspOffset.ts);
qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType);
tmp->rspOffset.type = TMQ_OFFSET__SNAPSHOT_META;
@ -173,57 +205,8 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp*
return 0;
}
#if 0
int32_t tqScanSnapshot(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffsetVal offset, int32_t workerId) {
ASSERT(pExec->subType == TOPIC_SUB_TYPE__COLUMN);
qTaskInfo_t task = pExec->execCol.task[workerId];
if (qStreamPrepareTsdbScan(task, offset.uid, offset.ts) < 0) {
ASSERT(0);
}
int32_t rowCnt = 0;
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
if (qExecTask(task, &pDataBlock, &ts) < 0) {
ASSERT(0);
}
if (pDataBlock == NULL) break;
ASSERT(pDataBlock->info.rows != 0);
ASSERT(taosArrayGetSize(pDataBlock->pDataBlock) != 0);
tqAddBlockDataToRsp(pDataBlock, pRsp);
if (pRsp->withTbName) {
pRsp->withTbName = 0;
#if 0
int64_t uid;
int64_t ts;
if (qGetStreamScanStatus(task, &uid, &ts) < 0) {
ASSERT(0);
}
tqAddTbNameToRsp(pTq, uid, pRsp);
#endif
}
pRsp->blockNum++;
rowCnt += pDataBlock->info.rows;
if (rowCnt >= 4096) break;
}
int64_t uid;
int64_t ts;
if (qGetStreamScanStatus(task, &uid, &ts) < 0) {
ASSERT(0);
}
tqOffsetResetToData(&pRsp->rspOffset, uid, ts);
return 0;
}
#endif
int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataRsp* pRsp) {
int32_t tqLogScanExec(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, SMqDataRsp* pRsp) {
STqExecHandle* pExec = &pHandle->execHandle;
ASSERT(pExec->subType != TOPIC_SUB_TYPE__COLUMN);
if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
@ -268,6 +251,28 @@ int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataR
tqAddBlockSchemaToRsp(pExec, pRsp);
pRsp->blockNum++;
}
#if 0
if (pHandle->fetchMeta && pRsp->blockNum) {
SSubmitMsgIter iter = {0};
tInitSubmitMsgIter(pReq, &iter);
STaosxRsp* pXrsp = (STaosxRsp*)pRsp;
while (1) {
SSubmitBlk* pBlk = NULL;
if (tGetSubmitMsgNext(&iter, &pBlk) < 0) return -1;
if (pBlk->schemaLen > 0) {
if (pXrsp->createTableNum == 0) {
pXrsp->createTableLen = taosArrayInit(0, sizeof(int32_t));
pXrsp->createTableReq = taosArrayInit(0, sizeof(void*));
}
void* createReq = taosMemoryCalloc(1, pBlk->schemaLen);
memcpy(createReq, pBlk->data, pBlk->schemaLen);
taosArrayPush(pXrsp->createTableLen, &pBlk->schemaLen);
taosArrayPush(pXrsp->createTableReq, &createReq);
pXrsp->createTableNum++;
}
}
}
#endif
}
if (pRsp->blockNum == 0) {

View File

@ -18,12 +18,25 @@
int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeCStr(pEncoder, pHandle->subKey) < 0) return -1;
if (tEncodeI8(pEncoder, pHandle->fetchMeta) < 0) return -1;
if (tEncodeI64(pEncoder, pHandle->consumerId) < 0) return -1;
if (tEncodeI64(pEncoder, pHandle->snapshotVer) < 0) return -1;
if (tEncodeI32(pEncoder, pHandle->epoch) < 0) return -1;
if (tEncodeI8(pEncoder, pHandle->execHandle.subType) < 0) return -1;
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
if (tEncodeCStr(pEncoder, pHandle->execHandle.execCol.qmsg) < 0) return -1;
} else if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB){
int32_t size = taosHashGetSize(pHandle->execHandle.execDb.pFilterOutTbUid);
if (tEncodeI32(pEncoder, size) < 0) return -1;
void *pIter = NULL;
pIter = taosHashIterate(pHandle->execHandle.execDb.pFilterOutTbUid, pIter);
while(pIter){
int64_t *tbUid = (int64_t *)taosHashGetKey(pIter, NULL);
if (tEncodeI64(pEncoder, *tbUid) < 0) return -1;
pIter = taosHashIterate(pHandle->execHandle.execDb.pFilterOutTbUid, pIter);
}
} else if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE){
if (tEncodeI64(pEncoder, pHandle->execHandle.execTb.suid) < 0) return -1;
}
tEndEncode(pEncoder);
return pEncoder->pos;
@ -32,12 +45,25 @@ int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) {
int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) {
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeCStrTo(pDecoder, pHandle->subKey) < 0) return -1;
if (tDecodeI8(pDecoder, &pHandle->fetchMeta) < 0) return -1;
if (tDecodeI64(pDecoder, &pHandle->consumerId) < 0) return -1;
if (tDecodeI64(pDecoder, &pHandle->snapshotVer) < 0) return -1;
if (tDecodeI32(pDecoder, &pHandle->epoch) < 0) return -1;
if (tDecodeI8(pDecoder, &pHandle->execHandle.subType) < 0) return -1;
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
if (tDecodeCStrAlloc(pDecoder, &pHandle->execHandle.execCol.qmsg) < 0) return -1;
}else if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB){
pHandle->execHandle.execDb.pFilterOutTbUid =
taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
int32_t size = 0;
if (tDecodeI32(pDecoder, &size) < 0) return -1;
for(int32_t i = 0; i < size; i++){
int64_t tbUid = 0;
if (tDecodeI64(pDecoder, &tbUid) < 0) return -1;
taosHashPut(pHandle->execHandle.execDb.pFilterOutTbUid, &tbUid, sizeof(int64_t), NULL, 0);
}
} else if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE){
if (tDecodeI64(pDecoder, &pHandle->execHandle.execTb.suid) < 0) return -1;
}
tEndDecode(pDecoder);
return 0;
@ -267,14 +293,28 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
ASSERT(scanner);
handle.execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner);
ASSERT(handle.execHandle.pExecReader);
} else {
} else if (handle.execHandle.subType == TOPIC_SUB_TYPE__DB) {
handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
handle.execHandle.execDb.pFilterOutTbUid =
taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
// handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode);
buildSnapContext(reader.meta, reader.version, 0, handle.execHandle.subType, handle.fetchMeta, (SSnapContext **)(&reader.sContext));
handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode);
buildSnapContext(reader.meta, reader.version, 0, handle.execHandle.subType, handle.fetchMeta, (SSnapContext **)(&reader.sContext));
handle.execHandle.task =
qCreateQueueExecTaskInfo(NULL, &reader, NULL, NULL);
} else if (handle.execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
SArray* tbUidList = taosArrayInit(0, sizeof(int64_t));
vnodeGetCtbIdList(pTq->pVnode, handle.execHandle.execTb.suid, tbUidList);
tqDebug("vgId:%d, tq try to get all ctb, suid:%" PRId64, pTq->pVnode->config.vgId, handle.execHandle.execTb.suid);
for (int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) {
int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
tqDebug("vgId:%d, idx %d, uid:%" PRId64, TD_VID(pTq->pVnode), i, tbUid);
}
handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode);
tqReaderSetTbUidList(handle.execHandle.pExecReader, tbUidList);
taosArrayDestroy(tbUidList);
buildSnapContext(reader.meta, reader.version, handle.execHandle.execTb.suid, handle.execHandle.subType, handle.fetchMeta, (SSnapContext **)(&reader.sContext));
handle.execHandle.task =
qCreateQueueExecTaskInfo(NULL, &reader, NULL, NULL);
}
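Both the encoder and decoder above branch on execHandle.subType after writing/reading it, so only the fields a subscription kind actually owns are persisted: the qmsg for column topics, the filter-out uid set for db topics, the suid for table topics. A reduced sketch of that tag-then-branch layout, using made-up DemoHandle/demoEncode names rather than the real SEncoder API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative subscription kinds mirroring TOPIC_SUB_TYPE__COLUMN/DB/TABLE. */
enum { SUB_COLUMN = 1, SUB_DB = 2, SUB_TABLE = 3 };

typedef struct {
  uint8_t subType;
  int64_t suid;     /* only meaningful for SUB_TABLE */
  char    qmsg[32]; /* only meaningful for SUB_COLUMN */
} DemoHandle;

/* Encode the tag first, then only the fields that this subtype owns. */
static size_t demoEncode(const DemoHandle *h, uint8_t *buf) {
  size_t pos = 0;
  buf[pos++] = h->subType;
  if (h->subType == SUB_COLUMN) {
    size_t len = strlen(h->qmsg) + 1;
    memcpy(buf + pos, h->qmsg, len);
    pos += len;
  } else if (h->subType == SUB_TABLE) {
    memcpy(buf + pos, &h->suid, sizeof(h->suid));
    pos += sizeof(h->suid);
  }
  return pos;
}

/* Decode reads the tag and rebuilds exactly the state that was written. */
static void demoDecode(const uint8_t *buf, DemoHandle *h) {
  size_t pos = 0;
  memset(h, 0, sizeof(*h));
  h->subType = buf[pos++];
  if (h->subType == SUB_COLUMN) {
    strcpy(h->qmsg, (const char *)(buf + pos));
  } else if (h->subType == SUB_TABLE) {
    memcpy(&h->suid, buf + pos, sizeof(h->suid));
  }
}

int main(void) {
  uint8_t    buf[64];
  DemoHandle in = {.subType = SUB_TABLE, .suid = 42}, out;
  demoEncode(&in, buf);
  demoDecode(buf, &out);
  printf("subType=%d suid=%lld\n", out.subType, (long long)out.suid);
  return 0;
}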

View File

@ -15,6 +15,162 @@
#include "tq.h"
bool isValValidForTable(STqHandle* pHandle, SWalCont *pHead){
if(pHandle->execHandle.subType != TOPIC_SUB_TYPE__TABLE){
return true;
}
int16_t msgType = pHead->msgType;
char* body = pHead->body;
int32_t bodyLen = pHead->bodyLen;
int64_t tbSuid = pHandle->execHandle.execTb.suid;
int64_t realTbSuid = 0;
SDecoder coder;
void* data = POINTER_SHIFT(body, sizeof(SMsgHead));
int32_t len = bodyLen - sizeof(SMsgHead);
tDecoderInit(&coder, data, len);
if (msgType == TDMT_VND_CREATE_STB || msgType == TDMT_VND_ALTER_STB) {
SVCreateStbReq req = {0};
if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
goto end;
}
realTbSuid = req.suid;
} else if (msgType == TDMT_VND_DROP_STB) {
SVDropStbReq req = {0};
if (tDecodeSVDropStbReq(&coder, &req) < 0) {
goto end;
}
realTbSuid = req.suid;
} else if (msgType == TDMT_VND_CREATE_TABLE) {
SVCreateTbBatchReq req = {0};
if (tDecodeSVCreateTbBatchReq(&coder, &req) < 0) {
goto end;
}
int32_t needRebuild = 0;
SVCreateTbReq* pCreateReq = NULL;
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
pCreateReq = req.pReqs + iReq;
if(pCreateReq->type == TSDB_CHILD_TABLE && pCreateReq->ctb.suid == tbSuid){
needRebuild++;
}
}
if(needRebuild == 0){
// do nothing
}else if(needRebuild == req.nReqs){
realTbSuid = tbSuid;
}else{
realTbSuid = tbSuid;
SVCreateTbBatchReq reqNew = {0};
reqNew.pArray = taosArrayInit(req.nReqs, sizeof(struct SVCreateTbReq));
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
pCreateReq = req.pReqs + iReq;
if(pCreateReq->type == TSDB_CHILD_TABLE && pCreateReq->ctb.suid == tbSuid){
reqNew.nReqs++;
taosArrayPush(reqNew.pArray, pCreateReq);
}
}
int tlen;
int32_t ret = 0;
tEncodeSize(tEncodeSVCreateTbBatchReq, &reqNew, tlen, ret);
void* buf = taosMemoryMalloc(tlen);
if (NULL == buf) {
taosArrayDestroy(reqNew.pArray);
goto end;
}
SEncoder coderNew = {0};
tEncoderInit(&coderNew, buf, tlen - sizeof(SMsgHead));
tEncodeSVCreateTbBatchReq(&coderNew, &reqNew);
tEncoderClear(&coderNew);
memcpy(pHead->body + sizeof(SMsgHead), buf, tlen);
pHead->bodyLen = tlen + sizeof(SMsgHead);
taosMemoryFree(buf);
taosArrayDestroy(reqNew.pArray);
}
} else if (msgType == TDMT_VND_ALTER_TABLE) {
SVAlterTbReq req = {0};
if (tDecodeSVAlterTbReq(&coder, &req) < 0) {
goto end;
}
SMetaReader mr = {0};
metaReaderInit(&mr, pHandle->execHandle.pExecReader->pVnodeMeta, 0);
if (metaGetTableEntryByName(&mr, req.tbName) < 0) {
metaReaderClear(&mr);
goto end;
}
realTbSuid = mr.me.ctbEntry.suid;
metaReaderClear(&mr);
} else if (msgType == TDMT_VND_DROP_TABLE) {
SVDropTbBatchReq req = {0};
if (tDecodeSVDropTbBatchReq(&coder, &req) < 0) {
goto end;
}
int32_t needRebuild = 0;
SVDropTbReq* pDropReq = NULL;
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
pDropReq = req.pReqs + iReq;
if(pDropReq->suid == tbSuid){
needRebuild++;
}
}
if(needRebuild == 0){
// do nothing
}else if(needRebuild == req.nReqs){
realTbSuid = tbSuid;
}else{
realTbSuid = tbSuid;
SVDropTbBatchReq reqNew = {0};
reqNew.pArray = taosArrayInit(req.nReqs, sizeof(SVDropTbReq));
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
pDropReq = req.pReqs + iReq;
if(pDropReq->suid == tbSuid){
reqNew.nReqs++;
taosArrayPush(reqNew.pArray, pDropReq);
}
}
int tlen;
int32_t ret = 0;
tEncodeSize(tEncodeSVDropTbBatchReq, &reqNew, tlen, ret);
void* buf = taosMemoryMalloc(tlen);
if (NULL == buf) {
taosArrayDestroy(reqNew.pArray);
goto end;
}
SEncoder coderNew = {0};
tEncoderInit(&coderNew, buf, tlen - sizeof(SMsgHead));
tEncodeSVDropTbBatchReq(&coderNew, &reqNew);
tEncoderClear(&coderNew);
memcpy(pHead->body + sizeof(SMsgHead), buf, tlen);
pHead->bodyLen = tlen + sizeof(SMsgHead);
taosMemoryFree(buf);
taosArrayDestroy(reqNew.pArray);
}
} else if (msgType == TDMT_VND_DELETE) {
SDeleteRes req = {0};
if (tDecodeDeleteRes(&coder, &req) < 0) {
goto end;
}
realTbSuid = req.suid;
} else{
ASSERT(0);
}
end:
tDecoderClear(&coder);
return tbSuid == realTbSuid;
}
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** ppCkHead) {
int32_t code = 0;
taosThreadMutexLock(&pHandle->pWalReader->mutex);
@ -53,9 +209,11 @@ int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea
code = -1;
goto END;
}
*fetchOffset = offset;
code = 0;
goto END;
if(isValValidForTable(pHandle, pHead)){
*fetchOffset = offset;
code = 0;
goto END;
}
}
}
code = walSkipFetchBody(pHandle->pWalReader, *ppCkHead);
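isValValidForTable gates what a TOPIC_SUB_TYPE__TABLE subscription sees in the WAL: it decodes each entry, compares the suid, and for batched create/drop requests re-encodes a batch keeping only the matching children before tqFetchLog hands the offset back. A toy sketch of that filter-and-rebuild step, using hypothetical DemoSubReq/demoFilterBatch names in place of the real SVCreateTbBatchReq encoding:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for one sub-request of a batched create/drop. */
typedef struct {
  int64_t suid; /* super table the child table belongs to */
  int     id;
} DemoSubReq;

/* Keep only the sub-requests that belong to the subscribed super table,
 * mirroring how isValValidForTable rebuilds the batch when only part of it
 * matches. Returns the number of surviving entries; 0 means the whole WAL
 * entry can be skipped. */
static int demoFilterBatch(const DemoSubReq *in, int n, int64_t subSuid,
                           DemoSubReq *out) {
  int kept = 0;
  for (int i = 0; i < n; i++) {
    if (in[i].suid == subSuid) {
      out[kept++] = in[i];
    }
  }
  return kept;
}

/* A WAL entry is worth delivering only if something in it matches. */
static bool demoIsValidForTable(const DemoSubReq *in, int n, int64_t subSuid) {
  DemoSubReq tmp[16];
  return demoFilterBatch(in, n, subSuid, tmp) > 0;
}

int main(void) {
  DemoSubReq batch[] = {{.suid = 7, .id = 1}, {.suid = 9, .id = 2}, {.suid = 7, .id = 3}};
  DemoSubReq kept[16];
  int        n = demoFilterBatch(batch, 3, 7, kept);
  printf("kept=%d valid=%d\n", n, demoIsValidForTable(batch, 3, 7));
  return 0;
}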

View File

@ -471,6 +471,7 @@ static int32_t setCreateTBResultIntoDataBlock(SSDataBlock* pBlock, SDbCfgInfo* p
len += sprintf(buf2 + VARSTR_HEADER_SIZE, "CREATE TABLE `%s` (", tbName);
appendColumnFields(buf2, &len, pCfg);
len += sprintf(buf2 + VARSTR_HEADER_SIZE + len, ")");
appendTableOptions(buf2, &len, pDbCfg, pCfg);
}
varDataLen(buf2) = len;

View File

@ -143,6 +143,8 @@ typedef struct {
STqOffsetVal prepareStatus; // for tmq
STqOffsetVal lastStatus; // for tmq
SMqMetaRsp metaRsp; // for tmq fetching meta
int64_t snapshotVer;
SSchemaWrapper *schema;
char tbName[TSDB_TABLE_NAME_LEN];
SSDataBlock* pullOverBlk; // for streaming
@ -303,6 +305,7 @@ typedef struct SAggSupporter {
char* keyBuf; // window key buffer
SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
int32_t currentPageId; // current write page id
} SAggSupporter;
typedef struct {
@ -327,7 +330,6 @@ typedef struct STableScanInfo {
SQueryTableDataCond cond;
int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
int32_t dataBlockLoadFlag;
// SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded.
SSampleExecInfo sample; // sample execution info
int32_t currentGroupId;
int32_t currentTable;
@ -431,6 +433,7 @@ typedef struct SStreamAggSupporter {
char* pKeyBuf; // window key buffer
SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
int32_t currentPageId; // buffer page that is active
SSDataBlock* pScanBlock;
} SStreamAggSupporter;
@ -485,24 +488,23 @@ typedef struct SStreamScanInfo {
STimeWindowAggSupp twAggSup;
SSDataBlock* pUpdateDataRes;
// status for tmq
// SSchemaWrapper schema;
SNodeList* pGroupTags;
SNode* pTagCond;
SNode* pTagIndexCond;
SNodeList* pGroupTags;
SNode* pTagCond;
SNode* pTagIndexCond;
} SStreamScanInfo;
typedef struct SStreamRawScanInfo{
// int8_t subType;
// bool withMeta;
// int64_t suid;
// int64_t snapVersion;
// void *metaInfo;
// void *dataInfo;
SVnode* vnode;
SSDataBlock pRes; // result SSDataBlock
STsdbReader* dataReader;
SSnapContext* sContext;
}SStreamRawScanInfo;
typedef struct {
// int8_t subType;
// bool withMeta;
// int64_t suid;
// int64_t snapVersion;
// void *metaInfo;
// void *dataInfo;
SVnode* vnode;
SSDataBlock pRes; // result SSDataBlock
STsdbReader* dataReader;
SSnapContext* sContext;
} SStreamRawScanInfo;
typedef struct SSysTableScanInfo {
SRetrieveMetaTableRsp* pRsp;
@ -527,14 +529,14 @@ typedef struct SBlockDistInfo {
SSDataBlock* pResBlock;
void* pHandle;
SReadHandle readHandle;
uint64_t uid; // table uid
uint64_t uid; // table uid
} SBlockDistInfo;
// todo remove this
typedef struct SOptrBasicInfo {
SResultRowInfo resultRowInfo;
SSDataBlock* pRes;
bool mergeResultBlock;
SResultRowInfo resultRowInfo;
SSDataBlock* pRes;
bool mergeResultBlock;
} SOptrBasicInfo;
typedef struct SIntervalAggOperatorInfo {
@ -1009,7 +1011,7 @@ int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimary
int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlFunctionCtx* pCtx, int32_t numOfOutput,
int32_t size);
SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize);
SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize);
SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
TSKEY endTs, uint64_t groupId, int64_t gap, int32_t* pIndex);
SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,

View File

@ -46,8 +46,8 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
rowSize += pCtx[i].resDataInfo.interBufSize;
}
rowSize +=
(numOfOutput * sizeof(bool)); // expand rowSize to mark if col is null for top/bottom result(doSaveTupleData)
rowSize += (numOfOutput * sizeof(bool));
// expand rowSize to mark if col is null for top/bottom result(saveTupleData)
return rowSize;
}
@ -1178,7 +1178,6 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
SqlFunctionCtx* pCtx = &pFuncCtx[i];
pCtx->functionId = -1;
pCtx->curBufPage = -1;
pCtx->pExpr = pExpr;
if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) {
@ -1191,7 +1190,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
fmGetFuncExecFuncs(pCtx->functionId, &pCtx->fpSet);
} else {
char* udfName = pExpr->pExpr->_function.pFunctNode->functionName;
strncpy(pCtx->udfName, udfName, strlen(udfName));
strncpy(pCtx->udfName, udfName, TSDB_FUNC_NAME_LEN);
fmGetUdafExecFuncs(pCtx->functionId, &pCtx->fpSet);
}
pCtx->fpSet.getEnv(pExpr->pExpr->_function.pFunctNode, &env);
@ -1222,6 +1221,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
pCtx->isStream = false;
pCtx->param = pFunct->pParam;
pCtx->saveHandle.currentPage = -1;
}
for (int32_t i = 1; i < numOfOutput; ++i) {

View File

@ -139,7 +139,7 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols, SSchemaWrapper** pSchema) {
if (msg == NULL) {
// TODO create raw scan
// create raw scan
SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo));
if (NULL == pTaskInfo) {
@ -151,7 +151,7 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* n
pTaskInfo->cost.created = taosGetTimestampMs();
pTaskInfo->execModel = OPTR_EXEC_MODEL_QUEUE;
pTaskInfo->pRoot = createRawScanOperatorInfo(readers, pTaskInfo);
if(NULL == pTaskInfo->pRoot){
if (NULL == pTaskInfo->pRoot) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(pTaskInfo);
return NULL;
@ -834,11 +834,11 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
} else {
ASSERT(0);
}
}else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA){
} else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
SStreamRawScanInfo* pInfo = pOperator->info;
SSnapContext* sContext = pInfo->sContext;
if(setForSnapShot(sContext, pOffset->uid) != 0) {
qError("setDataForSnapShot error. uid:%"PRIi64, pOffset->uid);
SSnapContext* sContext = pInfo->sContext;
if (setForSnapShot(sContext, pOffset->uid) != 0) {
qError("setDataForSnapShot error. uid:%" PRIi64, pOffset->uid);
return -1;
}
@ -847,27 +847,29 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
pInfo->dataReader = NULL;
cleanupQueryTableDataCond(&pTaskInfo->streamInfo.tableCond);
taosArrayDestroy(pTaskInfo->tableqinfoList.pTableList);
if(mtInfo.uid == 0) return 0; // no data
if (mtInfo.uid == 0) return 0; // no data
initQueryTableDataCondForTmq(&pTaskInfo->streamInfo.tableCond, sContext, mtInfo);
pTaskInfo->streamInfo.tableCond.twindows.skey = pOffset->ts;
pTaskInfo->tableqinfoList.pTableList = taosArrayInit(1, sizeof(STableKeyInfo));
taosArrayPush(pTaskInfo->tableqinfoList.pTableList, &(STableKeyInfo){.uid = mtInfo.uid, .groupId = 0});
tsdbReaderOpen(pInfo->vnode, &pTaskInfo->streamInfo.tableCond, pTaskInfo->tableqinfoList.pTableList, &pInfo->dataReader, NULL);
tsdbReaderOpen(pInfo->vnode, &pTaskInfo->streamInfo.tableCond, pTaskInfo->tableqinfoList.pTableList,
&pInfo->dataReader, NULL);
strcpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName);
tDeleteSSchemaWrapper(pTaskInfo->streamInfo.schema);
pTaskInfo->streamInfo.schema = mtInfo.schema;
qDebug("tmqsnap qStreamPrepareScan snapshot data uid %ld ts %ld", mtInfo.uid, pOffset->ts);
}else if(pOffset->type == TMQ_OFFSET__SNAPSHOT_META){
} else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_META) {
SStreamRawScanInfo* pInfo = pOperator->info;
SSnapContext* sContext = pInfo->sContext;
if(setForSnapShot(sContext, pOffset->uid) != 0) {
qError("setForSnapShot error. uid:%"PRIi64" ,version:%"PRIi64, pOffset->uid);
SSnapContext* sContext = pInfo->sContext;
if (setForSnapShot(sContext, pOffset->uid) != 0) {
qError("setForSnapShot error. uid:%" PRIi64 " ,version:%" PRIi64, pOffset->uid);
return -1;
}
qDebug("tmqsnap qStreamPrepareScan snapshot meta uid %ld ts %ld", pOffset->uid);
}else if (pOffset->type == TMQ_OFFSET__LOG) {
} else if (pOffset->type == TMQ_OFFSET__LOG) {
SStreamRawScanInfo* pInfo = pOperator->info;
tsdbReaderClose(pInfo->dataReader);
pInfo->dataReader = NULL;

View File

@ -179,26 +179,23 @@ static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pR
}
#endif
SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize) {
SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize) {
SFilePage* pData = NULL;
// in the first scan, new space needed for results
int32_t pageId = -1;
SIDList list = getDataBufPagesIdList(pResultBuf);
if (taosArrayGetSize(list) == 0) {
pData = getNewBufPage(pResultBuf, tableGroupId, &pageId);
if (*currentPageId == -1) {
pData = getNewBufPage(pResultBuf, &pageId);
pData->num = sizeof(SFilePage);
} else {
SPageInfo* pi = getLastPageInfo(list);
pData = getBufPage(pResultBuf, getPageId(pi));
pageId = getPageId(pi);
pData = getBufPage(pResultBuf, *currentPageId);
pageId = *currentPageId;
if (pData->num + interBufSize > getBufPageSize(pResultBuf)) {
// release current page first, and prepare the next one
releaseBufPageInfo(pResultBuf, pi);
releaseBufPage(pResultBuf, pData);
pData = getNewBufPage(pResultBuf, tableGroupId, &pageId);
pData = getNewBufPage(pResultBuf, &pageId);
if (pData != NULL) {
pData->num = sizeof(SFilePage);
}
@ -215,9 +212,9 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int
SResultRow* pResultRow = (SResultRow*)((char*)pData + pData->num);
pResultRow->pageId = pageId;
pResultRow->offset = (int32_t)pData->num;
*currentPageId = pageId;
pData->num += interBufSize;
return pResultRow;
}
@ -263,18 +260,15 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
// allocate a new buffer page
if (pResult == NULL) {
#ifdef BUF_PAGE_DEBUG
qDebug("page_2");
#endif
ASSERT(pSup->resultRowSize > 0);
pResult = getNewResultRow(pResultBuf, groupId, pSup->resultRowSize);
pResult = getNewResultRow(pResultBuf, &pSup->currentPageId, pSup->resultRowSize);
initResultRow(pResult);
// add a new result set for a new group
SResultRowPosition pos = {.pageId = pResult->pageId, .offset = pResult->offset};
tSimpleHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos,
sizeof(SResultRowPosition));
sizeof(SResultRowPosition));
}
// 2. set the new time window to be the new active time window
@ -302,7 +296,7 @@ static int32_t addNewWindowResultBuf(SResultRow* pWindowRes, SDiskbasedBuf* pRes
SIDList list = getDataBufPagesIdList(pResultBuf);
if (taosArrayGetSize(list) == 0) {
pData = getNewBufPage(pResultBuf, tid, &pageId);
pData = getNewBufPage(pResultBuf, &pageId);
pData->num = sizeof(SFilePage);
} else {
SPageInfo* pi = getLastPageInfo(list);
@ -313,7 +307,7 @@ static int32_t addNewWindowResultBuf(SResultRow* pWindowRes, SDiskbasedBuf* pRes
// release current page first, and prepare the next one
releaseBufPageInfo(pResultBuf, pi);
pData = getNewBufPage(pResultBuf, tid, &pageId);
pData = getNewBufPage(pResultBuf, &pageId);
if (pData != NULL) {
pData->num = sizeof(SFilePage);
}
@ -2821,92 +2815,6 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan
}
}
}
#if 0
int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) {
uint8_t type = pOperator->operatorType;
pOperator->status = OP_OPENED;
if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
SStreamScanInfo* pScanInfo = pOperator->info;
pScanInfo->blockType = STREAM_INPUT__TABLE_SCAN;
pScanInfo->pTableScanOp->status = OP_OPENED;
STableScanInfo* pInfo = pScanInfo->pTableScanOp->info;
ASSERT(pInfo->scanMode == TABLE_SCAN__TABLE_ORDER);
if (uid == 0) {
pInfo->noTable = 1;
return TSDB_CODE_SUCCESS;
}
/*if (pSnapShotScanInfo->dataReader == NULL) {*/
/*pSnapShotScanInfo->dataReader = tsdbReaderOpen(pHandle->vnode, &pSTInfo->cond, tableList, 0, 0);*/
/*pSnapShotScanInfo->scanMode = TABLE_SCAN__TABLE_ORDER;*/
/*}*/
pInfo->noTable = 0;
if (pInfo->lastStatus.uid != uid || pInfo->lastStatus.ts != ts) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
bool found = false;
for (int32_t i = 0; i < tableSz; i++) {
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
if (pTableInfo->uid == uid) {
found = true;
pInfo->currentTable = i;
}
}
// TODO after processing drop, found can be false
ASSERT(found);
tsdbSetTableId(pInfo->dataReader, uid);
int64_t oldSkey = pInfo->cond.twindows.skey;
pInfo->cond.twindows.skey = ts + 1;
tsdbReaderReset(pInfo->dataReader, &pInfo->cond);
pInfo->cond.twindows.skey = oldSkey;
pInfo->scanTimes = 0;
qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid, ts,
pInfo->currentTable, tableSz);
}
return TSDB_CODE_SUCCESS;
} else {
if (pOperator->numOfDownstream == 1) {
return doPrepareScan(pOperator->pDownstream[0], uid, ts);
} else if (pOperator->numOfDownstream == 0) {
qError("failed to find stream scan operator to set the input data block");
return TSDB_CODE_QRY_APP_ERROR;
} else {
qError("join not supported for stream block scan");
return TSDB_CODE_QRY_APP_ERROR;
}
}
}
int32_t doGetScanStatus(SOperatorInfo* pOperator, uint64_t* uid, int64_t* ts) {
int32_t type = pOperator->operatorType;
if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
SStreamScanInfo* pScanInfo = pOperator->info;
STableScanInfo* pSnapShotScanInfo = pScanInfo->pTableScanOp->info;
*uid = pSnapShotScanInfo->lastStatus.uid;
*ts = pSnapShotScanInfo->lastStatus.ts;
} else {
if (pOperator->pDownstream[0] == NULL) {
return TSDB_CODE_INVALID_PARA;
} else {
doGetScanStatus(pOperator->pDownstream[0], uid, ts);
}
}
return TSDB_CODE_SUCCESS;
}
#endif
// this is a blocking operator
static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
@ -3030,7 +2938,7 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len
SResultRow* pRow = (SResultRow*)((char*)pPage + pos->offset);
setBufPageDirty(pPage, true);
releaseBufPage(pSup->pResultBuf, pPage);
int32_t iter = 0;
void* pIter = NULL;
while ((pIter = tSimpleHashIterate(pSup->pResultRowHashTable, pIter, &iter))) {
@ -3092,7 +3000,7 @@ int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) {
offset += sizeof(int32_t);
uint64_t tableGroupId = *(uint64_t*)(result + offset);
SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, tableGroupId, pSup->resultRowSize);
SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, &pSup->currentPageId, pSup->resultRowSize);
if (!resultRow) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
@ -3440,8 +3348,10 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul
int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize,
const char* pKey) {
int32_t code = 0;
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
pAggSup->currentPageId = -1;
pAggSup->resultRowSize = getResultRowSize(pCtx, numOfOutput);
pAggSup->keyBuf = taosMemoryCalloc(1, keyBufSize + POINTER_BYTES + sizeof(int64_t));
pAggSup->pResultRowHashTable = tSimpleHashInit(10, hashFn);
@ -3455,18 +3365,18 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz);
if (!osTempSpaceAvailable()) {
terrno = TSDB_CODE_NO_AVAIL_DISK;
qError("Init stream agg supporter failed since %s", terrstr(terrno));
return terrno;
}
int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir);
if (code != TSDB_CODE_SUCCESS) {
qError("Create agg result buf failed since %s", tstrerror(code));
code = TSDB_CODE_NO_AVAIL_DISK;
qError("Init stream agg supporter failed since %s, %s", terrstr(code), pKey);
return code;
}
return TSDB_CODE_SUCCESS;
code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir);
if (code != TSDB_CODE_SUCCESS) {
qError("Create agg result buf failed since %s, %s", tstrerror(code), pKey);
return code;
}
return code;
}
void cleanupAggSup(SAggSupporter* pAggSup) {
@ -3488,7 +3398,7 @@ int32_t initAggInfo(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInf
}
for (int32_t i = 0; i < numOfCols; ++i) {
pSup->pCtx[i].pBuf = pAggSup->pResultBuf;
pSup->pCtx[i].saveHandle.pBuf = pAggSup->pResultBuf;
}
return TSDB_CODE_SUCCESS;
@ -3520,6 +3430,7 @@ void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
}
taosMemoryFreeClear(pCtx[i].subsidiaries.pCtx);
taosMemoryFreeClear(pCtx[i].subsidiaries.buf);
taosMemoryFree(pCtx[i].input.pData);
taosMemoryFree(pCtx[i].input.pColumnDataAgg);
}
@ -4071,8 +3982,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
STableKeyInfo* pKeyInfo = taosArrayGet(pTableListInfo->pTableList, i);
qDebug("creating stream task: add table %" PRId64, pKeyInfo->uid);
}
}
#endif
}
pTaskInfo->schemaInfo.qsw = extractQueriedColumnSchema(&pTableScanNode->scan);
pOperator = createStreamScanOperatorInfo(pHandle, pTableScanNode, pTagCond, pTaskInfo);
@ -4297,42 +4208,6 @@ SArray* extractColumnInfo(SNodeList* pNodeList) {
return pList;
}
#if 0
STsdbReader* doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
STableListInfo* pTableListInfo, const char* idstr) {
int32_t code = getTableList(pHandle->meta, pHandle->vnode, &pTableScanNode->scan, pTableListInfo);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
if (taosArrayGetSize(pTableListInfo->pTableList) == 0) {
code = 0;
qDebug("no table qualified for query, %s", idstr);
goto _error;
}
SQueryTableDataCond cond = {0};
code = initQueryTableDataCond(&cond, pTableScanNode);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
STsdbReader* pReader;
code = tsdbReaderOpen(pHandle->vnode, &cond, pTableListInfo->pTableList, &pReader, idstr);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
cleanupQueryTableDataCond(&cond);
return pReader;
_error:
terrno = code;
return NULL;
}
#endif
static int32_t extractTbscanInStreamOpTree(SOperatorInfo* pOperator, STableScanInfo** ppInfo) {
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
if (pOperator->numOfDownstream == 0) {
@ -4574,7 +4449,7 @@ _complete:
return code;
}
static void doDestroyTableList(STableListInfo* pTableqinfoList) {
void doDestroyTableList(STableListInfo* pTableqinfoList) {
taosArrayDestroy(pTableqinfoList->pTableList);
taosHashCleanup(pTableqinfoList->map);
if (pTableqinfoList->needSortTableByGroupId) {
@ -4678,6 +4553,7 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInf
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlFunctionCtx* pCtx, int32_t numOfOutput,
int32_t size) {
pSup->currentPageId = -1;
pSup->resultRowSize = getResultRowSize(pCtx, numOfOutput);
pSup->keySize = sizeof(int64_t) + sizeof(TSKEY);
pSup->pKeyBuf = taosMemoryCalloc(1, pSup->keySize);
@ -4705,7 +4581,8 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlF
}
int32_t code = createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, tsTempDir);
for (int32_t i = 0; i < numOfOutput; ++i) {
pCtx[i].pBuf = pSup->pResultBuf;
pCtx[i].saveHandle.pBuf = pSup->pResultBuf;
}
return code;
}
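The recurring change in this file is that getNewResultRow and doInitAggInfoSup now track the active write page through the supporter's currentPageId (initialized to -1) instead of walking the buffer's page-id list for every group. A self-contained sketch of that caching pattern, with a toy DemoBuf standing in for SDiskbasedBuf:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE 64
#define DEMO_MAX_PAGES 8

/* A toy paged buffer standing in for SDiskbasedBuf. */
typedef struct {
  char   *pages[DEMO_MAX_PAGES];
  int32_t used[DEMO_MAX_PAGES]; /* bytes written per page */
  int32_t numOfPages;
} DemoBuf;

static char *demoNewPage(DemoBuf *buf, int32_t *pageId) {
  if (buf->numOfPages >= DEMO_MAX_PAGES) return NULL;
  *pageId = buf->numOfPages++;
  buf->pages[*pageId] = calloc(1, DEMO_PAGE_SIZE);
  return buf->pages[*pageId];
}

/* Same shape as the reworked getNewResultRow: the caller keeps a cached
 * currentPageId (-1 at start); a new page is requested only when there is
 * no active page yet or the active one cannot fit another row. */
static char *demoGetResultRow(DemoBuf *buf, int32_t *currentPageId, int32_t rowSize) {
  int32_t pageId = *currentPageId;
  if (pageId == -1 || buf->used[pageId] + rowSize > DEMO_PAGE_SIZE) {
    if (demoNewPage(buf, &pageId) == NULL) return NULL;
  }
  char *row = buf->pages[pageId] + buf->used[pageId];
  buf->used[pageId] += rowSize;
  *currentPageId = pageId; /* remember the active write page */
  return row;
}

int main(void) {
  DemoBuf buf = {0};
  int32_t currentPageId = -1;
  for (int i = 0; i < 6; i++) {
    demoGetResultRow(&buf, &currentPageId, 24);
  }
  printf("pages=%d active=%d used=%d\n", buf.numOfPages, currentPageId,
         buf.used[currentPageId]);
  for (int i = 0; i < buf.numOfPages; i++) free(buf.pages[i]);
  return 0;
}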

View File

@ -547,7 +547,7 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf
p = taosHashGet(pInfo->pGroupSet, pInfo->keyBuf, len);
int32_t pageId = 0;
pPage = getNewBufPage(pInfo->pBuf, 0, &pageId);
pPage = getNewBufPage(pInfo->pBuf, &pageId);
taosArrayPush(p->pPageList, &pageId);
*(int32_t *) pPage = 0;
@ -562,7 +562,7 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf
// add a new page for current group
int32_t pageId = 0;
pPage = getNewBufPage(pInfo->pBuf, 0, &pageId);
pPage = getNewBufPage(pInfo->pBuf, &pageId);
taosArrayPush(p->pPageList, &pageId);
memset(pPage, 0, getBufPageSize(pInfo->pBuf));
}
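After this change getNewBufPage no longer takes a group id; the partition operator keeps ownership information itself by pushing the returned page id onto the group's pPageList and clearing the fresh page. A reduced sketch of that caller-side bookkeeping (demoGetNewBufPage and DemoGroupInfo are illustrative, not the SDiskbasedBuf API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_PAGE_SIZE 32
#define DEMO_MAX_PAGES 16

/* Toy buffer: getNewBufPage-style allocation that only reports a page id. */
static char   *gPages[DEMO_MAX_PAGES];
static int32_t gNumOfPages = 0;

static char *demoGetNewBufPage(int32_t *pageId) {
  if (gNumOfPages >= DEMO_MAX_PAGES) return NULL;
  *pageId = gNumOfPages++;
  gPages[*pageId] = malloc(DEMO_PAGE_SIZE);
  return gPages[*pageId];
}

/* Each group records the ids of the pages it owns, as the partition
 * operator does with its per-group page list. */
typedef struct {
  int32_t pageIds[DEMO_MAX_PAGES];
  int32_t numOfPageIds;
} DemoGroupInfo;

static char *demoAddPageToGroup(DemoGroupInfo *grp) {
  int32_t pageId = 0;
  char   *page = demoGetNewBufPage(&pageId);
  if (page == NULL) return NULL;
  grp->pageIds[grp->numOfPageIds++] = pageId; /* remember ownership */
  memset(page, 0, DEMO_PAGE_SIZE);            /* fresh page starts empty */
  return page;
}

int main(void) {
  DemoGroupInfo grp = {0};
  demoAddPageToGroup(&grp);
  demoAddPageToGroup(&grp);
  printf("group owns %d pages, last id %d\n", grp.numOfPageIds,
         grp.pageIds[grp.numOfPageIds - 1]);
  for (int i = 0; i < gNumOfPages; i++) free(gPages[i]);
  return 0;
}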

View File

@ -195,16 +195,6 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS
return PROJECT_RETRIEVE_DONE;
}
void printDataBlock1(SSDataBlock* pBlock, const char* flag) {
if (!pBlock || pBlock->info.rows == 0) {
qDebug("===stream===printDataBlock: Block is Null or Empty");
return;
}
char* pBuf = NULL;
qDebug("%s", dumpBlockData(pBlock, flag, &pBuf));
taosMemoryFreeClear(pBuf);
}
SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
SProjectOperatorInfo* pProjectInfo = pOperator->info;
SOptrBasicInfo* pInfo = &pProjectInfo->binfo;

View File

@ -1219,7 +1219,7 @@ static void setBlockGroupId(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32
static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock) {
SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
SOperatorInfo* pOperator = pInfo->pStreamScanOp;
SExecTaskInfo* pTaskInfo = pInfo->pStreamScanOp->pTaskInfo;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
blockDataEnsureCapacity(pInfo->pRes, pBlock->info.rows);
@ -1228,7 +1228,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
pInfo->pRes->info.type = STREAM_NORMAL;
pInfo->pRes->info.version = pBlock->info.version;
uint64_t* groupIdPre = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t));
uint64_t* groupIdPre = taosHashGet(pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t));
if (groupIdPre) {
pInfo->pRes->info.groupId = *groupIdPre;
} else {
@ -1276,11 +1276,80 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
return 0;
}
static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SStreamScanInfo* pInfo = pOperator->info;
qDebug("queue scan called");
if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
if (pResult && pResult->info.rows > 0) {
qDebug("queue scan tsdb return %d rows", pResult->info.rows);
return pResult;
} else {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
tsdbReaderClose(pTSInfo->dataReader);
pTSInfo->dataReader = NULL;
tqOffsetResetToLog(&pTaskInfo->streamInfo.prepareStatus, pTaskInfo->streamInfo.snapshotVer);
qDebug("queue scan tsdb over, switch to wal ver %d", pTaskInfo->streamInfo.snapshotVer + 1);
if (tqSeekVer(pInfo->tqReader, pTaskInfo->streamInfo.snapshotVer + 1) < 0) {
return NULL;
}
ASSERT(pInfo->tqReader->pWalReader->curVersion == pTaskInfo->streamInfo.snapshotVer + 1);
}
}
if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
while (1) {
SFetchRet ret = {0};
tqNextBlock(pInfo->tqReader, &ret);
if (ret.fetchType == FETCH_TYPE__DATA) {
blockDataCleanup(pInfo->pRes);
if (setBlockIntoRes(pInfo, &ret.data) < 0) {
ASSERT(0);
}
// TODO clean data block
if (pInfo->pRes->info.rows > 0) {
qDebug("queue scan log return %d rows", pInfo->pRes->info.rows);
return pInfo->pRes;
}
} else if (ret.fetchType == FETCH_TYPE__META) {
ASSERT(0);
// pTaskInfo->streamInfo.lastStatus = ret.offset;
// pTaskInfo->streamInfo.metaBlk = ret.meta;
// return NULL;
} else if (ret.fetchType == FETCH_TYPE__NONE) {
pTaskInfo->streamInfo.lastStatus = ret.offset;
ASSERT(pTaskInfo->streamInfo.lastStatus.version >= pTaskInfo->streamInfo.prepareStatus.version);
ASSERT(pTaskInfo->streamInfo.lastStatus.version + 1 == pInfo->tqReader->pWalReader->curVersion);
char formatBuf[80];
tFormatOffset(formatBuf, 80, &ret.offset);
qDebug("queue scan log return null, offset %s", formatBuf);
return NULL;
} else {
ASSERT(0);
}
}
} else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
if (pResult && pResult->info.rows > 0) {
qDebug("stream scan tsdb return %d rows", pResult->info.rows);
return pResult;
}
qDebug("stream scan tsdb return null");
return NULL;
} else {
ASSERT(0);
return NULL;
}
}
static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
// NOTE: this operator does never check if current status is done or not
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SStreamScanInfo* pInfo = pOperator->info;
qDebug("stream scan called");
#if 0
SStreamState* pState = pTaskInfo->streamInfo.pState;
if (pState) {
@ -1317,48 +1386,6 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
}
#endif
qDebug("stream scan called");
if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
while (1) {
SFetchRet ret = {0};
tqNextBlock(pInfo->tqReader, &ret);
if (ret.fetchType == FETCH_TYPE__DATA) {
blockDataCleanup(pInfo->pRes);
if (setBlockIntoRes(pInfo, &ret.data) < 0) {
ASSERT(0);
}
// TODO clean data block
if (pInfo->pRes->info.rows > 0) {
qDebug("stream scan log return %d rows", pInfo->pRes->info.rows);
return pInfo->pRes;
}
} else if (ret.fetchType == FETCH_TYPE__META) {
ASSERT(0);
// pTaskInfo->streamInfo.lastStatus = ret.offset;
// pTaskInfo->streamInfo.metaBlk = ret.meta;
// return NULL;
} else if (ret.fetchType == FETCH_TYPE__NONE) {
pTaskInfo->streamInfo.lastStatus = ret.offset;
ASSERT(pTaskInfo->streamInfo.lastStatus.version >= pTaskInfo->streamInfo.prepareStatus.version);
ASSERT(pTaskInfo->streamInfo.lastStatus.version + 1 == pInfo->tqReader->pWalReader->curVersion);
char formatBuf[80];
tFormatOffset(formatBuf, 80, &ret.offset);
qDebug("stream scan log return null, offset %s", formatBuf);
return NULL;
} else {
ASSERT(0);
}
}
} else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
if (pResult && pResult->info.rows > 0) {
qDebug("stream scan tsdb return %d rows", pResult->info.rows);
return pResult;
}
qDebug("stream scan tsdb return null");
return NULL;
}
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE) {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
memcpy(&pTSInfo->cond, &pTaskInfo->streamInfo.tableCond, sizeof(SQueryTableDataCond));
@ -1554,14 +1581,14 @@ static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) {
}
static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
// NOTE: this operator does never check if current status is done or not
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
// NOTE: this operator does never check if current status is done or not
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SStreamRawScanInfo* pInfo = pOperator->info;
pTaskInfo->streamInfo.metaRsp.metaRspLen = 0; // use metaRspLen !=0 to judge if data is meta
pTaskInfo->streamInfo.metaRsp.metaRspLen = 0; // use metaRspLen !=0 to judge if data is meta
pTaskInfo->streamInfo.metaRsp.metaRsp = NULL;
qDebug("tmqsnap doRawScan called");
if(pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA){
if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
SSDataBlock* pBlock = &pInfo->pRes;
if (pInfo->dataReader && tsdbNextDataBlock(pInfo->dataReader)) {
@ -1585,42 +1612,38 @@ static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
}
SMetaTableInfo mtInfo = getUidfromSnapShot(pInfo->sContext);
if (mtInfo.uid == 0){ //read snapshot done, change to get data from wal
if (mtInfo.uid == 0) { // read snapshot done, change to get data from wal
qDebug("tmqsnap read snapshot done, change to get data from wal");
pTaskInfo->streamInfo.prepareStatus.uid = mtInfo.uid;
pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
pTaskInfo->streamInfo.lastStatus.version = pInfo->sContext->snapVersion;
tDeleteSSchemaWrapper(pTaskInfo->streamInfo.schema);
}else{
} else {
pTaskInfo->streamInfo.prepareStatus.uid = mtInfo.uid;
pTaskInfo->streamInfo.prepareStatus.ts = INT64_MIN;
qDebug("tmqsnap change get data uid:%ld", mtInfo.uid);
qStreamPrepareScan(pTaskInfo, &pTaskInfo->streamInfo.prepareStatus, pInfo->sContext->subType);
strcpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName);
tDeleteSSchemaWrapper(pTaskInfo->streamInfo.schema);
pTaskInfo->streamInfo.schema = mtInfo.schema;
}
qDebug("tmqsnap stream scan tsdb return null");
return NULL;
}else if(pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META){
SSnapContext *sContext = pInfo->sContext;
void* data = NULL;
int32_t dataLen = 0;
int16_t type = 0;
int64_t uid = 0;
if(getMetafromSnapShot(sContext, &data, &dataLen, &type, &uid) < 0){
} else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META) {
SSnapContext* sContext = pInfo->sContext;
void* data = NULL;
int32_t dataLen = 0;
int16_t type = 0;
int64_t uid = 0;
if (getMetafromSnapShot(sContext, &data, &dataLen, &type, &uid) < 0) {
qError("tmqsnap getMetafromSnapShot error");
taosMemoryFreeClear(data);
return NULL;
}
if(!sContext->queryMetaOrData){ // change to get data next poll request
if (!sContext->queryMetaOrData) { // change to get data next poll request
pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_META;
pTaskInfo->streamInfo.lastStatus.uid = uid;
pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__SNAPSHOT_DATA;
pTaskInfo->streamInfo.metaRsp.rspOffset.uid = 0;
pTaskInfo->streamInfo.metaRsp.rspOffset.ts = INT64_MIN;
}else{
} else {
pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_META;
pTaskInfo->streamInfo.lastStatus.uid = uid;
pTaskInfo->streamInfo.metaRsp.rspOffset = pTaskInfo->streamInfo.lastStatus;
@ -1631,44 +1654,44 @@ static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
return NULL;
}
// else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
// int64_t fetchVer = pTaskInfo->streamInfo.prepareStatus.version + 1;
//
// while(1){
// if (tqFetchLog(pInfo->tqReader->pWalReader, pInfo->sContext->withMeta, &fetchVer, &pInfo->pCkHead) < 0) {
// qDebug("tmqsnap tmq poll: consumer log end. offset %" PRId64, fetchVer);
// pTaskInfo->streamInfo.lastStatus.version = fetchVer;
// pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
// return NULL;
// }
// SWalCont* pHead = &pInfo->pCkHead->head;
// qDebug("tmqsnap tmq poll: consumer log offset %" PRId64 " msgType %d", fetchVer, pHead->msgType);
//
// if (pHead->msgType == TDMT_VND_SUBMIT) {
// SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
// tqReaderSetDataMsg(pInfo->tqReader, pCont, 0);
// SSDataBlock* block = tqLogScanExec(pInfo->sContext->subType, pInfo->tqReader, pInfo->pFilterOutTbUid, &pInfo->pRes);
// if(block){
// pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
// pTaskInfo->streamInfo.lastStatus.version = fetchVer;
// qDebug("tmqsnap fetch data msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
// return block;
// }else{
// fetchVer++;
// }
// } else{
// ASSERT(pInfo->sContext->withMeta);
// ASSERT(IS_META_MSG(pHead->msgType));
// qDebug("tmqsnap fetch meta msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
// pTaskInfo->streamInfo.metaRsp.rspOffset.version = fetchVer;
// pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__LOG;
// pTaskInfo->streamInfo.metaRsp.resMsgType = pHead->msgType;
// pTaskInfo->streamInfo.metaRsp.metaRspLen = pHead->bodyLen;
// pTaskInfo->streamInfo.metaRsp.metaRsp = taosMemoryMalloc(pHead->bodyLen);
// memcpy(pTaskInfo->streamInfo.metaRsp.metaRsp, pHead->body, pHead->bodyLen);
// return NULL;
// }
// }
// else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
// int64_t fetchVer = pTaskInfo->streamInfo.prepareStatus.version + 1;
//
// while(1){
// if (tqFetchLog(pInfo->tqReader->pWalReader, pInfo->sContext->withMeta, &fetchVer, &pInfo->pCkHead) < 0) {
// qDebug("tmqsnap tmq poll: consumer log end. offset %" PRId64, fetchVer);
// pTaskInfo->streamInfo.lastStatus.version = fetchVer;
// pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
// return NULL;
// }
// SWalCont* pHead = &pInfo->pCkHead->head;
// qDebug("tmqsnap tmq poll: consumer log offset %" PRId64 " msgType %d", fetchVer, pHead->msgType);
//
// if (pHead->msgType == TDMT_VND_SUBMIT) {
// SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
// tqReaderSetDataMsg(pInfo->tqReader, pCont, 0);
// SSDataBlock* block = tqLogScanExec(pInfo->sContext->subType, pInfo->tqReader, pInfo->pFilterOutTbUid,
// &pInfo->pRes); if(block){
// pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
// pTaskInfo->streamInfo.lastStatus.version = fetchVer;
// qDebug("tmqsnap fetch data msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
// return block;
// }else{
// fetchVer++;
// }
// } else{
// ASSERT(pInfo->sContext->withMeta);
// ASSERT(IS_META_MSG(pHead->msgType));
// qDebug("tmqsnap fetch meta msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
// pTaskInfo->streamInfo.metaRsp.rspOffset.version = fetchVer;
// pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__LOG;
// pTaskInfo->streamInfo.metaRsp.resMsgType = pHead->msgType;
// pTaskInfo->streamInfo.metaRsp.metaRspLen = pHead->bodyLen;
// pTaskInfo->streamInfo.metaRsp.metaRsp = taosMemoryMalloc(pHead->bodyLen);
// memcpy(pTaskInfo->streamInfo.metaRsp.metaRsp, pHead->body, pHead->bodyLen);
// return NULL;
// }
// }
return NULL;
}
@ -1689,7 +1712,7 @@ SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pT
// create tq reader
SStreamRawScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamRawScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
return NULL;
@ -1699,13 +1722,12 @@ SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pT
pInfo->sContext = pHandle->sContext;
pOperator->name = "RawStreamScanOperator";
// pOperator->blocking = false;
// pOperator->status = OP_NOT_OPENED;
// pOperator->blocking = false;
// pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(NULL, doRawScan, NULL, NULL, destroyRawScanOperatorInfo,
NULL, NULL, NULL);
pOperator->fpSet = createOperatorFpSet(NULL, doRawScan, NULL, NULL, destroyRawScanOperatorInfo, NULL, NULL, NULL);
return pOperator;
}
@ -1724,7 +1746,7 @@ static void destroyStreamScanOperatorInfo(void* param) {
}
if (pStreamScan->pPseudoExpr) {
destroyExprInfo(pStreamScan->pPseudoExpr, pStreamScan->numOfPseudoExpr);
taosMemoryFreeClear(pStreamScan->pPseudoExpr);
taosMemoryFree(pStreamScan->pPseudoExpr);
}
updateInfoDestroy(pStreamScan->pUpdateInfo);
@ -1815,6 +1837,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->readHandle = *pHandle;
pInfo->tableUid = pScanPhyNode->uid;
pTaskInfo->streamInfo.snapshotVer = pHandle->version;
// set the extract column id to streamHandle
tqReaderSetColIdList(pInfo->tqReader, pColIds);
@ -1858,8 +1881,9 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamScan, NULL, NULL, destroyStreamScanOperatorInfo,
NULL, NULL, NULL);
__optr_fn_t nextFn = pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM ? doStreamScan : doQueueScan;
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, nextFn, NULL, NULL, destroyStreamScanOperatorInfo, NULL, NULL, NULL);
return pOperator;
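The net effect of this file's changes is that tmq queue consumption is split out of doStreamScan into doQueueScan, and createStreamScanOperatorInfo picks the operator's next function from the task's execModel. A minimal sketch of that mode-based dispatch, with hypothetical Demo* names in place of the operator framework:

#include <stdio.h>

/* Illustrative execution models mirroring OPTR_EXEC_MODEL_STREAM/_QUEUE. */
typedef enum { DEMO_MODEL_STREAM, DEMO_MODEL_QUEUE } DemoExecModel;

typedef struct DemoOperator DemoOperator;
typedef const char *(*demo_next_fn_t)(DemoOperator *op);

struct DemoOperator {
  DemoExecModel  model;
  demo_next_fn_t nextFn; /* picked once at creation time */
};

static const char *demoStreamScan(DemoOperator *op) { (void)op; return "stream scan"; }
static const char *demoQueueScan(DemoOperator *op)  { (void)op; return "queue scan"; }

/* Same idea as choosing doStreamScan vs doQueueScan from execModel when the
 * operator is created: callers afterwards just invoke nextFn. */
static void demoInitOperator(DemoOperator *op, DemoExecModel model) {
  op->model = model;
  op->nextFn = (model == DEMO_MODEL_STREAM) ? demoStreamScan : demoQueueScan;
}

int main(void) {
  DemoOperator a, b;
  demoInitOperator(&a, DEMO_MODEL_STREAM);
  demoInitOperator(&b, DEMO_MODEL_QUEUE);
  printf("%s / %s\n", a.nextFn(&a), b.nextFn(&b));
  return 0;
}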

View File

@ -1828,12 +1828,6 @@ static bool timeWindowinterpNeeded(SqlFunctionCtx* pCtx, int32_t numOfCols, SInt
return needed;
}
void increaseTs(SqlFunctionCtx* pCtx) {
if (pCtx[0].pExpr->pExpr->_function.pFunctNode->funcType == FUNCTION_TYPE_WSTART) {
// pCtx[0].increase = true;
}
}
void initIntervalDownStream(SOperatorInfo* downstream, uint16_t type, SAggSupporter* pSup) {
if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
// Todo(liuyao) support partition by column
@ -1895,7 +1889,6 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
if (isStream) {
ASSERT(numOfCols > 0);
increaseTs(pSup->pCtx);
initStreamFunciton(pSup->pCtx, pSup->numOfExprs);
}
@ -3050,6 +3043,7 @@ static void clearStreamIntervalOperator(SStreamFinalIntervalOperatorInfo* pInfo)
tSimpleHashClear(pInfo->aggSup.pResultRowHashTable);
clearDiskbasedBuf(pInfo->aggSup.pResultBuf);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
pInfo->aggSup.currentPageId = -1;
}
static void clearSpecialDataBlock(SSDataBlock* pBlock) {
@ -3420,7 +3414,6 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
initBasicInfo(&pInfo->binfo, pResBlock);
ASSERT(numOfCols > 0);
increaseTs(pOperator->exprSupp.pCtx);
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
@ -3451,6 +3444,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
// semi interval operator does not catch result
pInfo->isFinal = false;
pOperator->name = "StreamSemiIntervalOperator";
ASSERT(pInfo->aggSup.currentPageId == -1);
}
if (!IS_FINAL_OP(pInfo) || numOfChild == 0) {
@ -3559,11 +3553,10 @@ int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo*
initBasicInfo(pBasicInfo, pResultBlock);
for (int32_t i = 0; i < numOfCols; ++i) {
pSup->pCtx[i].pBuf = NULL;
pSup->pCtx[i].saveHandle.pBuf = NULL;
}
ASSERT(numOfCols > 0);
increaseTs(pSup->pCtx);
return TSDB_CODE_SUCCESS;
}
@ -3820,7 +3813,7 @@ static int32_t setWindowOutputBuf(SResultWindowInfo* pWinInfo, SResultRow** pRes
}
if (pWinInfo->pos.pageId == -1) {
*pResult = getNewResultRow(pAggSup->pResultBuf, groupId, pAggSup->resultRowSize);
*pResult = getNewResultRow(pAggSup->pResultBuf, &pAggSup->currentPageId, pAggSup->resultRowSize);
if (*pResult == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@ -4337,6 +4330,7 @@ static void clearStreamSessionOperator(SStreamSessionAggOperatorInfo* pInfo) {
}
}
clearDiskbasedBuf(pInfo->streamAggSup.pResultBuf);
pInfo->streamAggSup.currentPageId = -1;
}
static void removeSessionResults(SHashObj* pHashMap, SArray* pWins) {

View File

@ -97,7 +97,7 @@ static int32_t doAddToBucket(SLHashObj* pHashObj, SLHashBucket* pBucket, int32_t
// allocate the overflow buffer page to hold this k/v.
int32_t newPageId = -1;
SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, 0, &newPageId);
SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, &newPageId);
if (pNewPage == NULL) {
return terrno;
}
@ -227,7 +227,7 @@ static int32_t doAddNewBucket(SLHashObj* pHashObj) {
}
int32_t pageId = -1;
SFilePage* p = getNewBufPage(pHashObj->pBuf, 0, &pageId);
SFilePage* p = getNewBufPage(pHashObj->pBuf, &pageId);
if (p == NULL) {
return terrno;
}

View File

@ -180,7 +180,7 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) {
}
int32_t pageId = -1;
void* pPage = getNewBufPage(pHandle->pBuf, pHandle->sourceId, &pageId);
void* pPage = getNewBufPage(pHandle->pBuf, &pageId);
if (pPage == NULL) {
blockDataDestroy(p);
return terrno;
@ -512,7 +512,7 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
}
int32_t pageId = -1;
void* pPage = getNewBufPage(pHandle->pBuf, pHandle->sourceId, &pageId);
void* pPage = getNewBufPage(pHandle->pBuf, &pageId);
if (pPage == NULL) {
return terrno;
}

View File

@ -1146,8 +1146,9 @@ bool getMinmaxFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
return true;
}
static void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
static void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
static STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock);
static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos);
static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, const char* tval) {
// the data is loaded, not only the block SMA value
@ -1159,6 +1160,7 @@ static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, c
}
ASSERT(0);
return 0;
}
int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
@ -1199,7 +1201,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
pBuf->v = *(int64_t*)tval;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
} else {
if (IS_SIGNED_NUMERIC_TYPE(type)) {
@ -1211,7 +1213,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(int64_t*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
@ -1224,7 +1226,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(uint64_t*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
} else if (type == TSDB_DATA_TYPE_DOUBLE) {
@ -1236,7 +1238,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(double*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
} else if (type == TSDB_DATA_TYPE_FLOAT) {
@ -1250,7 +1252,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
}
@ -1275,7 +1277,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@ -1287,7 +1289,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@ -1306,7 +1308,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@ -1318,7 +1320,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@ -1337,7 +1339,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@ -1349,7 +1351,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@ -1368,7 +1370,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@ -1380,7 +1382,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@ -1401,7 +1403,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@ -1413,7 +1415,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@ -1432,7 +1434,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@ -1444,7 +1446,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@ -1463,7 +1465,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@ -1475,7 +1477,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@ -1494,7 +1496,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@ -1506,7 +1508,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@ -1526,7 +1528,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@ -1538,7 +1540,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@ -1557,7 +1559,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@ -1569,7 +1571,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@ -1580,7 +1582,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
_min_max_over:
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved ) {
doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos);
pBuf->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pBuf->nullTupleSaved = true;
}
return numOfElems;
@ -1599,8 +1601,7 @@ int32_t maxFunction(SqlFunctionCtx* pCtx) {
}
static void setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t rowIndex);
static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rIndex);
static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rowIndex);
int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
@ -1648,34 +1649,29 @@ void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuple
return;
}
int32_t pageId = pTuplePos->pageId;
int32_t offset = pTuplePos->offset;
if (pCtx->saveHandle.pBuf != NULL) {
if (pTuplePos->pageId != -1) {
int32_t numOfCols = pCtx->subsidiaries.num;
const char* p = loadTupleData(pCtx, pTuplePos);
if (pTuplePos->pageId != -1) {
int32_t numOfCols = pCtx->subsidiaries.num;
SFilePage* pPage = getBufPage(pCtx->pBuf, pageId);
bool* nullList = (bool*)p;
char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
bool* nullList = (bool*)((char*)pPage + offset);
char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
// todo set the offset value to optimize the performance.
for (int32_t j = 0; j < numOfCols; ++j) {
SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
// todo set the offset value to optimize the performance.
for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId);
ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes);
if (nullList[j]) {
colDataAppendNULL(pDstCol, rowIndex);
} else {
colDataAppend(pDstCol, rowIndex, pStart, false);
SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId);
ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes);
if (nullList[j]) {
colDataAppendNULL(pDstCol, rowIndex);
} else {
colDataAppend(pDstCol, rowIndex, pStart, false);
}
pStart += pDstCol->info.bytes;
}
pStart += pDstCol->info.bytes;
}
releaseBufPage(pCtx->pBuf, pPage);
}
}
@ -2756,15 +2752,15 @@ static FORCE_INLINE TSKEY getRowPTs(SColumnInfoData* pTsColInfo, int32_t rowInde
return *(TSKEY*)colDataGetData(pTsColInfo, rowIndex);
}
static void saveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, SFirstLastRes* pInfo) {
static void firstlastSaveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, SFirstLastRes* pInfo) {
if (pCtx->subsidiaries.num <= 0) {
return;
}
if (!pInfo->hasResult) {
doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock);
} else {
doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
updateTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
}
}
@ -2778,7 +2774,7 @@ static void doSaveCurrentVal(SqlFunctionCtx* pCtx, int32_t rowIndex, int64_t cur
memcpy(pInfo->buf, pData, pInfo->bytes);
pInfo->ts = currentTs;
saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
pInfo->hasResult = true;
}
@ -2982,7 +2978,7 @@ static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, S
pOutput->bytes = pInput->bytes;
memcpy(pOutput->buf, pInput->buf, pOutput->bytes);
saveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput);
firstlastSaveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput);
pOutput->hasResult = true;
}
@ -3087,7 +3083,7 @@ static void doSaveLastrow(SqlFunctionCtx* pCtx, char* pData, int32_t rowIndex, i
}
pInfo->ts = cts;
saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
pInfo->hasResult = true;
}
@ -3420,7 +3416,7 @@ int32_t topFunction(SqlFunctionCtx* pCtx) {
}
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) {
doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos);
pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pRes->nullTupleSaved = true;
}
return TSDB_CODE_SUCCESS;
@ -3448,7 +3444,7 @@ int32_t bottomFunction(SqlFunctionCtx* pCtx) {
}
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) {
doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos);
pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pRes->nullTupleSaved = true;
}
@ -3500,7 +3496,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
// save the data of this tuple
if (pCtx->subsidiaries.num > 0) {
doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
pItem->tuplePos = saveTupleData(pCtx, rowIndex, pSrcBlock);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_saveTuple i:%d, item:%p,pageId:%d, offset:%d\n", pEntryInfo->numOfRes, pItem, pItem->tuplePos.pageId,
@ -3524,7 +3520,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
// save the data of this tuple by over writing the old data
if (pCtx->subsidiaries.num > 0) {
doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
updateTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_copyTuple pageId:%d, offset:%d", pItem->tuplePos.pageId, pItem->tuplePos.offset);
@ -3541,38 +3537,13 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
* |(n columns, one bit for each column)| src column #1| src column #2|
* +------------------------------------+--------------+--------------+
*/
void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
SFilePage* pPage = NULL;
void* serializeTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SSubsidiaryResInfo* pSubsidiaryies, char* buf) {
char* nullList = buf;
char* pStart = (char*)(nullList + sizeof(bool) * pSubsidiaryies->num);
// todo refactor: move away
int32_t completeRowSize = pCtx->subsidiaries.num * sizeof(bool);
for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
completeRowSize += pc->pExpr->base.resSchema.bytes;
}
if (pCtx->curBufPage == -1) {
pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage);
pPage->num = sizeof(SFilePage);
} else {
pPage = getBufPage(pCtx->pBuf, pCtx->curBufPage);
if (pPage->num + completeRowSize > getBufPageSize(pCtx->pBuf)) {
// current page is all used, let's prepare a new buffer page
releaseBufPage(pCtx->pBuf, pPage);
pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage);
pPage->num = sizeof(SFilePage);
}
}
pPos->pageId = pCtx->curBufPage;
pPos->offset = pPage->num;
// keep the current row data, extract method
int32_t offset = 0;
bool* nullList = (bool*)((char*)pPage + pPage->num);
char* pStart = (char*)(nullList + sizeof(bool) * pCtx->subsidiaries.num);
for (int32_t i = 0; i < pCtx->subsidiaries.num; ++i) {
SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i];
for (int32_t i = 0; i < pSubsidiaryies->num; ++i) {
SqlFunctionCtx* pc = pSubsidiaryies->pCtx[i];
SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
int32_t srcSlotId = pFuncParam->pCol->slotId;
@ -3593,50 +3564,88 @@ void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock*
offset += pCol->info.bytes;
}
pPage->num += completeRowSize;
setBufPageDirty(pPage, true);
releaseBufPage(pCtx->pBuf, pPage);
#ifdef BUF_PAGE_DEBUG
qDebug("page_saveTuple pos:%p,pageId:%d, offset:%d\n", pPos, pPos->pageId, pPos->offset);
#endif
return buf;
}
void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
SFilePage* pPage = getBufPage(pCtx->pBuf, pPos->pageId);
static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length) {
STuplePos p = {0};
if (pHandle->pBuf != NULL) {
SFilePage* pPage = NULL;
int32_t numOfCols = pCtx->subsidiaries.num;
bool* nullList = (bool*)((char*)pPage + pPos->offset);
char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
int32_t offset = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i];
SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
int32_t srcSlotId = pFuncParam->pCol->slotId;
SColumnInfoData* pCol = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId);
if ((nullList[i] = colDataIsNull_s(pCol, rowIndex)) == true) {
offset += pCol->info.bytes;
continue;
}
char* p = colDataGetData(pCol, rowIndex);
if (IS_VAR_DATA_TYPE(pCol->info.type)) {
memcpy(pStart + offset, p, (pCol->info.type == TSDB_DATA_TYPE_JSON) ? getJsonValueLen(p) : varDataTLen(p));
if (pHandle->currentPage == -1) {
pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage);
pPage->num = sizeof(SFilePage);
} else {
memcpy(pStart + offset, p, pCol->info.bytes);
pPage = getBufPage(pHandle->pBuf, pHandle->currentPage);
if (pPage->num + length > getBufPageSize(pHandle->pBuf)) {
// current page is all used, let's prepare a new buffer page
releaseBufPage(pHandle->pBuf, pPage);
pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage);
pPage->num = sizeof(SFilePage);
}
}
offset += pCol->info.bytes;
p = (STuplePos) {.pageId = pHandle->currentPage, .offset = pPage->num};
memcpy(pPage->data + pPage->num, pBuf, length);
pPage->num += length;
setBufPageDirty(pPage, true);
releaseBufPage(pHandle->pBuf, pPage);
} else {
// other tuple save policy
}
setBufPageDirty(pPage, true);
releaseBufPage(pCtx->pBuf, pPage);
#ifdef BUF_PAGE_DEBUG
qDebug("page_copyTuple pos:%p, pageId:%d, offset:%d", pPos, pPos->pageId, pPos->offset);
#endif
return p;
}
STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock) {
if (pCtx->subsidiaries.rowLen == 0) {
int32_t rowLen = 0;
for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
rowLen += pc->pExpr->base.resSchema.bytes;
}
pCtx->subsidiaries.rowLen = rowLen + pCtx->subsidiaries.num * sizeof(bool);
pCtx->subsidiaries.buf = taosMemoryMalloc(pCtx->subsidiaries.rowLen);
}
char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf);
return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen);
}
static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STuplePos* pPos) {
if (pHandle->pBuf != NULL) {
SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId);
memcpy(pPage->data + pPos->offset, pBuf, length);
setBufPageDirty(pPage, true);
releaseBufPage(pHandle->pBuf, pPage);
} else {
}
return TSDB_CODE_SUCCESS;
}
static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf);
doUpdateTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, pPos);
return TSDB_CODE_SUCCESS;
}
static char* doLoadTupleData(SSerializeDataHandle* pHandle, const STuplePos* pPos) {
if (pHandle->pBuf != NULL) {
SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId);
char* p = pPage->data + pPos->offset;
releaseBufPage(pHandle->pBuf, pPage);
return p;
} else {
return NULL;
}
}
static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos) {
return doLoadTupleData(&pCtx->saveHandle, pPos);
}
int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
@ -3788,8 +3797,6 @@ int32_t spreadFunction(SqlFunctionCtx* pCtx) {
SColumnInfoData* pCol = pInput->pData[0];
int32_t start = pInput->startRowIndex;
int32_t numOfRows = pInput->numOfRows;
// check the valid data one by one
for (int32_t i = start; i < pInput->numOfRows + start; ++i) {
if (colDataIsNull_f(pCol->nullbitmap, i)) {
@ -4964,7 +4971,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da
if (pInfo->numSampled < pInfo->samples) {
sampleAssignResult(pInfo, data, pInfo->numSampled);
if (pCtx->subsidiaries.num > 0) {
doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[pInfo->numSampled]);
pInfo->tuplePos[pInfo->numSampled] = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
pInfo->numSampled++;
} else {
@ -4972,7 +4979,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da
if (j < pInfo->samples) {
sampleAssignResult(pInfo, data, j);
if (pCtx->subsidiaries.num > 0) {
doCopyTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]);
updateTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]);
}
}
}
@ -4995,7 +5002,7 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) {
}
if (pInfo->numSampled == 0 && pCtx->subsidiaries.num > 0 && !pInfo->nullTupleSaved) {
doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pInfo->nullTuplePos);
pInfo->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pInfo->nullTupleSaved = true;
}
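
The min/max, top/bottom, first/last and sample helpers above all switch from doSaveTupleData/doCopyTupleData, which filled an STuplePos output parameter, to saveTupleData/updateTupleData/loadTupleData built on SSerializeDataHandle: the row is serialized once into pCtx->subsidiaries.buf and then appended to (or overwritten in) the paged buffer. A rough sketch of the new flow, using only calls visible in this file; the wrapper function itself is hypothetical.

// Hypothetical wrapper showing the new bookkeeping flow for selectivity
// columns; saveTupleData / updateTupleData / loadTupleData are the functions
// introduced above, everything else is illustrative.
static void demoKeepTuple(SqlFunctionCtx* pCtx, int32_t rowIndex, bool firstTime, STuplePos* pPos) {
  if (pCtx->subsidiaries.num <= 0) {
    return;  // no selectivity columns to carry along
  }
  if (firstTime) {
    // serialize the row once and append it to the paged buffer
    *pPos = saveTupleData(pCtx, rowIndex, pCtx->pSrcBlock);
  } else {
    // overwrite the previously saved row in place, same page and offset
    updateTupleData(pCtx, rowIndex, pCtx->pSrcBlock, pPos);
  }
}

// At finalize time the stored position is resolved back into raw bytes,
// as setSelectivityValue above does:
//   const char* p       = loadTupleData(pCtx, pPos);
//   bool*       nullList = (bool*)p;
//   char*       pStart   = (char*)(nullList + numOfCols * sizeof(bool));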

View File

@ -101,6 +101,14 @@ bool fmIsBuiltinFunc(const char* pFunc) {
return NULL != taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc, strlen(pFunc));
}
EFunctionType fmGetFuncType(const char* pFunc) {
void* pVal = taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc, strlen(pFunc));
if (NULL != pVal) {
return funcMgtBuiltins[*(int32_t*)pVal].type;
}
return FUNCTION_TYPE_UDF;
}
EFuncDataRequired fmFuncDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow) {
if (fmIsUserDefinedFunc(pFunc->funcId) || pFunc->funcId < 0 || pFunc->funcId >= funcMgtBuiltinsNum) {
return FUNC_DATA_REQUIRED_DATA_LOAD;
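
fmGetFuncType() is new in this merge: it maps a function name to its EFunctionType and falls back to FUNCTION_TYPE_UDF when the name is not a builtin. A hedged usage sketch follows; the parser change further down uses exactly this shape, the wrapper and flags here are illustrative.

// Illustrative only; FUNCTION_TYPE_LAST_ROW and FUNCTION_TYPE_UDF are the
// cases the parser below cares about, other types fall through.
static void demoClassifyFunc(const char* name, bool* pHasLastRow, bool* pNeedUdf) {
  switch (fmGetFuncType(name)) {
    case FUNCTION_TYPE_LAST_ROW:
      *pHasLastRow = true;   // query will need the db cacheLast config
      break;
    case FUNCTION_TYPE_UDF:
      *pNeedUdf = true;      // reserve the UDF meta-cache entry
      break;
    default:
      break;
  }
}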

View File

@ -372,7 +372,7 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
pPageIdList = pList;
}
pSlot->info.data = getNewBufPage(pBucket->pBuffer, groupId, &pageId);
pSlot->info.data = getNewBufPage(pBucket->pBuffer, &pageId);
pSlot->info.pageId = pageId;
taosArrayPush(pPageIdList, &pageId);
}

View File

@ -81,7 +81,7 @@ static int32_t udfSpawnUdfd(SUdfdData* pData) {
taosDirName(path);
#endif
} else {
strncpy(path, tsProcPath, strlen(tsProcPath));
strncpy(path, tsProcPath, PATH_MAX);
taosDirName(path);
}
#ifdef WINDOWS

View File

@ -453,7 +453,7 @@ void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
goto _return;
}
taosCloseFile(&file);
strncpy(udf->path, path, strlen(path));
strncpy(udf->path, path, PATH_MAX);
tFreeSFuncInfo(pFuncInfo);
taosArrayDestroy(retrieveRsp.pFuncInfos);
msgInfo->code = 0;
@ -566,17 +566,17 @@ int32_t udfdLoadUdf(char *udfName, SUdf *udf) {
uv_dlsym(&udf->lib, processFuncName, (void **)(&udf->aggProcFunc));
char startFuncName[TSDB_FUNC_NAME_LEN + 6] = {0};
char *startSuffix = "_start";
strncpy(startFuncName, processFuncName, strlen(processFuncName));
strncpy(startFuncName, processFuncName, sizeof(startFuncName));
strncat(startFuncName, startSuffix, strlen(startSuffix));
uv_dlsym(&udf->lib, startFuncName, (void **)(&udf->aggStartFunc));
char finishFuncName[TSDB_FUNC_NAME_LEN + 7] = {0};
char *finishSuffix = "_finish";
strncpy(finishFuncName, processFuncName, strlen(processFuncName));
strncpy(finishFuncName, processFuncName, sizeof(finishFuncName));
strncat(finishFuncName, finishSuffix, strlen(finishSuffix));
uv_dlsym(&udf->lib, finishFuncName, (void **)(&udf->aggFinishFunc));
char mergeFuncName[TSDB_FUNC_NAME_LEN + 6] = {0};
char *mergeSuffix = "_merge";
strncpy(finishFuncName, processFuncName, strlen(processFuncName));
strncpy(finishFuncName, processFuncName, sizeof(finishFuncName));
strncat(finishFuncName, mergeSuffix, strlen(mergeSuffix));
uv_dlsym(&udf->lib, finishFuncName, (void **)(&udf->aggMergeFunc));
}
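
The copies above were previously bounded by strlen(src); this merge bounds them by the destination capacity instead (PATH_MAX for the path buffers, sizeof(buf) for the stack-allocated name buffers). A standalone sketch of the safer pattern; the buffer and function names are illustrative, not taken from udfd.c.

#include <stdio.h>
#include <string.h>

// strncpy bounded by strlen(src) copies exactly the source bytes and never
// writes a terminating NUL; bounding by the destination size keeps the copy
// inside the buffer, and terminating explicitly covers the truncation case.
static void copyName(char* dst, size_t cap, const char* src) {
  strncpy(dst, src, cap - 1);
  dst[cap - 1] = '\0';
}

int main(void) {
  char funcName[32] = {0};
  copyName(funcName, sizeof(funcName), "my_udf_process");
  printf("%s\n", funcName);
  return 0;
}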

View File

@ -171,6 +171,7 @@ TExeCond tCompare(__compar_fn_t func, int8_t cmptype, void* a, void* b, int8_t d
return tDoCompare(func, cmptype, &va, &vb);
}
assert(0);
return BREAK;
#endif
}
TExeCond tDoCompare(__compar_fn_t func, int8_t comparType, void* a, void* b) {

View File

@ -39,7 +39,8 @@ static void idxGenLRUKey(char* buf, const char* path, int32_t blockId) {
}
static int idxFileCtxDoWrite(IFileCtx* ctx, uint8_t* buf, int len) {
if (ctx->type == TFILE) {
assert(len == taosWriteFile(ctx->file.pFile, buf, len));
int nwr = taosWriteFile(ctx->file.pFile, buf, len);
assert(nwr == len);
} else {
memcpy(ctx->mem.buf + ctx->offset, buf, len);
}
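
The write above used to happen inside assert(), so compiling with NDEBUG would have removed the write along with the check; the merge hoists the call out and asserts only on its result. A self-contained illustration of the same pattern, using fwrite in place of taosWriteFile to keep it runnable.

#include <assert.h>
#include <stdio.h>

// Keep side effects out of assert(): the call runs unconditionally, only the
// length check disappears in release (NDEBUG) builds.
static size_t doWrite(FILE* fp, const void* buf, size_t len) {
  size_t nwr = fwrite(buf, 1, len, fp);
  assert(nwr == len);
  return nwr;
}

int main(void) {
  FILE* fp = fopen("idx_demo.bin", "wb");
  if (fp == NULL) {
    return 1;
  }
  const char payload[] = "index block";
  doWrite(fp, payload, sizeof(payload) - 1);
  fclose(fp);
  return 0;
}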

View File

@ -97,16 +97,23 @@ typedef struct SCollectMetaKeyCxt {
typedef struct SCollectMetaKeyFromExprCxt {
SCollectMetaKeyCxt* pComCxt;
bool hasLastRow;
int32_t errCode;
} SCollectMetaKeyFromExprCxt;
static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt);
static EDealRes collectMetaKeyFromFunction(SCollectMetaKeyFromExprCxt* pCxt, SFunctionNode* pFunc) {
if (fmIsBuiltinFunc(pFunc->functionName)) {
return DEAL_RES_CONTINUE;
switch (fmGetFuncType(pFunc->functionName)) {
case FUNCTION_TYPE_LAST_ROW:
pCxt->hasLastRow = true;
break;
case FUNCTION_TYPE_UDF:
pCxt->errCode = reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache);
break;
default:
break;
}
pCxt->errCode = reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache);
return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
}
@ -136,9 +143,6 @@ static int32_t collectMetaKeyFromRealTableImpl(SCollectMetaKeyCxt* pCxt, const c
if (TSDB_CODE_SUCCESS == code && (0 == strcmp(pTable, TSDB_INS_TABLE_DNODE_VARIABLES))) {
code = reserveDnodeRequiredInCache(pCxt->pMetaCache);
}
if (TSDB_CODE_SUCCESS == code) {
code = reserveDbCfgInCache(pCxt->pParseCxt->acctId, pDb, pCxt->pMetaCache);
}
return code;
}
@ -185,9 +189,19 @@ static int32_t collectMetaKeyFromSetOperator(SCollectMetaKeyCxt* pCxt, SSetOpera
return code;
}
static int32_t reserveDbCfgForLastRow(SCollectMetaKeyCxt* pCxt, SNode* pTable) {
if (NULL == pTable || QUERY_NODE_REAL_TABLE != nodeType(pTable)) {
return TSDB_CODE_SUCCESS;
}
return reserveDbCfgInCache(pCxt->pParseCxt->acctId, ((SRealTableNode*)pTable)->table.dbName, pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromSelect(SCollectMetaKeyCxt* pCxt, SSelectStmt* pStmt) {
SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS};
SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .hasLastRow = false, .errCode = TSDB_CODE_SUCCESS};
nodesWalkSelectStmt(pStmt, SQL_CLAUSE_FROM, collectMetaKeyFromExprImpl, &cxt);
if (TSDB_CODE_SUCCESS == cxt.errCode && cxt.hasLastRow) {
cxt.errCode = reserveDbCfgForLastRow(pCxt, pStmt->pFromTable);
}
return cxt.errCode;
}
@ -365,7 +379,7 @@ static int32_t collectMetaKeyFromShowStables(SCollectMetaKeyCxt* pCxt, SShowStmt
}
static int32_t collectMetaKeyFromShowStreams(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS,
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STREAMS,
pCxt->pMetaCache);
}
@ -411,7 +425,7 @@ static int32_t collectMetaKeyFromShowVgroups(SCollectMetaKeyCxt* pCxt, SShowStmt
}
static int32_t collectMetaKeyFromShowTopics(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TOPICS,
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TOPICS,
pCxt->pMetaCache);
}
@ -506,7 +520,7 @@ static int32_t collectMetaKeyFromShowBlockDist(SCollectMetaKeyCxt* pCxt, SShowTa
}
static int32_t collectMetaKeyFromShowSubscriptions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_SUBSCRIPTIONS,
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SUBSCRIPTIONS,
pCxt->pMetaCache);
}

View File

@ -1669,6 +1669,10 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache
pDb = taosHashIterate(context.pDbFNameHashObj, pDb);
}
}
if (pContext->pStmtCb) {
context.pVgroupsHashObj = NULL;
context.pTableBlockHashObj = NULL;
}
destroyInsertParseContext(&context);
return code;
}
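
When a statement callback is present, the vgroup and table-block hash tables stay alive for the prepared-statement path, so the parse context's pointers are cleared before destroyInsertParseContext() runs and the destructor skips them. A minimal sketch of that hand-off pattern with stand-in names; the real SInsertParseContext fields are not reproduced here.

#include <stdbool.h>
#include <stdlib.h>

// Stand-in context: clearing a member before the destructor runs transfers
// ownership of that member to whoever still holds a reference to it.
typedef struct DemoParseCtx {
  void* pVgroupsHashObj;
  void* pTableBlockHashObj;
} DemoParseCtx;

static void demoDestroyCtx(DemoParseCtx* pCtx) {
  free(pCtx->pVgroupsHashObj);      // no-op when the member was handed off (NULL)
  free(pCtx->pTableBlockHashObj);
}

static void demoFinishParse(DemoParseCtx* pCtx, bool keepForStmt) {
  if (keepForStmt) {
    pCtx->pVgroupsHashObj = NULL;   // the statement layer now owns these
    pCtx->pTableBlockHashObj = NULL;
  }
  demoDestroyCtx(pCtx);             // frees only what the parser still owns
}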

View File

@ -142,8 +142,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
},
{
.showType = QUERY_NODE_SHOW_STREAMS_STMT,
.pDbName = TSDB_PERFORMANCE_SCHEMA_DB,
.pTableName = TSDB_PERFS_TABLE_STREAMS,
.pDbName = TSDB_INFORMATION_SCHEMA_DB,
.pTableName = TSDB_INS_TABLE_STREAMS,
.numOfShowCols = 1,
.pShowCols = {"stream_name"}
},
@ -184,8 +184,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
},
{
.showType = QUERY_NODE_SHOW_TOPICS_STMT,
.pDbName = TSDB_PERFORMANCE_SCHEMA_DB,
.pTableName = TSDB_PERFS_TABLE_TOPICS,
.pDbName = TSDB_INFORMATION_SCHEMA_DB,
.pTableName = TSDB_INS_TABLE_TOPICS,
.numOfShowCols = 1,
.pShowCols = {"topic_name"}
},
@ -240,8 +240,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
},
{
.showType = QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
.pDbName = TSDB_PERFORMANCE_SCHEMA_DB,
.pTableName = TSDB_PERFS_TABLE_SUBSCRIPTIONS,
.pDbName = TSDB_INFORMATION_SCHEMA_DB,
.pTableName = TSDB_INS_TABLE_SUBSCRIPTIONS,
.numOfShowCols = 1,
.pShowCols = {"*"}
},
@ -2160,15 +2160,16 @@ static int32_t setTableIndex(STranslateContext* pCxt, SName* pName, SRealTableNo
return TSDB_CODE_SUCCESS;
}
static int32_t setTableCacheLastMode(STranslateContext* pCxt, SName* pName, SRealTableNode* pRealTable) {
if (TSDB_SYSTEM_TABLE == pRealTable->pMeta->tableType) {
static int32_t setTableCacheLastMode(STranslateContext* pCxt, SSelectStmt* pSelect) {
if (!pSelect->hasLastRowFunc || QUERY_NODE_REAL_TABLE != nodeType(pSelect->pFromTable)) {
return TSDB_CODE_SUCCESS;
}
SDbCfgInfo dbCfg = {0};
int32_t code = getDBCfg(pCxt, pRealTable->table.dbName, &dbCfg);
SRealTableNode* pTable = (SRealTableNode*)pSelect->pFromTable;
SDbCfgInfo dbCfg = {0};
int32_t code = getDBCfg(pCxt, pTable->table.dbName, &dbCfg);
if (TSDB_CODE_SUCCESS == code) {
pRealTable->cacheLastMode = dbCfg.cacheLast;
pTable->cacheLastMode = dbCfg.cacheLast;
}
return code;
}
@ -2192,9 +2193,6 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
if (TSDB_CODE_SUCCESS == code) {
code = setTableIndex(pCxt, &name, pRealTable);
}
if (TSDB_CODE_SUCCESS == code) {
code = setTableCacheLastMode(pCxt, &name, pRealTable);
}
}
if (TSDB_CODE_SUCCESS == code) {
pRealTable->table.precision = pRealTable->pMeta->tableInfo.precision;
@ -2273,10 +2271,14 @@ static SNode* createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr) {
if (QUERY_NODE_COLUMN == nodeType(pExpr)) {
SColumnNode* pCol = (SColumnNode*)pExpr;
len = snprintf(buf, sizeof(buf), "%s(%s.%s)", pSrcFunc->functionName, pCol->tableAlias, pCol->colName);
strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pCol->colName);
strncpy(pFunc->node.userAlias, buf, TMIN(len, sizeof(pFunc->node.userAlias) - 1));
} else {
len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pExpr->aliasName);
strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
strncpy(pFunc->node.userAlias, buf, TMIN(len, sizeof(pFunc->node.userAlias) - 1));
}
strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
return (SNode*)pFunc;
}
@ -3140,6 +3142,9 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) {
code = replaceOrderByAliasForSelect(pCxt, pSelect);
}
if (TSDB_CODE_SUCCESS == code) {
code = setTableCacheLastMode(pCxt, pSelect);
}
return code;
}
@ -6403,8 +6408,8 @@ typedef struct SVgroupDropTableBatch {
char dbName[TSDB_DB_NAME_LEN];
} SVgroupDropTableBatch;
static void addDropTbReqIntoVgroup(SHashObj* pVgroupHashmap, SDropTableClause* pClause, SVgroupInfo* pVgInfo) {
SVDropTbReq req = {.name = pClause->tableName, .igNotExists = pClause->ignoreNotExists};
static void addDropTbReqIntoVgroup(SHashObj* pVgroupHashmap, SDropTableClause* pClause, SVgroupInfo* pVgInfo, uint64_t suid) {
SVDropTbReq req = {.name = pClause->tableName, .suid = suid, .igNotExists = pClause->ignoreNotExists};
SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pVgInfo->vgId, sizeof(pVgInfo->vgId));
if (NULL == pTableBatch) {
SVgroupDropTableBatch tBatch = {0};
@ -6445,7 +6450,7 @@ static int32_t buildDropTableVgroupHashmap(STranslateContext* pCxt, SDropTableCl
code = getTableHashVgroup(pCxt, pClause->dbName, pClause->tableName, &info);
}
if (TSDB_CODE_SUCCESS == code) {
addDropTbReqIntoVgroup(pVgroupHashmap, pClause, &info);
addDropTbReqIntoVgroup(pVgroupHashmap, pClause, &info, pTableMeta->suid);
}
over:

Some files were not shown because too many files have changed in this diff.