merge 3.0

This commit is contained in:
Xiaoyu Wang 2022-08-11 20:30:02 +08:00
commit 7176bb9198
362 changed files with 6095 additions and 38285 deletions

.gitignore vendored
View File

@@ -118,4 +118,12 @@ contrib/*
!contrib/test
sql
debug*/
.env
.env
tools/README
tools/LICENSE
tools/README.1ST
tools/THANKS
tools/NEWS
tools/COPYING
tools/BUGS
tools/taos-tools

View File

@@ -38,40 +38,21 @@ def pre_test(){
sh '''
cd ${WK}
git reset --hard
git remote prune origin
git fetch
cd ${WKC}
git reset --hard
git clean -fxd
git remote prune origin
git fetch
'''
script {
if (env.CHANGE_TARGET == 'master') {
sh '''
cd ${WK}
git checkout master
cd ${WKC}
git checkout master
'''
} else if(env.CHANGE_TARGET == '2.0') {
sh '''
cd ${WK}
git checkout 2.0
cd ${WKC}
git checkout 2.0
'''
} else if(env.CHANGE_TARGET == '3.0') {
sh '''
cd ${WK}
git checkout 3.0
cd ${WKC}
git checkout 3.0
'''
} else {
sh '''
cd ${WK}
git checkout develop
cd ${WKC}
git checkout develop
'''
}
sh '''
cd ${WK}
git checkout ''' + env.CHANGE_TARGET + '''
cd ${WKC}
git checkout ''' + env.CHANGE_TARGET + '''
'''
}
if (env.CHANGE_URL =~ /\/TDengine\//) {
sh '''
@@ -169,49 +150,24 @@ def pre_test_win(){
bat '''
cd %WIN_INTERNAL_ROOT%
git reset --hard
git remote prune origin
git fetch
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git reset --hard
git remote prune origin
git fetch
'''
script {
if (env.CHANGE_TARGET == 'master') {
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout master
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout master
'''
} else if(env.CHANGE_TARGET == '2.0') {
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout 2.0
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout 2.0
'''
} else if(env.CHANGE_TARGET == '3.0') {
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout 3.0
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout 3.0
'''
} else {
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout develop
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout develop
'''
}
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout ''' + env.CHANGE_TARGET + '''
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout ''' + env.CHANGE_TARGET + '''
'''
}
script {
if (env.CHANGE_URL =~ /\/TDengine\//) {
@@ -309,6 +265,7 @@ def pre_test_build_win() {
'''
bat '''
cd %WIN_CONNECTOR_ROOT%
python.exe -m pip install --upgrade pip
python -m pip install .
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
@@ -327,6 +284,7 @@ def run_win_test() {
bat '''
echo "windows test ..."
cd %WIN_CONNECTOR_ROOT%
python.exe -m pip install --upgrade pip
python -m pip install .
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
ls -l C:\\Windows\\System32\\taos.dll

View File

@@ -20,33 +20,27 @@
# Introduction to TDengine
TDengine is a high-performance, distributed time-series database (Time-Series Database) with SQL support. Beyond its time-series database features, it also provides caching, data subscription, stream computing, and other functions to minimize the complexity of development and operations, and its core code, including the cluster feature, is fully open source (license: AGPL v3.0). Compared with other time-series databases, TDengine has the following characteristics:
TDengine is an open-source, high-performance, cloud-native time-series database (Time-Series Database, TSDB). TDengine can be widely applied in IoT, Industrial Internet, Connected Vehicles, IT operations, finance, and other fields. Beyond its core time-series database features, TDengine also provides caching, data subscription, stream computing, and other functions, making it a minimalist time-series data processing platform that minimizes system design complexity and reduces development and operating costs. TDengine's main advantages are as follows:
- **High Performance**: with its innovative storage engine design, TDengine is more than 10x faster than general-purpose databases for both data ingestion and queries, far outperforming other time-series databases, while also greatly reducing storage space
- High Performance: with its innovative storage engine design, TDengine is more than 10x faster than general-purpose databases for both data ingestion and queries, far outperforming other time-series databases, while using less than 1/10 of the storage of general-purpose databases
- **Distributed**: with its natively distributed design, TDengine provides horizontal scalability; simply adding nodes yields more data processing power, while the multi-replica mechanism guarantees high availability
- Cloud Native: with a natively distributed design that takes full advantage of cloud platforms, TDengine provides horizontal scalability with elasticity, resilience, and observability, supports Kubernetes deployment, and can run on public, private, and hybrid clouds
- **SQL Support**: TDengine uses SQL as its query language, reducing learning and migration costs, while providing SQL extensions for time-series-specific analytics and supporting convenient, flexible schemaless data writes
- Minimalist Time-Series Data Platform: TDengine has built-in message queuing, caching, stream computing, and other functions, so applications no longer need to integrate Kafka/Redis/HBase/Spark and other software, greatly reducing system complexity and lowering development and operating costs
- **All in One**: database, message queue, caching, and stream computing are merged into one product, so applications no longer need to integrate Kafka/Redis/HBase/Spark and other software, greatly reducing application development and maintenance costs
- Analytics: supports SQL, with SQL extensions for time-series-specific analysis; through super tables, separation of storage and compute, partitioning and sharding, pre-computation, user-defined functions, and other technologies, TDengine has powerful analytics capabilities
- **Zero Management**: installation and clustering are done in seconds, with no dependencies and no need for manual sharding or partitioning; the system's running status can be monitored seamlessly with Grafana or other operations tools
- Simple and Easy to Use: no dependencies of any kind, with installation and clustering done in seconds; provides a REST interface and connectors for various languages, integrates seamlessly with many third-party tools, and offers a command-line program for administration and ad-hoc queries as well as a variety of maintenance tools
- **Zero Learning Cost**: uses the SQL query language and supports Python, Java, C/C++, Go, Rust, Node.js, and other programming languages; similar to MySQL, so there is zero learning cost.
- **Seamless Integration**: integrates seamlessly with third-party tools such as Telegraf, Grafana, EMQX, Prometheus, StatsD, collectd, Matlab, and R without a single line of code.
- **Interactive Console**: through the command-line console, you can run ad-hoc queries, perform database operations, administration, and cluster maintenance by executing SQL statements, without any programming.
TDengine can be widely applied in IoT, Industrial Internet, Connected Vehicles, IT operations, energy, finance, and other fields, enabling the TB- or even PB-scale data generated daily by large numbers of devices and data collectors to be processed efficiently in real time, monitoring and alerting on business status, and mining business value from big data.
- Open Source Core: TDengine's core code, including the cluster feature, is fully open source; as of August 1, 2022, there are more than 135.9k running instances worldwide, with 18.7k GitHub stars and 4.4k forks, and an active community.
# Documentation
TDengine uses the traditional relational database model, so you can use it much like the relational database MySQL. However, because it introduces the concepts of super tables and one table per data collection point, we recommend reading the following documentation carefully before use, especially [Data Model](https://www.taosdata.com/cn/documentation/architecture) and [Data Modeling](https://www.taosdata.com/cn/documentation/model). Besides this documentation, you are also welcome to [download the product white paper](https://www.taosdata.com/downloads/TDengine%20White%20Paper.pdf).
For the complete user manual, system architecture, and more details, please refer to the [TDengine Documentation](https://docs.taosdata.com) or the [English Version](https://docs.tdengine.com).
# Building
At the moment, the TDengine 2.0 server can only be installed and run on Linux; Windows, macOS, and other systems will be supported later. The client can be installed and run on Windows or Linux. Applications on any OS can also use the RESTful interface to connect to the server taosd. Supported CPUs are X64/ARM64/MIPS64/Alpha64, with ARM32, RISC-V, and other CPU architectures to follow. You can choose to install from [source](https://www.taosdata.com/cn/getting-started/#通过源码安装) or from [installation packages](https://www.taosdata.com/cn/getting-started/#通过安装包安装) as needed. This quick guide only applies to installing from source.
At the moment, the TDengine 2.0 server can only be installed and run on Linux; Windows, macOS, and other systems will be supported later. The client can be installed and run on Windows or Linux. Applications on any OS can also use the RESTful interface to connect to the server taosd. Supported CPUs are X64/ARM64/MIPS64/Alpha64, with ARM32, RISC-V, and other CPU architectures to follow. You can choose to install from source or from [installation packages](https://docs.taosdata.com/get-started/package/) as needed. This quick guide only applies to installing from source.
## Install tools
@@ -188,7 +182,7 @@ apt install autoconf
cmake .. -DJEMALLOC_ENABLED=true
```
On X86-64, X86, arm64, arm32, and mips64 platforms, the TDengine build script can detect the host machine's architecture automatically. You can also set the CPUTYPE parameter manually to specify the CPU type, such as aarch64 or aarch32.
On X86-64, X86, and arm64 platforms, the TDengine build script can detect the host machine's architecture automatically. You can also set the CPUTYPE parameter manually to specify the CPU type, such as aarch64.
aarch64
@@ -196,18 +190,6 @@ aarch64
cmake .. -DCPUTYPE=aarch64 && cmake --build .
```
aarch32
```bash
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
mips64
```bash
cmake .. -DCPUTYPE=mips64 && cmake --build .
```
### On Windows
If you are using Visual Studio 2013:
@@ -351,19 +333,14 @@ Query OK, 2 row(s) in set (0.001700s)
TDengine provides a rich set of application development interfaces, including C/C++, Java, Python, Go, Node.js, C#, RESTful, and more, to help users develop applications quickly:
- [Java](https://www.taosdata.com/cn/documentation/connector/java)
- [Java](https://docs.taosdata.com/reference/connector/java/)
- [C/C++](https://www.taosdata.com/cn/documentation/connector#c-cpp)
- [Python](https://www.taosdata.com/cn/documentation/connector#python)
- [Go](https://www.taosdata.com/cn/documentation/connector#go)
- [RESTful API](https://www.taosdata.com/cn/documentation/connector#restful)
- [Node.js](https://www.taosdata.com/cn/documentation/connector#nodejs)
- [Rust](https://www.taosdata.com/cn/documentation/connector/rust)
- [Python](https://docs.taosdata.com/reference/connector/python/)
- [Go](https://docs.taosdata.com/reference/connector/go/)
- [Node.js](https://docs.taosdata.com/reference/connector/node/)
- [Rust](https://docs.taosdata.com/reference/connector/rust/)
- [C#](https://docs.taosdata.com/reference/connector/csharp/)
- [RESTful API](https://docs.taosdata.com/reference/rest-api/)
## Third-Party Connectors
@@ -372,6 +349,7 @@ There are also some very friendly third-party connectors in the TDengine community ecosystem that you can use.
- [Rust Bindings](https://github.com/songtianyi/tdengine-rust-bindings/tree/master/examples)
- [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos)
- [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/examples/lua)
- [PHP](https://www.taosdata.com/en/documentation/connector#c-cpp)
# How to run and add test cases

View File

@@ -20,30 +20,23 @@ English | [简体中文](README-CN.md) | We are hiring, check [here](https://tde
# What is TDengine
TDengine is a high-performance, scalable time-series database with SQL support. Its code, including the cluster feature, is open source under [GNU AGPL v3.0](http://www.gnu.org/licenses/agpl-3.0.html). Besides the database, it provides caching, stream processing, data subscription and other functionalities to reduce the complexity and cost of development and operation. TDengine differentiates itself from other TSDBs with the following advantages.
TDengine is an open source, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. Below are the most outstanding advantages of TDengine:
- **High Performance**: TDengine outperforms other time series databases in data ingestion and querying while significantly reducing storage cost and compute costs, with an innovatively designed and purpose-built storage engine.
- High-Performance: TDengine is the only time-series database that solves the high-cardinality issue, supporting billions of data collection points while outperforming other time-series databases in data ingestion, querying, and data compression.
- **Scalable**: TDengine provides out-of-box scalability and high-availability through its native distributed design. Nodes can be added through simple configuration to achieve greater data processing power. In addition, this feature is open source.
- Simplified Solution: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
- **SQL Support**: TDengine uses SQL as the query language, thereby reducing learning and migration costs, while adding SQL extensions to handle time-series data better, and supporting convenient and flexible schemaless data ingestion.
- Cloud Native: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for Kubernetes deployment, and full observability, TDengine can be deployed on public, private, or hybrid clouds.
- **All in One**: TDengine has built-in caching, stream processing and data subscription functions, it is no longer necessary to integrate Kafka/Redis/HBase/Spark or other software in some scenarios. It makes the system architecture much simpler and easy to maintain.
- Open Source: TDengine's core modules, including the cluster feature, are all available under open source licenses. It has gathered 18.7k stars on GitHub, an active developer community, and over 137k running instances worldwide.
- **Seamless Integration**: Without a single line of code, TDengine provide seamless integration with third-party tools such as Telegraf, Grafana, EMQX, Prometheus, StatsD, collectd, etc. More will be integrated.
- Ease of Use: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, a simplified solution, and seamless integration with third-party tools. For data users, it gives easy data access.
- **Zero Management**: Installation and cluster setup can be done in seconds. Data partitioning and sharding are executed automatically. TDengine's running status can be monitored via Grafana or other DevOps tools.
- **Zero Learning Cost**: With SQL as the query language, support for ubiquitous tools like Python, Java, C/C++, Go, Rust, Node.js connectors, there is zero learning cost.
- **Interactive Console**: TDengine provides convenient console access to the database to run ad hoc queries, maintain the database, or manage the cluster without any programming.
TDengine can be widely applied to Internet of Things (IoT), Connected Vehicles, Industrial IoT, DevOps, energy, finance and many other scenarios.
- Easy Data Analytics: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
# Documentation
For user manual, system design and architecture, engineering blogs, refer to [TDengine Documentation](https://www.taosdata.com/en/documentation/)(中文版请点击[这里](https://www.taosdata.com/cn/documentation20/))
for details. The documentation from our website can also be downloaded locally from _documentation/tdenginedocs-en_ or _documentation/tdenginedocs-cn_.
For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) (中文版请点击[这里](https://docs.taosdata.com))
# Building
@@ -205,8 +198,8 @@ apt install autoconf
cmake .. -DJEMALLOC_ENABLED=true
```
TDengine build script can detect the host machine's architecture on X86-64, X86, arm64, arm32 and mips64 platform.
You can also specify CPUTYPE option like aarch64 or aarch32 too if the detection result is not correct:
The TDengine build script can detect the host machine's architecture on the X86-64, X86, and arm64 platforms.
You can also specify the CPUTYPE option, such as aarch64, if the detection result is not correct:
aarch64:
@@ -214,18 +207,6 @@ aarch64:
cmake .. -DCPUTYPE=aarch64 && cmake --build .
```
aarch32:
```bash
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
mips64:
```bash
cmake .. -DCPUTYPE=mips64 && cmake --build .
```
### On Windows platform
If you use Visual Studio 2013, please open a command window by executing "cmd.exe".
@@ -381,13 +362,14 @@ Query OK, 2 row(s) in set (0.001700s)
TDengine provides abundant developing tools for users to develop on TDengine. Follow the links below to find your desired connectors and relevant documentation.
- [Java](https://www.taosdata.com/en/documentation/connector/java)
- [C/C++](https://www.taosdata.com/en/documentation/connector#c-cpp)
- [Python](https://www.taosdata.com/en/documentation/connector#python)
- [Go](https://www.taosdata.com/en/documentation/connector#go)
- [RESTful API](https://www.taosdata.com/en/documentation/connector#restful)
- [Node.js](https://www.taosdata.com/en/documentation/connector#nodejs)
- [Rust](https://www.taosdata.com/en/documentation/connector/rust)
- [Java](https://docs.taosdata.com/reference/connector/java/)
- [C/C++](https://docs.taosdata.com/reference/connector/cpp/)
- [Python](https://docs.taosdata.com/reference/connector/python/)
- [Go](https://docs.taosdata.com/reference/connector/go/)
- [Node.js](https://docs.taosdata.com/reference/connector/node/)
- [Rust](https://docs.taosdata.com/reference/connector/rust/)
- [C#](https://docs.taosdata.com/reference/connector/csharp/)
- [RESTful API](https://docs.taosdata.com/reference/rest-api/)
## Third Party Connectors
@@ -396,21 +378,13 @@ The TDengine community has also kindly built some of their own connectors! Follo
- [Rust Bindings](https://github.com/songtianyi/tdengine-rust-bindings/tree/master/examples)
- [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos)
- [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/tests/examples/lua)
- [PHP](https://www.taosdata.com/en/documentation/connector#c-cpp)
# How to run the test cases and how to add a new test case
TDengine's test framework and all test cases are fully open source.
Please refer to [this document](https://github.com/taosdata/TDengine/blob/develop/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md) for how to run test and develop new test case.
# TDengine Roadmap
- Support event-driven stream computing
- Support user defined functions
- Support MQTT connection
- Support OPC connection
- Support Hadoop, Spark connections
- Support Tableau and other BI tools
# Contribute to TDengine
Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to the project.

View File

@@ -22,7 +22,9 @@ ELSEIF (TD_WINDOWS)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosd.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/udfd.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosBenchmark.exe DESTINATION .)
IF (BUILD_TOOLS)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosBenchmark.exe DESTINATION .)
ENDIF ()
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.38-dist.jar DESTINATION connector/jdbc)

View File

@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 2d68404
GIT_TAG 11d23e5
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

Binary file not shown. (Before: 8.8 KiB; After: 37 KiB)

View File

@@ -1,3 +1,2 @@
```rust
{{#include docs/examples/rust/schemalessexample/examples/influxdb_line_example.rs}}
```

View File

@@ -1,3 +1,2 @@
```rust
{{#include docs/examples/rust/schemalessexample/examples/opentsdb_json_example.rs}}
```

View File

@@ -1,3 +1,2 @@
```rust
{{#include docs/examples/rust/schemalessexample/examples/opentsdb_telnet_example.rs}}
```

View File

@@ -17,7 +17,6 @@ Currently, TDengine's native interface connectors can support platforms such as
| **X86 64bit** | **Win32** | ● | ● | ● | ● | ○ | ○ | ● |
| **X86 32bit** | **Win32** | ○ | ○ | ○ | ○ | ○ | ○ | ● |
| **ARM64** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
| **ARM32** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
| **MIPS** | **Linux** | ○ | ○ | ○ | ○ | ○ | ○ | ○ |
Where ● means the official test verification passed, ○ means the unofficial test verification passed, -- means no assurance.

View File

@@ -10,16 +10,14 @@ import TabItem from '@theme/TabItem';
import Preparation from "./_preparation.mdx"
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
import RustInfluxLine from "../../07-develop/03-insert-data/_rust_line.mdx"
import RustOpenTSDBTelnet from "../../07-develop/03-insert-data/_rust_opts_telnet.mdx"
import RustOpenTSDBJson from "../../07-develop/03-insert-data/_rust_opts_json.mdx"
import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
`libtaos` is the official Rust language connector for TDengine. Rust developers can develop applications to access the TDengine instance data.
[`taos`][taos] is the official Rust language connector for TDengine. Rust developers can develop applications to access the TDengine instance data.
`libtaos` provides two ways to establish connections. One is the **Native Connection**, which connects to TDengine instances via the TDengine client driver (taosc). The other is **REST connection**, which connects to TDengine instances via taosAdapter's REST interface.
The Rust connector provides two ways to establish connections. One is the **Native Connection**, which connects to TDengine instances via the TDengine client driver (taosc). The other is the **WebSocket connection**, which connects to TDengine instances via the taosAdapter service.
The source code for `libtaos` is hosted on [GitHub](https://github.com/taosdata/libtaos-rs).
The source code is hosted on [taosdata/taos-connector-rust](https://github.com/taosdata/taos-connector-rust).
## Supported platforms
@@ -30,119 +28,195 @@ REST connections are supported on all platforms that can run Rust.
Please refer to [version support list](/reference/connector#version-support).
The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 2.4 or higher to avoid known issues.
The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues.
## Installation
### Pre-installation
* Install the Rust development toolchain
* If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver)
### Adding libtaos dependencies
### Add dependencies
Add the [libtaos][libtaos] dependency to the [Rust](https://rust-lang.org) project as follows, depending on the connection method selected.
Add the dependency to the [Rust](https://rust-lang.org) project as follows, depending on the connection method selected.
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
<Tabs defaultValue="default">
<TabItem value="default" label="Both">
Add [libtaos][libtaos] to the `Cargo.toml` file.
Add [taos] to the `Cargo.toml` file.
```toml
[dependencies]
# use default feature
libtaos = "*"
taos = "*"
```
</TabItem>
<TabItem value="rest" label="REST connection">
<TabItem value="native" label="Native only">
Add [libtaos][libtaos] to the `Cargo.toml` file and enable the `rest` feature.
Add [taos] to the `Cargo.toml` file.
```toml
[dependencies]
# use rest feature
libtaos = { version = "*", features = ["rest"]}
taos = { version = "*", default-features = false, features = ["native"] }
```
</TabItem>
<TabItem value="rest" label="Websocket only">
Add [taos] to the `Cargo.toml` file and enable the `ws` feature.
```toml
[dependencies]
taos = { version = "*", default-features = false, features = ["ws"] }
```
</TabItem>
</Tabs>
### Using connection pools
Please enable the `r2d2` feature in `Cargo.toml`.
```toml
[dependencies]
# with taosc
libtaos = { version = "*", features = ["r2d2"] }
# or rest
libtaos = { version = "*", features = ["rest", "r2d2"] }
```
## Create a connection
The [TaosCfgBuilder] provides the user with an API in the form of a constructor for the subsequent creation of connections or use of connection pools.
In the Rust connector, a DSN connection string is used to build the connection. For example,
```rust
let cfg: TaosCfg = TaosCfgBuilder::default()
.ip("127.0.0.1")
.user("root")
.pass("taosdata")
.db("log") // do not set if not require a default database.
.port(6030u16)
.build()
.expect("TaosCfg builder error");
}
let builder = TaosBuilder::from_dsn("taos://")?;
```
You can now use this object to create the connection.
You can now use this builder object to create connections.
```rust
let conn = cfg.connect()? ;
let conn = builder.build()?;
```
More than one connection can be created from the same builder.
```rust
let conn = cfg.connect()? ;
let conn2 = cfg.connect()? ;
let conn1 = builder.build()?;
let conn2 = builder.build()?;
```
You can use connection pools in applications.
DSN is short for **D**ata **S**ource **N**ame, [a string used to describe a connection to a data source](https://en.wikipedia.org/wiki/Data_source_name).
```rust
let pool = r2d2::Pool::builder()
.max_size(10000) // max connections
.build(cfg)? ;
A typical DSN is constructed as follows:
// ...
// Use pool to get connection
let conn = pool.get()? ;
```text
<driver>[+<protocol>]://[[<username>:<password>@]<host>:<port>][/<database>][?<p1>=<v1>[&<p2>=<v2>]]
|------|------------|---|-----------|-----------|------|------|------------|-----------------------|
|driver| protocol | | username | password | host | port | database | params |
```
After that, you can perform the following operations on the database.
- **Driver**: the main entry point to a processor. **Required**. In the Rust connector, the supported driver names are listed here:
  - **taos**: the legacy TDengine connection data source.
  - **tmq**: the subscription data source from TDengine.
  - **http/ws**: use the WebSocket protocol via the `ws://` scheme.
  - **https/wss**: use the WebSocket protocol via the `wss://` scheme.
- **Protocol**: additional information appended to the driver, which can be used to support different kinds of data sources. By default, leave it empty for the native driver (only under the "native" feature), and use `ws/wss` for the WebSocket driver (only under the "ws" feature). **Optional**.
- **Username**: as its name suggests, the username for the connection. **Optional**.
- **Password**: the password for the username. **Optional**.
- **Host**: the host address of the data source. **Optional**.
- **Port**: the port of the data source. **Optional**.
- **Database**: the database or collection name in the data source. **Optional**.
- **Params**: a key-value map for any other information passed to the data source. **Optional**.
Here is a simple DSN connection string example:
```text
taos+ws://localhost:6041/test
```
which means: connect to `localhost` on port `6041` via the `ws` protocol, with `test` as the default database.
This lets you use a DSN to specify the connection protocol at runtime:
```rust
async fn demo() -> Result<(), Error> {
// get connection ...
use taos::*; // like a `prelude` module: it brings the traits we need into scope.
// create database
conn.exec("create database if not exists demo").await?
// change database context
conn.exec("use demo").await?
// create table
conn.exec("create table if not exists tb1 (ts timestamp, v int)").await?
// insert
conn.exec("insert into tb1 values(now, 1)").await?
// query
let rows = conn.query("select * from tb1").await?
for row in rows.rows {
println!("{}", row.into_iter().join(","));
// use the native protocol.
let builder = TaosBuilder::from_dsn("taos://localhost:6030")?;
let conn1 = builder.build()?;
// use the websocket protocol.
let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?.build()?;
```
Once connected, you can perform the following operations on the database.
```rust
async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
// prepare database
taos.exec_many([
format!("DROP DATABASE IF EXISTS `{db}`"),
format!("CREATE DATABASE `{db}`"),
format!("USE `{db}`"),
])
.await?;
let inserted = taos.exec_many([
// create super table
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
TAGS (`groupid` INT, `location` BINARY(16))",
// create child table
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
// insert into child table
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
// insert with NULL values
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
// insert and automatically create table with tags if not exists
"INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
// insert many records in a single sql
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
]).await?;
assert_eq!(inserted, 6);
let mut result = taos.query("select * from `meters`").await?;
for field in result.fields() {
println!("got field: {}", field.name());
}
}
```
The Rust connector provides two ways to fetch data:
```rust
// Query option 1, use rows stream.
let mut rows = result.rows();
while let Some(row) = rows.try_next().await? {
for (name, value) in row {
println!("got value of {}: {}", name, value);
}
}
// Query option 2: use serde deserialization.
#[derive(Debug, serde::Deserialize)]
#[allow(dead_code)]
struct Record {
// deserialize timestamp to chrono::DateTime<Local>
ts: DateTime<Local>,
// float to f32
current: Option<f32>,
// int to i32
voltage: Option<i32>,
phase: Option<f32>,
groupid: i32,
// binary/varchar to String
location: String,
}
let records: Vec<Record> = taos
.query("select * from `meters`")
.await?
.deserialize()
.try_collect()
.await?;
dbg!(records);
Ok(())
```
## Usage examples
### Write data
@@ -151,122 +225,138 @@ async fn demo() -> Result<(), Error> {
<RustInsert />
#### InfluxDB line protocol write
#### Stmt bind
<RustInfluxLine />
#### OpenTSDB Telnet line protocol write
<RustOpenTSDBTelnet />
#### OpenTSDB JSON line protocol write
<RustOpenTSDBJson />
<RustBind />
### Query data
<RustQuery />
### More sample programs
| Program Path | Program Description |
| -------------- | ----------------------------------------------------------------------------- |
| [demo.rs] | Basic API Usage Examples |
| [bailongma-rs] | Using TDengine as the Prometheus remote storage API adapter for the storage backend, using the r2d2 connection pool |
<RustQuery />
## API Reference
### Connection constructor API
### Connector builder
The [Builder Pattern](https://doc.rust-lang.org/1.0.0/style/ownership/builders.html) constructor pattern is Rust's solution for handling complex data types or optional configuration types. The [libtaos] implementation uses the connection constructor [TaosCfgBuilder] as the entry point for the TDengine Rust connector. The [TaosCfgBuilder] provides optional configuration of servers, ports, databases, usernames, passwords, etc.
Using the `default()` method, you can construct a [TaosCfg] with default parameters for subsequent connections to the database or establishing connection pools.
Use DSN to directly construct a TaosBuilder object.
```rust
let cfg = TaosCfgBuilder::default().build()? ;
let builder = TaosBuilder::from_dsn("")? ;
```
Using the constructor pattern, the user can set on-demand.
Use `builder` to create many connections:
```rust
let cfg = TaosCfgBuilder::default()
.ip("127.0.0.1")
.user("root")
.pass("taosdata")
.db("log")
.port(6030u16)
.build()? ;
let conn: Taos = builder.build()?;
```
Create TDengine connection using [TaosCfg] object.
### Connection pool
In complex applications, we recommend enabling connection pools. The connection pool for [taos] is implemented with [r2d2] and enabled via the "r2d2" feature.
A connection pool with default parameters can be created as follows:
```rust
let conn: Taos = cfg.connect();
let pool = TaosBuilder::from_dsn(dsn)?.pool()?;
```
### Connection pooling
In complex applications, we recommend enabling connection pools. Connection pool for [libtaos] is implemented using [r2d2].
As follows, a connection pool with default parameters can be generated.
You can set the connection pool parameters using the `PoolBuilder`.
```rust
let pool = r2d2::Pool::new(cfg)? ;
let dsn = "taos://localhost:6030";
let opts = PoolBuilder::new()
.max_size(5000) // max connections
.max_lifetime(Some(Duration::from_secs(60 * 60))) // lifetime of each connection
.min_idle(Some(1000)) // minimal idle connections
.connection_timeout(Duration::from_secs(2));
let pool = TaosBuilder::from_dsn(dsn)?.with_pool_builder(opts)?;
```
You can set the same connection pool parameters using the connection pool's constructor.
```rust
use std::time::Duration;
let pool = r2d2::Pool::builder()
.max_size(5000) // max connections
.max_lifetime(Some(Duration::from_minutes(100))) // lifetime of each connection
.min_idle(Some(1000)) // minimal idle connections
.connection_timeout(Duration::from_minutes(2))
.build(cfg);
```
In the application code, use `pool.get()? ` to get a connection object [Taos].
In the application code, use `pool.get()?` to get a connection object [Taos].
```rust
let taos = pool.get()?;
```
The [Taos] structure is the connection manager in [libtaos] and provides two main APIs.
### Connection methods
1. `exec`: Execute some non-query SQL statements, such as `CREATE`, `ALTER`, `INSERT`, etc.
The [Taos] connection struct provides several APIs for convenient use.
1. `exec`: Execute some non-query SQL statements, such as `CREATE`, `ALTER`, and `INSERT`, and return the number of affected rows (only meaningful for `INSERT`).
```rust
taos.exec().await?
let affected_rows = taos.exec("INSERT INTO tb1 VALUES(now, NULL)").await?;
```
2. `query`: Execute the query statement and return the [TaosQueryData] object.
2. `exec_many`: Execute several SQL statements in order with the `exec_many` method.
```rust
let q = taos.query("select * from log.logs").await?
taos.exec_many([
"CREATE DATABASE test",
"USE test",
"CREATE TABLE `tb1` (`ts` TIMESTAMP, `val` INT)",
]).await?;
```
The [TaosQueryData] object stores the query result data and basic information about the returned columns (column name, type, length).
Column information is stored using [ColumnMeta].
3. `query`: Execute the query statement and return the [ResultSet] object.
```rust
let cols = &q.column_meta;
let mut q = taos.query("select * from log.logs").await?;
```
The [ResultSet] object stores the query result data and basic information about the returned columns (column name, type, length).
Get field information with the `fields` method.
```rust
let cols = q.fields();
for col in cols {
println!("name: {}, type: {:?} , bytes: {}", col.name, col.type_, col.bytes);
println!("name: {}, type: {:?} , bytes: {}", col.name(), col.ty(), col.bytes());
}
```
It fetches data line by line.
You can fetch data row by row.
```rust
for (i, row) in q.rows.iter().enumerate() {
for (j, cell) in row.iter().enumerate() {
println!("cell({}, {}) data: {}", i, j, cell);
let mut rows = q.rows();
let mut nrows = 0;
while let Some(row) = rows.try_next().await? {
for (col, (name, value)) in row.enumerate() {
println!(
"[{}] got value in col {} (named `{:>8}`): {}",
nrows, col, name, value
);
}
nrows += 1;
}
```
Or use it with [serde](https://serde.rs) deserialization.
```rust
#[derive(Debug, Deserialize)]
struct Record {
// deserialize timestamp to chrono::DateTime<Local>
ts: DateTime<Local>,
// float to f32
current: Option<f32>,
// int to i32
voltage: Option<i32>,
phase: Option<f32>,
groupid: i32,
// binary/varchar to String
location: String,
}
let records: Vec<Record> = taos
.query("select * from `meters`")
.await?
.deserialize()
.try_collect()
.await?;
```
Note that Rust asynchronous functions and an asynchronous runtime are required.
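For example, a minimal async entry point might look like the sketch below (it assumes the `tokio` runtime and the `anyhow` crate, which the sample programs in this commit also use; the `demo` database name is illustrative):

```rust
use taos::*;

// A minimal sketch of an async entry point; tokio is the runtime used by the examples.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let taos = TaosBuilder::from_dsn("taos://")?.build()?;
    // `exec` is async, so it must be awaited inside an async function.
    let affected = taos.exec("CREATE DATABASE IF NOT EXISTS demo").await?;
    println!("affected rows: {affected}");
    Ok(())
}
```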
[Taos] provides a few Rust methods that encapsulate SQL to reduce the frequency of `format!` code blocks.
@@ -275,110 +365,152 @@ Note that Rust asynchronous functions and an asynchronous runtime are required.
- `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement.
- `.use_database(database: &str)`: Executes the `USE` statement.
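As a sketch (assuming an async context and a connected `taos` object; the `power` database name is illustrative), these wrappers map one-to-one onto the equivalent SQL statements:

```rust
// Equivalent to `taos.exec("CREATE DATABASE power").await?`.
taos.create_database("power").await?;
// Equivalent to `taos.exec("USE power").await?`.
taos.use_database("power").await?;
```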
In addition, this structure is also the entry point for [Parameter Binding](#Parameter Binding Interface) and [Line Protocol Interface](#Line Protocol Interface). Please refer to the specific API descriptions for usage.
### Bind API
### Bind Interface
Similar to the C interface, Rust provides the bind interface's wrapping. First, create a bind object [Stmt] for a SQL command from the [Taos] object.
Similar to the C interface, Rust provides a wrapper for the bind interface. First, create a bind object [Stmt] for a SQL statement with the [Taos] object.
```rust
let mut stmt: Stmt = taos.stmt("insert into ? values(? ,?)") ? ;
let mut stmt = Stmt::init(&taos).await?;
stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?;
```
The bind object provides a set of interfaces for implementing parameter binding.
##### `.set_tbname(tbname: impl ToCString)`
#### `.set_tbname(name)`
To bind table names.
##### `.set_tbname_tags(tbname: impl ToCString, tags: impl IntoParams)`
Bind sub-table table names and tag values when the SQL statement uses a super table.
```rust
let mut stmt = taos.stmt("insert into ? using stb0 tags(?) values(? ,?)") ? ;
// tags can be created with any supported type, here is an example using JSON
let v = Field::Json(serde_json::from_str("{\"tag1\":\"one, two, three, four, five, six, seven, eight, nine, ten\"}").unwrap());
stmt.set_tbname_tags("tb0", [&tag])? ;
let mut stmt = taos.stmt("insert into ? values(? ,?)")?;
stmt.set_tbname("d0")?;
```
##### `.bind(params: impl IntoParams)`
#### `.set_tags(&[tag])`
Bind value types. Use the [Field] structure to construct the desired type and bind.
Bind tag values when the SQL statement uses a super table.
```rust
let ts = Field::Timestamp(Timestamp::now());
let value = Field::Float(0.0);
stmt.bind(vec![ts, value].iter())? ;
let mut stmt = taos.stmt("insert into ? using stb0 tags(?) values(? ,?)")?;
stmt.set_tbname("d0")?;
stmt.set_tags(&[Value::VarChar("涛思".to_string())])?;
```
##### `.execute()`
#### `.bind(&[column])`
Execute SQL.[Stmt] objects can be reused, re-binded, and executed after execution.
Bind value types. Use the [ColumnView] structure to construct the desired type and bind.
```rust
stmt.execute()? ;
let params = vec![
ColumnView::from_millis_timestamp(vec![164000000000]),
ColumnView::from_bools(vec![true]),
ColumnView::from_tiny_ints(vec![i8::MAX]),
ColumnView::from_small_ints(vec![i16::MAX]),
ColumnView::from_ints(vec![i32::MAX]),
ColumnView::from_big_ints(vec![i64::MAX]),
ColumnView::from_unsigned_tiny_ints(vec![u8::MAX]),
ColumnView::from_unsigned_small_ints(vec![u16::MAX]),
ColumnView::from_unsigned_ints(vec![u32::MAX]),
ColumnView::from_unsigned_big_ints(vec![u64::MAX]),
ColumnView::from_floats(vec![f32::MAX]),
ColumnView::from_doubles(vec![f64::MAX]),
ColumnView::from_varchar(vec!["ABC"]),
ColumnView::from_nchar(vec!["涛思数据"]),
];
let rows = stmt.bind(&params)?.add_batch()?.execute()?;
```
#### `.execute()`
Execute to insert all bound records. [Stmt] objects can be reused, re-bound, and executed again after execution. Remember to call `add_batch` before `execute`.
```rust
stmt.add_batch()?.execute()?;
// next bind cycle.
// stmt.set_tbname()?;
// stmt.bind()?;
// stmt.add_batch()?.execute()?;
```
### Line protocol interface
A runnable example for bind can be found [here](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs).
The line protocol interface supports multiple modes and different precision and requires the introduction of constants in the schemaless module to set.
### Subscription API
Users can subscribe to a [TOPIC](../../../taos-sql/tmq/) with the TMQ (TDengine Message Queue) API.
Start from a TMQ builder:
```rust
use libtaos::*;
use libtaos::schemaless::*;
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
```
- InfluxDB line protocol
Build a consumer:
```rust
let lines = [
"st,t1=abc,t2=def,t3=anything c1=3i64,c3=L\"pass\",c2=false 1626006833639000000"
"st,t1=abc,t2=def,t3=anything c1=3i64,c3=L\"abc\",c4=4f64 1626006833639000000"
];
taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANOSECONDS)? ;
```
```rust
let mut consumer = tmq.build()?;
```
- OpenTSDB Telnet Protocol
Subscribe to a topic:
```rust
let lines = ["sys.if.bytes.out 1479496100 1.3E3 host=web01 interface=eth0"];
taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_SECONDS)? ;
```
```rust
consumer.subscribe(["tmq_meters"]).await?;
```
- OpenTSDB JSON protocol
Consume messages, and commit the offset for each message.
```rust
let lines = [r#"
{
"metric": "st",
"timestamp": 1626006833,
"value": 10,
"tags": {
"t1": true,
"t2": false,
"t3": 10,
"t4": "123_abc_.! @#$%^&*:;,. /? |+-=()[]{}<>"
```rust
{
let mut stream = consumer.stream();
while let Some((offset, message)) = stream.try_next().await? {
// get information from offset
// the topic
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
// one block for one table, get table name if needed
let name = block.table_name();
let records: Vec<Record> = block.deserialize().try_collect()?;
println!(
"** table: {}, got {} records: {:#?}\n",
name.unwrap(),
records.len(),
records
);
}
}"#];
taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_SECONDS)? ;
```
}
consumer.commit(offset).await?;
}
}
```
Please move to the Rust documentation hosting page for other related structure API usage instructions: <https://docs.rs/libtaos>.
Unsubscribe:
[libtaos]: https://github.com/taosdata/libtaos-rs
[tdengine]: https://github.com/taosdata/TDengine
[bailongma-rs]: https://github.com/taosdata/bailongma-rs
```rust
consumer.unsubscribe().await;
```
In a TMQ DSN, you must set a group id to subscribe. Several other options can be set as well:
- `group.id`: **Required**. A group id is any visible string you set.
- `client.id`: an optional client description string.
- `auto.offset.reset`: choose to subscribe from *earliest* or *latest*; the default is *none*, which means *earliest*.
- `enable.auto.commit`: automatically commit at the specified time interval. By default, and as the recommended way, you must call `commit` to ensure that messages have been consumed correctly; otherwise, consumers will receive repeated messages when they re-subscribe.
- `auto.commit.interval.ms`: the auto commit interval in milliseconds.
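Put together, a TMQ DSN carrying these options might look like the following sketch (the option values here are illustrative, not defaults):

```rust
// group.id is required; client.id and the offset/commit options below are illustrative values.
let tmq = TmqBuilder::from_dsn(
    "taos://localhost:6030/?group.id=test&client.id=example&auto.offset.reset=earliest&enable.auto.commit=false",
)?;
let mut consumer = tmq.build()?;
```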
Check the whole subscription example at [GitHub](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
For usage instructions of other related structures and APIs, please refer to the Rust documentation hosting page: <https://docs.rs/taos>.
[TDengine]: https://github.com/taosdata/TDengine
[r2d2]: https://crates.io/crates/r2d2
[demo.rs]: https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs
[TaosCfgBuilder]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfgBuilder.html
[TaosCfg]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfg.html
[Taos]: https://docs.rs/libtaos/latest/libtaos/struct.Taos.html
[TaosQueryData]: https://docs.rs/libtaos/latest/libtaos/field/struct.TaosQueryData.html
[Field]: https://docs.rs/libtaos/latest/libtaos/field/enum.Field.html
[Stmt]: https://docs.rs/libtaos/latest/libtaos/stmt/struct.Stmt.html
[Taos]: https://docs.rs/taos/latest/taos/struct.Taos.html
[ResultSet]: https://docs.rs/taos/latest/taos/struct.ResultSet.html
[Value]: https://docs.rs/taos/latest/taos/enum.Value.html
[Stmt]: https://docs.rs/taos/latest/taos/stmt/struct.Stmt.html
[taos]: https://crates.io/crates/taos

View File

@@ -19,15 +19,15 @@ TDengine's connector can support a wide range of platforms, including X64/X86/AR
The comparison matrix is as follows.
| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **ARM32** | **MIPS** | **Alpha** |
| ----------- | ------------- | --------- | --------- | ------------- | --------- | --------- | --------- | --------- |
| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** |
| **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● |
| **JDBC** | ● | ● | ● | ○ | ● | ● | ● | ● |
| **Python** | ● | ● | ● | ○ | ● | ● | ● | -- |
| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- |
| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- |
| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● |
| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **MIPS** | **Alpha** |
| ----------- | ------------- | --------- | --------- | ------------- | --------- | --------- | --------- |
| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** |
| **C/C++** | ● | ● | ● | ○ | ● | ● | ● |
| **JDBC** | ● | ● | ● | ○ | ● | ● | ● |
| **Python** | ● | ● | ● | ○ | ● | ● | -- |
| **Go** | ● | ● | ● | ○ | ● | ○ | -- |
| **NodeJs** | ● | ● | ○ | ○ | ● | ○ | -- |
| **C#** | ● | ● | ○ | ○ | ○ | ○ | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ● |
Note: ● means the official test is verified, ○ means the unofficial test is verified, -- means not verified.

View File

@@ -3,7 +3,7 @@ title: Schemaless Writing
description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface."
---
In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. To provide the flexibility needed in such cases and in a rapidly changing IoT landscape, TDengine starting from version 2.2.0.0, provides a series of interfaces for the schemaless writing method. These interfaces eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. When necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly.
In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. To provide the flexibility needed in such cases and in a rapidly changing IoT landscape, TDengine provides a series of interfaces for the schemaless writing method. These interfaces eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. When necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly.
The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from the super tables and subtables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly meaningful and they lack readability.
@@ -39,10 +39,10 @@ In the schemaless writing data line protocol, each data item in the field_set ne
| -------- | -------- | ------------ | -------------- |
| 1 | none or f64 | double | 8 |
| 2 | f32 | float | 4 |
| 3 | i8 | TinyInt | 1 |
| 4 | i16 | SmallInt | 2 |
| 5 | i32 | Int | 4 |
| 6 | i64 or i | Bigint | 8 |
| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
| 4 | i16/u16 | SmallInt/USmallInt | 2 |
| 5 | i32/u32 | Int/UInt | 4 |
| 6 | i64/i/u64/u | Bigint/Bigint/UBigint/UBigint | 8 |
- `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types.
@@ -72,11 +72,11 @@ If the subtable obtained by the parse line protocol does not exist, Schemaless c
4. If the specified tag or regular column in the data row does not exist, the corresponding tag or regular column is added to the super table (only incremental).
5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to NULL.
6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
7. If the specified data subtable already exists, and the specified tag column takes a value different from the saved value this time, the value in the latest data row overwrites the old tag column take value.
8. Errors encountered throughout the processing will interrupt the writing process and return an error code.
7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
8. To improve writing efficiency, it is assumed by default that the field order is the same for all rows of the same super table (the first row contains all fields, and subsequent rows follow that order). If the order differs, the smlDataFormat parameter must be configured to false; otherwise the data will be written assuming the same order, and the data in the database will be abnormal.
:::tip
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
All processing logic of schemaless writing still follows TDengine's underlying restrictions on data structures, such as the rule that the total length of each data row cannot exceed 16 KB. See [TAOS SQL Boundary Limits](/taos-sql/limit) for the specific constraints in this area.
:::
## Time resolution recognition

View File

@@ -1,2 +1,2 @@
[workspace]
members = ["restexample", "nativeexample", "schemalessexample"]
members = ["restexample", "nativeexample"]

View File

@@ -5,6 +5,9 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
libtaos = { version = "0.4.3" }
tokio = { version = "*", features = ["rt", "macros", "rt-multi-thread"] }
bstr = { version = "*" }
anyhow = "1"
chrono = "0.4"
serde = { version = "1", features = ["derive"] }
tokio = { version = "1", features = ["rt", "macros", "rt-multi-thread"] }
taos = { version = "0.*" }

View File

@@ -1,19 +1,9 @@
use libtaos::*;
use taos::*;
fn taos_connect() -> Result<Taos, Error> {
TaosCfgBuilder::default()
.ip("localhost")
.user("root")
.pass("taosdata")
// .db("log") // remove comment if you want to connect to database log by default.
.port(6030u16)
.build()
.expect("TaosCfg builder error")
.connect()
}
fn main() {
#[tokio::main]
async fn main() -> Result<(), Error> {
#[allow(unused_variables)]
let taos = taos_connect().unwrap();
println!("Connected")
let taos = TaosBuilder::from_dsn("taos://")?.build()?;
println!("Connected");
Ok(())
}

View File

@@ -1,38 +1,40 @@
use bstr::BString;
use libtaos::*;
use taos::*;
#[tokio::main]
async fn main() -> Result<(), Error> {
let taos = TaosCfg::default().connect().expect("fail to connect");
async fn main() -> anyhow::Result<()> {
let taos = TaosBuilder::from_dsn("taos://")?.build()?;
taos.create_database("power").await?;
taos.use_database("power").await?;
taos.exec("CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)").await?;
let mut stmt = taos.stmt("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?;
taos.exec("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)").await?;
let mut stmt = Stmt::init(&taos)?;
stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?;
// bind table name and tags
stmt.set_tbname_tags(
"d1001",
[
Field::Binary(BString::from("California.SanFrancisco")),
Field::Int(2),
],
&[Value::VarChar("San Francisco".into()), Value::Int(2)],
)?;
// bind values.
let values = vec![
Field::Timestamp(Timestamp::new(1648432611249, TimestampPrecision::Milli)),
Field::Float(10.3),
Field::Int(219),
Field::Float(0.31),
ColumnView::from_millis_timestamp(vec![1648432611249]),
ColumnView::from_floats(vec![10.3]),
ColumnView::from_ints(vec![219]),
ColumnView::from_floats(vec![0.31]),
];
stmt.bind(&values)?;
// bind one more row
let values2 = vec![
Field::Timestamp(Timestamp::new(1648432611749, TimestampPrecision::Milli)),
Field::Float(12.6),
Field::Int(218),
Field::Float(0.33),
ColumnView::from_millis_timestamp(vec![1648432611749]),
ColumnView::from_floats(vec![12.6]),
ColumnView::from_ints(vec![218]),
ColumnView::from_floats(vec![0.33]),
];
stmt.bind(&values2)?;
// execute
stmt.execute()?;
stmt.add_batch()?;
// execute.
let rows = stmt.execute()?;
assert_eq!(rows, 2);
Ok(())
}

View File

@@ -1,3 +1,101 @@
fn main() {
}
use std::time::Duration;
use chrono::{DateTime, Local};
use taos::*;
// Query option 2: use serde deserialization.
#[derive(Debug, serde::Deserialize)]
#[allow(dead_code)]
struct Record {
// deserialize timestamp to chrono::DateTime<Local>
ts: DateTime<Local>,
// float to f32
current: Option<f32>,
// int to i32
voltage: Option<i32>,
phase: Option<f32>,
}
async fn prepare(taos: Taos) -> anyhow::Result<()> {
let inserted = taos.exec_many([
// create child table
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
// insert into child table
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
// insert with NULL values
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
// insert and automatically create table with tags if not exists
"INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
// insert many records in a single sql
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
]).await?;
assert_eq!(inserted, 6);
Ok(())
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let dsn = "taos://localhost:6030";
let builder = TaosBuilder::from_dsn(dsn)?;
let taos = builder.build()?;
let db = "tmq";
// prepare database
taos.exec_many([
format!("DROP TOPIC IF EXISTS tmq_meters"),
format!("DROP DATABASE IF EXISTS `{db}`"),
format!("CREATE DATABASE `{db}`"),
format!("USE `{db}`"),
// create super table
format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))"),
// create topic for subscription
format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
])
.await?;
let task = tokio::spawn(prepare(taos));
tokio::time::sleep(Duration::from_secs(1)).await;
// subscribe
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
let mut consumer = tmq.build()?;
consumer.subscribe(["tmq_meters"]).await?;
{
let mut stream = consumer.stream();
while let Some((offset, message)) = stream.try_next().await? {
// get information from offset
// the topic
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
// one block for one table, get table name if needed
let name = block.table_name();
let records: Vec<Record> = block.deserialize().try_collect()?;
println!(
"** table: {}, got {} records: {:#?}\n",
name.unwrap(),
records.len(),
records
);
}
}
consumer.commit(offset).await?;
}
}
consumer.unsubscribe().await;
task.await??;
Ok(())
}

View File

@@ -4,5 +4,9 @@ version = "0.1.0"
edition = "2021"
[dependencies]
libtaos = { version = "0.4.3", features = ["rest"] }
tokio = { version = "*", features = ["rt", "macros", "rt-multi-thread"] }
anyhow = "1"
chrono = "0.4"
serde = { version = "1", features = ["derive"] }
tokio = { version = "1", features = ["rt", "macros", "rt-multi-thread"] }
taos = { version = "0.*" }

View File

@@ -1,20 +1,9 @@
use libtaos::*;
fn taos_connect() -> Result<Taos, Error> {
TaosCfgBuilder::default()
.ip("localhost")
.user("root")
.pass("taosdata")
// .db("log") // remove comment if you want to connect to database log by default.
.port(6030u16)
.build()
.expect("TaosCfg builder error")
.connect()
}
use taos::*;
#[tokio::main]
async fn main() {
async fn main() -> Result<(), Error> {
#[allow(unused_variables)]
let taos = taos_connect().expect("connect error");
println!("Connected")
let taos = TaosBuilder::from_dsn("taos://")?.build()?;
println!("Connected");
Ok(())
}

View File

@@ -1,18 +1,29 @@
use libtaos::*;
use taos::*;
#[tokio::main]
async fn main() -> Result<(), Error> {
let taos = TaosCfg::default().connect().expect("fail to connect");
taos.create_database("power").await?;
taos.exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)").await?;
let sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
let result = taos.query(sql).await?;
println!("{:?}", result);
async fn main() -> anyhow::Result<()> {
let dsn = "ws://";
let taos = TaosBuilder::from_dsn(dsn)?.build()?;
taos.exec_many([
"DROP DATABASE IF EXISTS power",
"CREATE DATABASE power",
"USE power",
"CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"
]).await?;
let inserted = taos.exec("INSERT INTO
power.d1001 USING power.meters TAGS('San Francisco', 2)
VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000)
('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
power.d1002 USING power.meters TAGS('San Francisco', 3)
VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
power.d1003 USING power.meters TAGS('Los Angeles', 2)
VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
power.d1004 USING power.meters TAGS('Los Angeles', 3)
VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)").await?;
assert_eq!(inserted, 8);
Ok(())
}
// output:
// TaosQueryData { column_meta: [ColumnMeta { name: "affected_rows", type_: Int, bytes: 4 }], rows: [[Int(8)]] }

View File

@@ -1,39 +1,25 @@
use libtaos::*;
use taos::sync::*;
fn taos_connect() -> Result<Taos, Error> {
TaosCfgBuilder::default()
.ip("localhost")
.user("root")
.pass("taosdata")
.db("power")
.port(6030u16)
.build()
.expect("TaosCfg builder error")
.connect()
}
#[tokio::main]
async fn main() -> Result<(), Error> {
let taos = taos_connect().expect("connect error");
let result = taos.query("SELECT ts, current FROM meters LIMIT 2").await?;
fn main() -> anyhow::Result<()> {
let taos = TaosBuilder::from_dsn("ws:///power")?.build()?;
let mut result = taos.query("SELECT ts, current FROM meters LIMIT 2")?;
// print column names
let meta: Vec<ColumnMeta> = result.column_meta;
for column in meta {
print!("{}\t", column.name)
}
println!();
let meta = result.fields();
println!("{}", meta.iter().map(|field| field.name()).join("\t"));
// print rows
let rows: Vec<Vec<Field>> = result.rows;
let rows = result.rows();
for row in rows {
for field in row {
print!("{}\t", field);
let row = row?;
for (_name, value) in row {
print!("{}\t", value);
}
println!();
}
Ok(())
}
// output:
// output (assuming you are in the +8 timezone):
// ts current
// 2022-03-28 09:56:51.249 10.3
// 2022-03-28 09:56:51.749 12.6
// 2018-10-03T14:38:05+08:00 10.3
// 2018-10-03T14:38:15+08:00 12.6

View File

@ -1,7 +0,0 @@
[package]
name = "schemalessexample"
version = "0.1.0"
edition = "2021"
[dependencies]
libtaos = { version = "0.4.3" }

View File

@ -1,22 +0,0 @@
use libtaos::schemaless::*;
use libtaos::*;
fn main() {
let taos = TaosCfg::default().connect().expect("fail to connect");
taos.raw_query("CREATE DATABASE test").unwrap();
taos.raw_query("USE test").unwrap();
let lines = ["meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
"meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
"meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
"meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"];
let affected_rows = taos
.schemaless_insert(
&lines,
TSDB_SML_LINE_PROTOCOL,
TSDB_SML_TIMESTAMP_MILLISECONDS,
)
.unwrap();
println!("affected_rows={}", affected_rows);
}
// run with: cargo run --example influxdb_line_example

View File

@ -1,25 +0,0 @@
use libtaos::schemaless::*;
use libtaos::*;
fn main() {
let taos = TaosCfg::default().connect().expect("fail to connect");
taos.raw_query("CREATE DATABASE test").unwrap();
taos.raw_query("USE test").unwrap();
let lines = [
r#"[{"metric": "meters.current", "timestamp": 1648432611249, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
{"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}},
{"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}},
{"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]"#,
];
let affected_rows = taos
.schemaless_insert(
&lines,
TSDB_SML_JSON_PROTOCOL,
TSDB_SML_TIMESTAMP_NOT_CONFIGURED,
)
.unwrap();
println!("affected_rows={}", affected_rows); // affected_rows=4
}
// run with: cargo run --example opentsdb_json_example

View File

@ -1,28 +0,0 @@
use libtaos::schemaless::*;
use libtaos::*;
fn main() {
let taos = TaosCfg::default().connect().expect("fail to connect");
taos.raw_query("CREATE DATABASE test").unwrap();
taos.raw_query("USE test").unwrap();
let lines = [
"meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
"meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
"meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
"meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3",
"meters.voltage 1648432611249 219 location=California.SanFrancisco groupid=2",
"meters.voltage 1648432611250 218 location=California.SanFrancisco groupid=2",
"meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
"meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
];
let affected_rows = taos
.schemaless_insert(
&lines,
TSDB_SML_TELNET_PROTOCOL,
TSDB_SML_TIMESTAMP_NOT_CONFIGURED,
)
.unwrap();
println!("affected_rows={}", affected_rows); // affected_rows=8
}
// run with: cargo run --example opentsdb_telnet_example

View File

@ -1,3 +0,0 @@
fn main() {
println!("Hello, world!");
}

View File

@ -4,7 +4,7 @@ sidebar_label: Documentation Home
slug: /
---
TDengine is an open-source, [high-performance](https://www.taosdata.com/fast), cloud-native time-series database (TSDB) optimized for IoT, Industrial IoT, finance, and similar scenarios. It also comes with built-in caching, stream processing, and data subscription, which greatly reduce system design complexity and lower development and operating costs, making it a simplified platform for time-series data processing. This document is the TDengine user manual. It introduces the basic concepts, installation, usage, features, programming interfaces, operation and maintenance, and internal design of TDengine, and is mainly intended for architects, developers, and system administrators.
TDengine is an [open-source](https://www.taosdata.com/tdengine/open_source_time-series_database), [high-performance](https://www.taosdata.com/fast), [cloud-native](https://www.taosdata.com/tdengine/cloud_native_time-series_database) time-series database (TSDB) optimized for IoT, Industrial IoT, finance, and similar scenarios. It also comes with built-in caching, stream processing, and data subscription, which greatly reduce system design complexity and lower development and operating costs, making it a simplified platform for time-series data processing. This document is the TDengine user manual. It introduces the basic concepts, installation, usage, features, programming interfaces, operation and maintenance, and internal design of TDengine, and is mainly intended for architects, developers, and system administrators.
TDengine takes full advantage of the characteristics of time-series data, introduces the concepts of "one table per data collection point" and "supertable", and designs an innovative storage engine that greatly improves the efficiency of data ingestion, querying, and storage. To understand and use TDengine correctly, please make sure to read the [Basic Concepts](./concept) chapter.

View File

@ -3,7 +3,7 @@ title: Product Introduction
toc_max_heading_level: 2
---
TDengine is an open-source, high-performance, cloud-native time-series database (TSDB) that can be widely used in IoT, Industrial IoT, connected vehicles, IT operations, finance, and other fields. Beyond the core time-series database features, TDengine also provides [caching](/develop/cache/), [data subscription](/develop/subscribe), and [stream processing](/develop/continuous-query), making it a simplified time-series data processing platform that minimizes system design complexity and reduces development and operating costs.
TDengine is an [open-source](https://www.taosdata.com/tdengine/open_source_time-series_database), [high-performance](https://www.taosdata.com/tdengine/fast), [cloud-native](https://www.taosdata.com/tdengine/cloud_native_time-series_database) time-series database (TSDB) that can be widely used in IoT, Industrial IoT, connected vehicles, IT operations, finance, and other fields. Beyond the core time-series database features, TDengine also provides [caching](/develop/cache/), [data subscription](/develop/subscribe), and [stream processing](/develop/continuous-query), making it a simplified time-series data processing platform that minimizes system design complexity and reduces development and operating costs.
This chapter introduces the major features, competitive advantages, typical use cases, and benchmark comparisons of TDengine, to give you an overall picture of the product.
@ -33,17 +33,17 @@ The major features of TDengine are as follows:
Because TDengine makes full use of [the characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/105.html), such as being structured, requiring no transactions, rarely being deleted or updated, and being write-heavy but read-light, it implements a brand-new storage engine and computing engine designed for time-series data. Compared with other time-series databases, TDengine has the following advantages:
- **High performance**: With an innovative storage engine design, TDengine is more than 10 times faster than general-purpose databases in both data ingestion and queries, far outperforms other time-series databases, and uses less than 1/10 the storage space of general-purpose databases.
- **[High performance](https://www.taosdata.com/tdengine/fast)**: With an innovative storage engine design, TDengine is more than 10 times faster than general-purpose databases in both data ingestion and queries, far outperforms other time-series databases, and uses less than 1/10 the storage space of general-purpose databases.
- **Cloud native**: With a natively distributed design that takes full advantage of cloud platforms, TDengine provides horizontal scalability together with elasticity, resilience, and observability; it supports Kubernetes deployment and can run on public, private, and hybrid clouds.
- **[Cloud native](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**: With a natively distributed design that takes full advantage of cloud platforms, TDengine provides horizontal scalability together with elasticity, resilience, and observability; it supports Kubernetes deployment and can run on public, private, and hybrid clouds.
- **Simplified time-series data platform**: With built-in message queuing, caching, and stream processing, TDengine spares applications from integrating software such as Kafka/Redis/HBase/Spark, greatly reducing system complexity and lowering application development and operating costs.
- **[Simplified time-series data platform](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**: With built-in message queuing, caching, and stream processing, TDengine spares applications from integrating software such as Kafka/Redis/HBase/Spark, greatly reducing system complexity and lowering application development and operating costs.
- **Analytics capability**: TDengine supports SQL and extends it for time-series-specific analysis. With supertables, separation of storage and compute, partitioning and sharding, pre-computation, and user-defined functions, TDengine offers powerful analytics.
- **[Analytics capability](https://www.taosdata.com/tdengine/easy_data_analytics)**: TDengine supports SQL and extends it for time-series-specific analysis. With supertables, separation of storage and compute, partitioning and sharding, pre-computation, and user-defined functions, TDengine offers powerful analytics.
- **Ease of use**: TDengine installs without dependencies and a cluster can be set up in seconds; it provides a REST interface and connectors for many languages, integrates seamlessly with numerous third-party tools, and offers a command-line program for administration and ad-hoc queries plus a variety of operation and maintenance tools.
- **[Ease of use](https://www.taosdata.com/tdengine/ease_of_use)**: TDengine installs without dependencies and a cluster can be set up in seconds; it provides a REST interface and connectors for many languages, integrates seamlessly with numerous third-party tools, and offers a command-line program for administration and ad-hoc queries plus a variety of operation and maintenance tools.
- **Open source core**: The core code of TDengine, including the clustering feature, is fully open source. As of August 1, 2022, there were more than 135.9k running instances worldwide, with 18.7k stars and 4.4k forks on GitHub and an active community.
- **[Open source core](https://www.taosdata.com/tdengine/open_source_time-series_database)**: The core code of TDengine, including the clustering feature, is fully open source. As of August 1, 2022, there were more than 135.9k running instances worldwide, with 18.7k stars and 4.4k forks on GitHub and an active community.
Adopting TDengine can substantially reduce the total cost of ownership of a typical IoT, connected-vehicle, or Industrial IoT big data platform, in several respects:

View File

@ -47,9 +47,7 @@ Docker version 20.10.3, build 48d30b5
## Running the TDengine CLI
There are two ways to access TDengine with the TDengine CLI (taos) in a Docker environment:
- Enter the container and run taos
- Access it from the host through the container's mapped port: `taos -h <hostname> -P <port>`
Enter the container and run taos:
```
$ taos
@ -62,47 +60,11 @@ taos>
```
## Accessing the REST Interface
taosAdapter is the component that provides the REST service in TDengine. The following command starts both the `taosd` and `taosadapter` services in the container. By default, the Docker image starts both the TDengine server taosd and taosAdapter.
You can use curl on the host to access the TDengine server inside the Docker container through the RESTful port.
```
curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
```
Sample output:
```
{"code":0,"column_meta":[["name","VARCHAR",64],["create_time","TIMESTAMP",8],["vgroups","SMALLINT",2],["ntables","BIGINT",8],["replica","TINYINT",1],["strict","VARCHAR",4],["duration","VARCHAR",10],["keep","VARCHAR",32],["buffer","INT",4],["pagesize","INT",4],["pages","INT",4],["minrows","INT",4],["maxrows","INT",4],["wal","TINYINT",1],["fsync","INT",4],["comp","TINYINT",1],["cacheModel","VARCHAR",11],["precision","VARCHAR",2],["single_stable","BOOL",1],["status","VARCHAR",10],["retention","VARCHAR",60]],"data":[["information_schema",null,null,14,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,"ready"],["performance_schema",null,null,3,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,"ready"]],"rows":2}
```
This command accesses the TDengine server through the REST API; here, the connection goes to port 6041, which is mapped from the container to the host.
For details of the TDengine REST API, see the [official documentation](/reference/rest-api/).
## Starting the REST Service Separately
If you want to start only `taosadapter`:
```bash
docker run -d --network=host --name tdengine-taosa -e TAOS_FIRST_EP=tdengine-taosd tdengine/tdengine:3.0.0.0 taosadapter
```
只启动 `taosd`
```bash
docker run -d --network=host --name tdengine-taosd -e TAOS_DISABLE_ADAPTER=true tdengine/tdengine:3.0.0.0
```
Note that the command lines above deploy taosAdapter separately with the container using host networking. For other network access modes, configure the hostname, DNS, and other necessary network settings accordingly.
## Writing Data
You can use taosBenchmark, the tool bundled with TDengine, to quickly experience writing data into TDengine.
Assuming that port 6030 of the container was mapped to port 6030 of the host when the container was started, you can run taosBenchmark directly from the host command line, or enter the container and run it there:
Enter the container and start taosBenchmark:
```bash
$ taosBenchmark
@ -117,7 +79,7 @@ docker run -d --network=host --name tdengine-taosd -e TAOS_DISABLE_ADAPTER=true
## Experiencing Queries
After inserting data with taosBenchmark as above, you can enter query commands in the TDengine CLI to experience the query performance, either directly on the host or inside the container.
After inserting data with taosBenchmark as above, you can enter query commands in the TDengine CLI to experience the query performance.
Count the total number of records under the supertable:
@ -148,3 +110,7 @@ taos> select avg(current), max(voltage), min(phase) from test.meters where group
```sql
taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
```
## Other
For more details on using TDengine in a Docker environment, see [Using TDengine in Docker](../../reference/docker).

View File

@ -11,7 +11,7 @@ import TabItem from "@theme/TabItem";
:::
The open-source edition of TDengine provides installation packages in deb and rpm formats; choose the one appropriate for your environment. The deb package supports Debian/Ubuntu and derivatives, and the rpm package supports CentOS/RHEL/SUSE and derivatives. We also provide a tar.gz package for enterprise users, and online installation via the `apt-get` tool is supported as well.
The open-source edition of TDengine provides installation packages in deb and rpm formats; choose the one appropriate for your environment. The deb package supports Debian/Ubuntu and derivatives, and the rpm package supports CentOS/RHEL/SUSE and derivatives. We also provide a tar.gz package for enterprise users, and online installation via the `apt-get` tool is supported as well.
## Installation

View File

@ -1,84 +1,128 @@
---
sidebar_label: Continuous Query
description: "A continuous query is a query executed automatically at a preset frequency, providing aggregate queries over time windows; it is a simplified, time-driven form of stream computing."
title: "Continuous Query"
---
A continuous query is a query that TDengine executes automatically at regular intervals, computing over a sliding window; it is a simplified, time-driven form of stream computing. For a table or supertable in a database, TDengine can run a continuous query periodically and either push the results to the user or write them back into TDengine. Each execution covers one time window, and the window slides forward as time passes. When defining a continuous query, you need to specify the time window size (parameter interval) and the forward sliding increment (parameter sliding).
Continuous queries in TDengine are time-driven and can be defined directly in TAOS SQL with no extra operations. They make it easy to generate results per time window and thereby down-sample the raw collected data. After a continuous query is defined in TAOS SQL, TDengine automatically launches the query at the end of the last complete time period and pushes the computed results to the user or writes them back into TDengine.
Continuous queries provided by TDengine differ from the time-window computation of ordinary stream computing in the following ways:
- Unlike stream computing, which feeds back results in real time, a continuous query only starts computing after the time window closes. For example, if the period is one day, the results for that day are only generated after 23:59:59.
- If historical records are written into a time range whose computation has already finished, the continuous query neither recomputes nor pushes the results to the user again. In write-back mode, the existing results are not updated either.
- In push mode, the server does not cache the client's computation state, nor does it provide exactly-once semantics. If the client application crashes, the relaunched continuous query recomputes only the most recent complete time window, starting from the relaunch time. In write-back mode, TDengine guarantees the validity and continuity of the written-back data.
## Continuous Query Syntax
```sql
[CREATE TABLE AS] SELECT select_expr [, select_expr ...]
FROM {tb_name_list}
[WHERE where_condition]
[INTERVAL(interval_val [, interval_offset]) [SLIDING sliding_val]]
```
INTERVAL: the time window over which the continuous query operates
SLIDING: the interval by which the time window slides forward
## Using Continuous Queries
The following uses the smart meter scenario to show how to use continuous queries. Suppose the supertable and child tables were created with the SQL statements below:
```sql
create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
create table D1001 using meters tags ("California.SanFrancisco", 2);
create table D1002 using meters tags ("California.LosAngeles", 2);
...
```
The following SQL statement computes the average voltage of these meters with a one-minute time window and a 30-second forward increment.
```sql
select avg(voltage) from meters interval(1m) sliding(30s);
```
Every execution of this statement recomputes all the data. To run it every 30 seconds and incrementally compute the data of the last minute, revise the statement as follows, using a different `startTime` each time and executing it periodically:
```sql
select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s);
```
This works, but TDengine offers a simpler way: just prepend `create table {tableName} as` to the original query, for example:
```sql
create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s);
```
This automatically creates a new table named `avg_vol`. Every 30 seconds, TDengine incrementally executes the SQL statement after `as` and writes the results into that table; the application then only needs to query `avg_vol`. For example:
```sql
taos> select * from avg_vol;
ts | avg_voltage_ |
===================================================
2020-07-29 13:37:30.000 | 222.0000000 |
2020-07-29 13:38:00.000 | 221.3500000 |
2020-07-29 13:38:30.000 | 220.1700000 |
2020-07-29 13:39:00.000 | 223.0800000 |
```
Note that the minimum query time window is 10 milliseconds, and there is no upper limit on the window size.
TDengine also lets you specify the start and end times of a continuous query. Without a start time, the query starts from the time window containing the first raw record; without an end time, it runs forever; with an end time, it stops once the system time reaches that time. For example, the continuous query created by the SQL below runs for one hour and then stops automatically.
```sql
create table avg_vol as select avg(voltage) from meters where ts > now and ts <= now + 1h interval(1m) sliding(30s);
```
Note that `now` in the example above refers to the time the continuous query is created, not the time it executes; otherwise the query could never stop automatically. Also, to mitigate problems caused by late-arriving raw data, continuous query computation in TDengine is slightly delayed: after a time window passes, TDengine does not compute its data immediately, so you need to wait a moment (usually no more than one minute) before the results become queryable.
## Managing Continuous Queries
In the console, you can list all running continuous queries with the `show streams` command and terminate one with the `kill stream` command. Later versions will provide finer-grained and more convenient management commands.
---
sidebar_label: Stream Processing
description: "TDengine stream processing unifies data ingestion, preprocessing, complex analysis, real-time computation, and alert triggering, providing a computing engine that reduces deployment, storage, and operation costs."
title: Stream Processing
---
When processing time-series data, raw data often needs to be cleansed and preprocessed before being stored long-term in a time-series database. Users usually have to deploy stream processing engines such as Kafka, Flink, or Spark alongside the time-series database, adding development and maintenance costs.
The stream processing engine of TDengine 3.0 minimizes the dependence on such extra middleware, truly unifying data ingestion, preprocessing, long-term storage, complex analysis, real-time computation, and real-time alert triggering; all of these tasks are accomplished with SQL alone, dramatically lowering the learning and usage costs.
## Creating a Stream
```sql
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name AS subquery
stream_options: {
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
WATERMARK time
IGNORE EXPIRED
}
```
For the detailed syntax rules, see [Stream Processing](../../taos-sql/stream).
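As a hedged illustration of the `stream_options` above (a sketch only: it reuses the `stream_db.meters` supertable from Example 1 below, the stream and target names are made up, and the option values are arbitrary), a stream with an explicit trigger mode and watermark can be created through the Rust connector documented elsewhere in this commit:
```rust
use taos::*;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let taos = TaosBuilder::from_dsn("taos://")?.build()?;
    // TRIGGER WINDOW_CLOSE emits results only when a window closes;
    // WATERMARK 10s tolerates data that arrives up to 10 seconds late.
    taos.exec(
        "create stream if not exists stream3 \
         trigger window_close watermark 10s \
         into stream_db.stream3_output_stb as \
         select _wstart as start, max(current) as max_current \
         from stream_db.meters interval (1h)",
    )
    .await?;
    Ok(())
}
```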
## Example 1
Utility meter data often amounts to tens of billions of records, so cleansing or transforming such scattered, messy data takes a long time and is hard to do efficiently and in real time. In the following example, stream processing filters out the readings from the past 12 hours whose voltage exceeds 220V, aggregates the remaining data in one-hour windows, computes the maximum current in each window, and writes the results to a specified table.
### Creating the Database and Source Tables
First prepare the data: create the database, a supertable, and several child tables.
```sql
drop database if exists stream_db;
create database stream_db;
create stable stream_db.meters (ts timestamp, current float, voltage int) TAGS (location varchar(64), groupId int);
create table stream_db.d1001 using stream_db.meters tags("beijing", 1);
create table stream_db.d1002 using stream_db.meters tags("guangzhou", 2);
create table stream_db.d1003 using stream_db.meters tags("shanghai", 3);
```
### Creating the Stream
```sql
create stream stream1 into stream_db.stream1_output_stb as select _wstart as start, _wend as end, max(current) as max_current from stream_db.meters where voltage <= 220 and ts > now - 12h interval (1h);
```
### Writing Data
```sql
insert into stream_db.d1001 values(now-14h, 10.3, 210);
insert into stream_db.d1001 values(now-13h, 13.5, 216);
insert into stream_db.d1001 values(now-12h, 12.5, 219);
insert into stream_db.d1002 values(now-11h, 14.7, 221);
insert into stream_db.d1002 values(now-10h, 10.5, 218);
insert into stream_db.d1002 values(now-9h, 11.2, 220);
insert into stream_db.d1003 values(now-8h, 11.5, 217);
insert into stream_db.d1003 values(now-7h, 12.3, 227);
insert into stream_db.d1003 values(now-6h, 12.3, 215);
```
### Querying the Results
```sql
taos> select * from stream_db.stream1_output_stb;
start | end | max_current | group_id |
===================================================================================================
2022-08-09 14:00:00.000 | 2022-08-09 15:00:00.000 | 10.50000 | 0 |
2022-08-09 15:00:00.000 | 2022-08-09 16:00:00.000 | 11.20000 | 0 |
2022-08-09 16:00:00.000 | 2022-08-09 17:00:00.000 | 11.50000 | 0 |
2022-08-09 18:00:00.000 | 2022-08-09 19:00:00.000 | 12.30000 | 0 |
Query OK, 4 rows in database (0.012033s)
```
## Example 2
An operator's platform collects system resource metrics, including CPU, memory, and network latency, from all servers in its data centers. After collection, the data needs to be rounded, the region and server name joined with an underscore, and the results sorted by time, grouped by server name, and written to a new table.
### Creating the Database and Source Tables
First prepare the data: create the database, a supertable, and several child tables.
```sql
drop database if exists stream_db;
create database stream_db;
create stable stream_db.idc (ts timestamp, cpu float, mem float, latency float) TAGS (location varchar(64), groupId int);
create table stream_db.server01 using stream_db.idc tags("beijing", 1);
create table stream_db.server02 using stream_db.idc tags("shanghai", 2);
create table stream_db.server03 using stream_db.idc tags("beijing", 2);
create table stream_db.server04 using stream_db.idc tags("tianjin", 3);
create table stream_db.server05 using stream_db.idc tags("shanghai", 1);
```
### Creating the Stream
```sql
create stream stream2 into stream_db.stream2_output_stb as select ts, concat_ws("_", location, tbname) as server_location, round(cpu) as cpu, round(mem) as mem, round(latency) as latency from stream_db.idc partition by tbname order by ts;
```
### Writing Data
```sql
insert into stream_db.server01 values(now-14h, 50.9, 654.8, 23.11);
insert into stream_db.server01 values(now-13h, 13.5, 221.2, 11.22);
insert into stream_db.server02 values(now-12h, 154.7, 218.3, 22.33);
insert into stream_db.server02 values(now-11h, 120.5, 111.5, 5.55);
insert into stream_db.server03 values(now-10h, 101.5, 125.6, 5.99);
insert into stream_db.server03 values(now-9h, 12.3, 165.6, 6.02);
insert into stream_db.server04 values(now-8h, 160.9, 120.7, 43.51);
insert into stream_db.server04 values(now-7h, 240.9, 520.7, 54.55);
insert into stream_db.server05 values(now-6h, 190.9, 320.7, 55.43);
insert into stream_db.server05 values(now-5h, 110.9, 600.7, 35.54);
```
### Querying the Results
```sql
taos> select ts, server_location, cpu, mem, latency from stream_db.stream2_output_stb;
ts | server_location | cpu | mem | latency |
================================================================================================================================
2022-08-09 21:24:56.785 | beijing_server01 | 51.00000 | 655.00000 | 23.00000 |
2022-08-09 22:24:56.795 | beijing_server01 | 14.00000 | 221.00000 | 11.00000 |
2022-08-09 23:24:56.806 | shanghai_server02 | 155.00000 | 218.00000 | 22.00000 |
2022-08-10 00:24:56.815 | shanghai_server02 | 121.00000 | 112.00000 | 6.00000 |
2022-08-10 01:24:56.826 | beijing_server03 | 102.00000 | 126.00000 | 6.00000 |
2022-08-10 02:24:56.838 | beijing_server03 | 12.00000 | 166.00000 | 6.00000 |
2022-08-10 03:24:56.846 | tianjin_server04 | 161.00000 | 121.00000 | 44.00000 |
2022-08-10 04:24:56.853 | tianjin_server04 | 241.00000 | 521.00000 | 55.00000 |
2022-08-10 05:24:56.866 | shanghai_server05 | 191.00000 | 321.00000 | 55.00000 |
2022-08-10 06:24:57.301 | shanghai_server05 | 111.00000 | 601.00000 | 36.00000 |
Query OK, 10 rows in database (0.022950s)
```

View File

@ -112,9 +112,9 @@ alter_database_options:
alter_database_option: {
CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'}
| CACHESIZE value
| FSYNC value
| WAL_LEVEL value
| WAL_FSYNC_PERIOD value
| KEEP value
| WAL value
}
```

View File

@ -140,10 +140,6 @@ taos> SELECT ts, ts AS primary_key_ts FROM d1001;
However, `first(*)`, `last(*)`, and `last_row(*)` do not support renaming individual columns.
### Implicit Result Columns
`Select_exprs` can be a column name of the table, or a function expression or calculation based on columns, with an upper limit of 256 expressions. When an `interval` or `group by tags` clause is used, the returned result always includes the timestamp column (the first column) and the tag columns of the group by clause. Later versions may support disabling the output of the implicit columns of the group by clause, with column output fully controlled by the select clause.
### Pseudocolumns
**TBNAME**
@ -152,7 +148,13 @@ taos> SELECT ts, ts AS primary_key_ts FROM d1001;
Get the names of all child tables of a supertable together with their tag information:
```mysql
SELECT TBNAME, location FROM meters;
SELECT DISTINCT TBNAME, location FROM meters;
```
It is recommended to use the INS_TAGS system table under INFORMATION_SCHEMA to query child-table tag information of a supertable, for example, to get the names and tag values of all child tables of the supertable meters:
```mysql
SELECT table_name, tag_name, tag_type, tag_value FROM information_schema.ins_tags WHERE stable_name='meters';
```
Count the number of child tables under a supertable:

View File

@ -1,6 +1,6 @@
---
sidebar_label: Metadata
title: Metadata Database
sidebar_label: Metadata
title: The Information_Schema Database That Stores Metadata
---
TDengine has a built-in database named `INFORMATION_SCHEMA`, which provides access to database metadata and to system information and status, such as the names of databases and tables and the SQL statements currently being executed. It stores information about all the other databases that TDengine maintains and contains multiple read-only tables. These tables are actually views rather than base tables, so no files are associated with them; they can only be queried, and write operations such as INSERT are not allowed. The `INFORMATION_SCHEMA` database is intended to provide, in a more consistent way, access to the information exposed by TDengine's various SHOW statements (such as SHOW TABLES and SHOW DATABASES). Compared with SHOW statements, SELECT ... FROM INFORMATION_SCHEMA.tablename has the following advantages:

View File

@ -1,9 +1,9 @@
---
sidebar_label: Performance Database
title: Performance Database
sidebar_label: Statistics
title: The Performance_Schema Database That Stores Statistics
---
Starting with version 3.0, TDengine provides a built-in database `performance_schema`, which stores performance-related statistics. This section describes its tables and their detailed structure.
Starting with version 3.0, TDengine provides a built-in database `performance_schema`, which stores performance-related statistics. This section describes its tables and their structure.
## PERF_APP
@ -94,16 +94,16 @@ Starting with version 3.0, TDengine provides a built-in database `performance_schema`, which
## PERF_TRANS
| # | **Column** | **Data Type** | **Description** |
| --- | :--------------: | ------------ | -------- |
| 1 | id | INT | |
| 2 | create_time | TIMESTAMP | |
| 3 | stage | BINARY(12) | |
| 4 | db1 | BINARY(64) | |
| 5 | db2 | BINARY(64) | |
| 6 | failed_times | INT | |
| 7 | last_exec_time | TIMESTAMP | |
| 8 | last_action_info | BINARY(511) | |
| #   |   **Column**     | **Data Type** | **Description**                                                              |
| --- | :--------------: | ------------- | ---------------------------------------------------------------------------- |
| 1   | id               | INT           | ID of the transaction in progress                                             |
| 2   | create_time      | TIMESTAMP     | Creation time of the transaction                                              |
| 3   | stage            | BINARY(12)    | Current stage of the transaction, usually one of redoAction, undoAction, commit |
| 4   | db1              | BINARY(64)    | Name of the first database conflicting with this transaction                  |
| 5   | db2              | BINARY(64)    | Name of the second database conflicting with this transaction                 |
| 6   | failed_times     | INT           | Total number of times the transaction has failed                              |
| 7   | last_exec_time   | TIMESTAMP     | Time of the transaction's last execution                                      |
| 8   | last_action_info | BINARY(511)   | Details of the transaction's last failed execution                            |
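A hedged sketch of how this table can be inspected (it assumes the table is exposed to SQL as `performance_schema.perf_trans`, in line with the naming of the other PERF_ tables, and mirrors the synchronous query pattern from the Rust connector documented in this commit):
```rust
use taos::sync::*;

fn main() -> anyhow::Result<()> {
    let taos = TaosBuilder::from_dsn("taos://")?.build()?;
    // list ongoing transactions with their stage and failure count
    let mut result =
        taos.query("select id, stage, failed_times from performance_schema.perf_trans")?;
    for row in result.rows() {
        let row = row?;
        for (name, value) in row {
            print!("{}: {}\t", name, value);
        }
        println!();
    }
    Ok(())
}
```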
## PERF_SMAS

View File

@ -17,7 +17,6 @@ TDengine provides a rich set of application development interfaces; to help users quickly
| **X86 64bit** | **Win32** | ● | ● | ● | ● | ○ | ○ | ● |
| **X86 32bit** | **Win32** | ○ | ○ | ○ | ○ | ○ | ○ | ● |
| **ARM64** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
| **ARM32** | **Linux** | ○ | ○ | ○ | ○ | ○ | ○ | ● |
| **MIPS Loongson** | **Linux** | ○ | ○ | ○ | ○ | ○ | ○ | ○ |
| **Alpha Sunway** | **Linux** | ○ | ○ | -- | -- | -- | -- | ○ |
| **X86 Hygon** | **Linux** | ○ | ○ | ○ | -- | -- | -- | ○ |
@ -49,7 +48,6 @@ TDengine version updates often add new features; the connectors in the list
| -------------- | -------- | ---------- | ------ | ------ | ----------- | -------- |
| **Connection management** | Supported | Supported | Supported | Supported | Supported | Supported |
| **Regular queries** | Supported | Supported | Supported | Supported | Supported | Supported |
| **Continuous queries** | Supported | Supported | Supported | Supported | Supported | Supported |
| **Parameter binding** | Supported | Supported | Supported | Supported | Supported | Supported |
| **TMQ** | Supported | Supported | Supported | Supported | Supported | Supported |
| **Schemaless** | Supported | Supported | Supported | Supported | Supported | Supported |

View File

@ -10,141 +10,213 @@ import TabItem from '@theme/TabItem';
import Preparition from "./_preparition.mdx"
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
import RustInfluxLine from "../../07-develop/03-insert-data/_rust_line.mdx"
import RustOpenTSDBTelnet from "../../07-develop/03-insert-data/_rust_opts_telnet.mdx"
import RustOpenTSDBJson from "../../07-develop/03-insert-data/_rust_opts_json.mdx"
import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
[![Crates.io](https://img.shields.io/crates/v/libtaos)](https://crates.io/crates/libtaos) ![Crates.io](https://img.shields.io/crates/d/libtaos) [![docs.rs](https://img.shields.io/docsrs/libtaos)](https://docs.rs/libtaos)
[![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos)
`libtaos` is the official Rust connector for TDengine. Rust developers can use it to develop applications that access TDengine databases.
`taos` is the official Rust connector for TDengine. Rust developers can use it to develop applications that access TDengine databases.
`libtaos` provides two ways to establish a connection. One is the **native connection**, which connects to a TDengine instance through the TDengine client driver (taosc). The other is the **REST connection**, which connects to a TDengine instance through taosAdapter's REST interface. You choose which connector to use through different "features" (the Cargo keyword features). The REST connection works on any platform, while the native connection works on all platforms that the TDengine client supports.
`taos` provides two ways to establish a connection. One is the **native connection**, which connects to a TDengine instance through the TDengine client driver (taosc). The other is the **WebSocket connection**, which connects to a TDengine instance through taosAdapter's WebSocket interface. You choose which connector to use through different "features" (the Cargo keyword `features`); both are supported by default. The WebSocket connection works on any platform, while the native connection works on all platforms that the TDengine client supports.
The source code of `libtaos` is hosted on [GitHub](https://github.com/taosdata/libtaos-rs).
The source code of this Rust connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-rust).
## Supported Platforms
The native connection supports the same platforms as the TDengine client driver.
The REST connection supports all platforms that can run Rust.
The WebSocket connection supports all platforms that can run Rust.
## Version Support
See the [version support list](/reference/connector#版本支持).
The Rust connector is still under rapid development, and backward compatibility is not guaranteed before 1.0. TDengine 2.4 or later is recommended to avoid known issues.
The Rust connector is still under rapid development, and backward compatibility is not guaranteed before 1.0. TDengine 3.0 or later is recommended to avoid known issues.
## Installation
### Pre-installation Preparation
* Install the Rust development toolchain
* If you use the native connection, install the TDengine client driver; see [Install Client Driver](/reference/connector#安装客户端驱动) for the steps
### Adding the libtaos Dependency
### Adding the taos Dependency
Depending on the connection method you choose, add the [libtaos][libtaos] dependency to your [Rust](https://rust-lang.org) project as follows:
Depending on the connection method you choose, add the [taos][taos] dependency to your [Rust](https://rust-lang.org) project as follows:
<Tabs defaultValue="native">
<TabItem value="native" label="原生连接">
<Tabs defaultValue="default">
<TabItem value="default" label="同时支持">
Add [libtaos][libtaos] to the `Cargo.toml` file:
Add [taos][taos] to the `Cargo.toml` file:
```toml
[dependencies]
# use default feature
libtaos = "*"
taos = "*"
```
</TabItem>
<TabItem value="rest" label="REST 连接">
<Tabs defaultValue="native">
<TabItem value="native" label="仅原生连接">
在 `Cargo.toml` 文件中添加 [libtaos][libtaos],并启用 `rest` 特性。
在 `Cargo.toml` 文件中添加 [taos][taos]
```toml
[dependencies]
# use rest feature
libtaos = { version = "*", features = ["rest"]}
taos = { version = "*", default-features = false, features = ["native"] }
```
</TabItem>
<TabItem value="rest" label="仅 Websocket">
在 `Cargo.toml` 文件中添加 [taos][taos],并启用 `ws` 特性。
```toml
[dependencies]
taos = { version = "*", default-features = false, features = ["ws"] }
```
</TabItem>
</Tabs>
### Using a Connection Pool
Enable the `r2d2` feature in `Cargo.toml`.
```toml
[dependencies]
# with taosc
libtaos = { version = "*", features = ["r2d2"] }
# or rest
libtaos = { version = "*", features = ["rest", "r2d2"] }
```
## Establishing a Connection
[TaosCfgBuilder] provides a builder-style API for creating connections or connection pools.
[TaosBuilder] creates a connection builder from a DSN connection description string.
```rust
let cfg: TaosCfg = TaosCfgBuilder::default()
.ip("127.0.0.1")
.user("root")
.pass("taosdata")
.db("log") // do not set if not require a default database.
.port(6030u16)
.build()
.expect("TaosCfg builder error");
}
let builder = TaosBuilder::from_dsn("taos://")?;
```
You can now use this object to create a connection:
```rust
let conn = cfg.connect()?;
let conn = builder.build()?;
```
Multiple connection objects can be created:
```rust
let conn = cfg.connect()?;
let conn2 = cfg.connect()?;
let conn1 = builder.build()?;
let conn2 = builder.build()?;
```
可以在应用中使用连接池
DSN 描述字符串基本结构如下
```rust
let pool = r2d2::Pool::builder()
.max_size(10000) // max connections
.build(cfg)?;
// ...
// Use pool to get connection
let conn = pool.get()?;
```text
<driver>[+<protocol>]://[[<username>:<password>@]<host>:<port>][/<database>][?<p1>=<v1>[&<p2>=<v2>]]
|------|------------|---|-----------|-----------|------|------|------------|-----------------------|
|driver| protocol | | username | password | host | port | database | params |
```
You can then perform operations on the database:
The meaning of each part is as follows:
- **driver**: the driver name is required so the connector knows how to create the connection; the following driver names are supported:
  - **taos**: indicates use of the TDengine connector driver.
  - **tmq**: subscribe to data via TMQ.
  - **http/ws**: create the connection over WebSocket.
  - **https/wss**: explicitly enable SSL/TLS under the WebSocket connection method.
- **protocol**: explicitly specify how the connection is established; for example, `taos+ws://localhost:6041` establishes the connection over WebSocket.
- **username/password**: the username and password used to create the connection.
- **host/port**: the server and port to connect to; when no server address and port are specified (`taos://`), the native connection defaults to `localhost:6030` and the WebSocket connection defaults to `localhost:6041`.
- **database**: the default database to connect to.
- **params**: other optional parameters.
A complete DSN description string example:
```text
taos+ws://localhost:6041/test
```
This means: connect to the server `localhost` on port `6041` over WebSocket (`ws`), with `test` as the default database.
This lets users specify the connection method through the DSN:
```rust
async fn demo() -> Result<(), Error> {
// get connection ...
use taos::*;
// create database
conn.exec("create database if not exists demo").await?;
// change database context
conn.exec("use demo").await?;
// create table
conn.exec("create table if not exists tb1 (ts timestamp, v int)").await?;
// insert
conn.exec("insert into tb1 values(now, 1)").await?;
// query
let rows = conn.query("select * from tb1").await?;
for row in rows.rows {
println!("{}", row.into_iter().join(","));
// use native protocol.
let builder = TaosBuilder::from_dsn("taos://localhost:6030")?;
let conn1 = builder.build();
// use websocket protocol.
let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?;
```
After the connection is established, you can perform database operations:
```rust
async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
// prepare database
taos.exec_many([
format!("DROP DATABASE IF EXISTS `{db}`"),
format!("CREATE DATABASE `{db}`"),
format!("USE `{db}`"),
])
.await?;
let inserted = taos.exec_many([
// create super table
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
TAGS (`groupid` INT, `location` BINARY(16))",
// create child table
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
// insert into child table
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
// insert with NULL values
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
// insert and automatically create table with tags if not exists
"INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
// insert many records in a single sql
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
]).await?;
assert_eq!(inserted, 6);
let mut result = taos.query("select * from `meters`").await?;
for field in result.fields() {
println!("got field: {}", field.name());
}
    // the result set can now be consumed row by row or deserialized via serde, as shown below
    Ok(())
}
```
Query results can be consumed in two ways: with built-in types, or with the [serde](https://serde.rs) serialization framework.
```rust
// Query option 1, use rows stream.
let mut rows = result.rows();
while let Some(row) = rows.try_next().await? {
for (name, value) in row {
println!("got value of {}: {}", name, value);
}
}
// Query options 2, use deserialization with serde.
#[derive(Debug, serde::Deserialize)]
#[allow(dead_code)]
struct Record {
// deserialize timestamp to chrono::DateTime<Local>
ts: DateTime<Local>,
// float to f32
current: Option<f32>,
// int to i32
voltage: Option<i32>,
phase: Option<f32>,
groupid: i32,
// binary/varchar to String
location: String,
}
let records: Vec<Record> = taos
.query("select * from `meters`")
.await?
.deserialize()
.try_collect()
.await?;
dbg!(records);
Ok(())
```
## Usage Examples
### Writing Data
@ -153,79 +225,52 @@ async fn demo() -> Result<(), Error> {
<RustInsert />
#### InfluxDB Line Protocol Write
#### STMT Write
<RustInfluxLine />
#### OpenTSDB Telnet Line Protocol Write
<RustOpenTSDBTelnet />
#### OpenTSDB JSON Line Protocol Write
<RustOpenTSDBJson />
<RustBind />
### Querying Data
<RustQuery />
### More Sample Programs
| Program Path | Description |
| -------------- | ----------------------------------------------------------------------------- |
| [demo.rs] | Basic API usage example |
| [bailongma-rs] | A Prometheus remote-storage API adapter backed by TDengine, using the r2d2 connection pool |
## API Reference
### Connection Builder API
### Connection Builder
The [Builder Pattern](https://doc.rust-lang.org/1.0.0/style/ownership/builders.html) is Rust's approach to complex data types and optional configuration. In the [libtaos] implementation, the connection builder [TaosCfgBuilder] serves as the entry point of the TDengine Rust connector, offering optional configuration of the server, port, database, username, password, and more.
The `default()` method builds a [TaosCfg] with default parameters, for connecting to the database or building a connection pool later.
Build a connection builder from a DSN.
```rust
let cfg = TaosCfgBuilder::default().build()?;
let cfg = TaosBuilder::default().build()?;
```
使用构造器模式,用户可按需设置
使用 `builder` 对象创建多个连接
```rust
let cfg = TaosCfgBuilder::default()
.ip("127.0.0.1")
.user("root")
.pass("taosdata")
.db("log")
.port(6030u16)
.build()?;
```
Use the [TaosCfg] object to create a TDengine connection:
```rust
let conn: Taos = cfg.connect();
let conn: Taos = cfg.build();
```
### Connection Pool
In complex applications, a connection pool is recommended. The [libtaos] connection pool is implemented with [r2d2].
In complex applications, a connection pool is recommended. The [taos] connection pool is implemented with [r2d2].
A connection pool with default parameters can be created as follows.
```rust
let pool = r2d2::Pool::new(cfg)?;
let pool = TaosBuilder::from_dsn(dsn)?.pool()?;
```
You can also use the pool builder to configure the pool parameters:
```rust
use std::time::Duration;
let pool = r2d2::Pool::builder()
.max_size(5000) // max connections
.max_lifetime(Some(Duration::from_minutes(100))) // lifetime of each connection
.min_idle(Some(1000)) // minimal idle connections
.connection_timeout(Duration::from_minutes(2))
.build(cfg);
let dsn = "taos://localhost:6030";
let opts = PoolBuilder::new()
.max_size(5000) // max connections
.max_lifetime(Some(Duration::from_secs(60 * 60))) // lifetime of each connection
.min_idle(Some(1000)) // minimal idle connections
.connection_timeout(Duration::from_secs(2));
let pool = TaosBuilder::from_dsn(dsn)?.with_pool_builder(opts)?;
```
In application code, use `pool.get()?` to obtain a connection object [Taos].
@ -236,44 +281,85 @@ let taos = pool.get()?;
### Connections
The [Taos] struct is the connection manager in [libtaos]; it mainly provides two APIs:
The [Taos][struct.Taos] object provides several APIs for database operations:
1. `exec`: execute a non-query SQL statement, such as `CREATE`, `ALTER`, or `INSERT`.
```rust
taos.exec().await?;
let affected_rows = taos.exec("INSERT INTO tb1 VALUES(now, NULL)").await?;
```
2. `query`: execute a query statement and return a [TaosQueryData] object.
2. `exec_many`: execute multiple SQL statements at once (sequentially).
```rust
let q = taos.query("select * from log.logs").await?;
taos.exec_many([
"CREATE DATABASE test",
"USE test",
"CREATE TABLE `tb1` (`ts` TIMESTAMP, `val` INT)",
]).await?;
```
The [TaosQueryData] object stores the query result data and basic information about the returned columns (name, type, length):
Column information is stored with [ColumnMeta]:
3. `query`: execute a query statement and return a [ResultSet] object.
```rust
let cols = &q.column_meta;
let mut q = taos.query("select * from log.logs").await?;
```
The [ResultSet] object stores the query result data and basic information about the returned columns (name, type, length):
Column information is obtained with the [.fields()] method:
```rust
let cols = q.fields();
for col in cols {
println!("name: {}, type: {:?}, bytes: {}", col.name, col.type_, col.bytes);
println!("name: {}, type: {:?} , bytes: {}", col.name(), col.ty(), col.bytes());
}
```
Fetch data row by row:
```rust
for (i, row) in q.rows.iter().enumerate() {
for (j, cell) in row.iter().enumerate() {
println!("cell({}, {}) data: {}", i, j, cell);
let mut rows = result.rows();
let mut nrows = 0;
while let Some(row) = rows.try_next().await? {
for (col, (name, value)) in row.enumerate() {
println!(
"[{}] got value in col {} (named `{:>8}`): {}",
nrows, col, name, value
);
}
nrows += 1;
}
```
Or use the [serde](https://serde.rs) serialization framework.
```rust
#[derive(Debug, Deserialize)]
struct Record {
// deserialize timestamp to chrono::DateTime<Local>
ts: DateTime<Local>,
// float to f32
current: Option<f32>,
// int to i32
voltage: Option<i32>,
phase: Option<f32>,
groupid: i32,
// binary/varchar to String
location: String,
}
let records: Vec<Record> = taos
.query("select * from `meters`")
.await?
.deserialize()
.try_collect()
.await?;
```
Note that Rust asynchronous functions and an async runtime are required.
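A minimal skeleton (a sketch assuming the `tokio` runtime declared in the dependency section; the calls mirror the snippets above):
```rust
use taos::*;

// the query/exec APIs above are async, so an async runtime is required
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let taos = TaosBuilder::from_dsn("taos://")?.build()?;
    let mut result = taos.query("select * from `meters`").await?;
    for field in result.fields() {
        println!("got field: {}", field.name());
    }
    Ok(())
}
```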
[Taos] provides Rust method wrappers for some SQL statements to reduce the need for `format!` blocks:
[Taos][struct.Taos] provides Rust method wrappers for some SQL statements to reduce the need for `format!` blocks:
- `.describe(table: &str)`: execute `DESCRIBE` and return a Rust data structure.
- `.create_database(database: &str)`: execute a `CREATE DATABASE` statement.
@ -283,42 +369,61 @@ let taos = pool.get()?;
### Parameter Binding API
Like the C interface, Rust provides a parameter binding API. First, create a parameter binding object [Stmt] for a SQL statement from the [Taos] object:
Like the C interface, Rust provides a parameter binding API. First, create a parameter binding object [Stmt] for a SQL statement from the [Taos][struct.Taos] object:
```rust
let mut stmt: Stmt = taos.stmt("insert into ? values(?,?)")?;
let mut stmt = Stmt::init(&taos).await?;
stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?;
```
The parameter binding object provides a set of interfaces for binding parameters:
##### `.set_tbname(tbname: impl ToCString)`
#### `.set_tbname(name)`
Binds the table name.
##### `.set_tbname_tags(tbname: impl ToCString, tags: impl IntoParams)`
```rust
let mut stmt = taos.stmt("insert into ? values(? ,?)")?;
stmt.set_tbname("d0")?;
```
#### `.set_tags(&[tag])`
When the SQL statement uses a supertable, this binds the child table name and tag values:
```rust
let mut stmt = taos.stmt("insert into ? using stb0 tags(?) values(?,?)")?;
// tags can be created with any supported type, here is an example using JSON
let v = Field::Json(serde_json::from_str("{\"tag1\":\"一二三四五六七八九十\"}").unwrap());
stmt.set_tbname_tags("tb0", [&tag])?;
let mut stmt = taos.stmt("insert into ? using stb0 tags(?) values(? ,?)")?;
stmt.set_tbname("d0")?;
stmt.set_tags(&[Value::VarChar("涛思".to_string())])?;
```
##### `.bind(params: impl IntoParams)`
#### `.bind(&[column])`
Binds value types. Use the [Field] struct to build the required types and bind them:
Binds value types. Use the [ColumnView] struct to build the required types and bind them:
```rust
let ts = Field::Timestamp(Timestamp::now());
let value = Field::Float(0.0);
stmt.bind(vec![ts, value].iter())?;
let params = vec![
ColumnView::from_millis_timestamp(vec![164000000000]),
ColumnView::from_bools(vec![true]),
ColumnView::from_tiny_ints(vec![i8::MAX]),
ColumnView::from_small_ints(vec![i16::MAX]),
ColumnView::from_ints(vec![i32::MAX]),
ColumnView::from_big_ints(vec![i64::MAX]),
ColumnView::from_unsigned_tiny_ints(vec![u8::MAX]),
ColumnView::from_unsigned_small_ints(vec![u16::MAX]),
ColumnView::from_unsigned_ints(vec![u32::MAX]),
ColumnView::from_unsigned_big_ints(vec![u64::MAX]),
ColumnView::from_floats(vec![f32::MAX]),
ColumnView::from_doubles(vec![f64::MAX]),
ColumnView::from_varchar(vec!["ABC"]),
ColumnView::from_nchar(vec!["涛思数据"]),
];
let rows = stmt.bind(&params)?.add_batch()?.execute()?;
```
##### `.execute()`
#### `.execute()`
Executes the SQL. A [Stmt] object can be reused, re-bound, and executed again after execution.
Executes the SQL. A [Stmt] object can be reused, re-bound, and executed again after execution. Before executing, make sure all data has been added to the execution queue with `.add_batch`.
```rust
stmt.execute()?;
@ -329,60 +434,84 @@ stmt.execute()?;
//stmt.execute()?;
```
### Line Protocol Interface
For a runnable example, see the [example on GitHub](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs).
The line protocol interface supports multiple modes and different precisions and requires importing the constants from the schemaless module for configuration:
### Subscription
TDengine starts subscriptions through the message queue [TMQ](../../../taos-sql/tmq/).
Starting from a DSN, build a TMQ connector.
```rust
use libtaos::*;
use libtaos::schemaless::*;
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
```
- InfluxDB line protocol
Create a consumer:
```rust
let lines = [
"st,t1=abc,t2=def,t3=anything c1=3i64,c3=L\"pass\",c2=false 1626006833639000000"
"st,t1=abc,t2=def,t3=anything c1=3i64,c3=L\"abc\",c4=4f64 1626006833639000000"
];
taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANOSECONDS)?;
```
```rust
let mut consumer = tmq.build()?;
```
- OpenTSDB Telnet protocol
A consumer can subscribe to one or more `TOPIC`s.
```rust
let lines = ["sys.if.bytes.out 1479496100 1.3E3 host=web01 interface=eth0"];
taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_SECONDS)?;
```
```rust
consumer.subscribe(["tmq_meters"]).await?;
```
- OpenTSDB JSON protocol
The TMQ message queue is a [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type; the corresponding APIs can be used to consume each message, which is then marked as consumed with `.commit`.
```rust
let lines = [r#"
{
"metric": "st",
"timestamp": 1626006833,
"value": 10,
"tags": {
"t1": true,
"t2": false,
"t3": 10,
"t4": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"
```rust
{
let mut stream = consumer.stream();
while let Some((offset, message)) = stream.try_next().await? {
// get information from offset
// the topic
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
// one block for one table, get table name if needed
let name = block.table_name();
let records: Vec<Record> = block.deserialize().try_collect()?;
println!(
"** table: {}, got {} records: {:#?}\n",
name.unwrap(),
records.len(),
records
);
}
}"#];
taos.schemaless_insert(&lines, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_SECONDS)?;
```
}
consumer.commit(offset).await?;
}
}
```
For usage of the other related struct APIs, see the Rust documentation at <https://docs.rs/libtaos>.
Stop the subscription:
[libtaos]: https://github.com/taosdata/libtaos-rs
[tdengine]: https://github.com/taosdata/TDengine
[bailongma-rs]: https://github.com/taosdata/bailongma-rs
```rust
consumer.unsubscribe().await;
```
For a TMQ DSN, the following options can be set; note that `group.id` is mandatory. The sketch after this list shows them combined.
- `group.id`: consumers in the same consumer group load-balance messages with at-least-once consumption semantics.
- `client.id`: an optional client identifier for the subscription.
- `auto.offset.reset`: optionally initializes the subscription starting point; *earliest* subscribes from the beginning and *latest* subscribes only from the newest data, with the default being from the beginning. Note that this option takes effect only once for a given `group.id`.
- `enable.auto.commit`: when set to `true`, enables auto-commit mode; this can be used when data consistency is not critical.
- `auto.commit.interval.ms`: the interval for automatic commit marking.
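A sketch combining these options into a single DSN (the host, group, client, and topic names are placeholders; the calls mirror the snippets above):
```rust
use taos::*;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // group.id is mandatory; the remaining parameters are optional tuning knobs
    let dsn = "taos://localhost:6030/?group.id=demo_group&client.id=demo_client\
               &auto.offset.reset=earliest&enable.auto.commit=true\
               &auto.commit.interval.ms=1000";
    let tmq = TmqBuilder::from_dsn(dsn)?;
    let mut consumer = tmq.build()?;
    consumer.subscribe(["tmq_meters"]).await?;
    // ... consume the stream as shown above, committing each offset ...
    consumer.unsubscribe().await;
    Ok(())
}
```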
For a complete subscription example, see the [sample file on GitHub](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
For usage of the other related struct APIs, see the Rust documentation at <https://docs.rs/taos>.
[taos]: https://github.com/taosdata/rust-connector-taos
[r2d2]: https://crates.io/crates/r2d2
[demo.rs]: https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs
[TaosCfgBuilder]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfgBuilder.html
[TaosCfg]: https://docs.rs/libtaos/latest/libtaos/struct.TaosCfg.html
[Taos]: https://docs.rs/libtaos/latest/libtaos/struct.Taos.html
[TaosQueryData]: https://docs.rs/libtaos/latest/libtaos/field/struct.TaosQueryData.html
[Field]: https://docs.rs/libtaos/latest/libtaos/field/enum.Field.html
[Stmt]: https://docs.rs/libtaos/latest/libtaos/stmt/struct.Stmt.html
[TaosBuilder]: https://docs.rs/taos/latest/taos/struct.TaosBuilder.html
[TaosCfg]: https://docs.rs/taos/latest/taos/struct.TaosCfg.html
[struct.Taos]: https://docs.rs/taos/latest/taos/struct.Taos.html
[Stmt]: https://docs.rs/taos/latest/taos/struct.Stmt.html

View File

@ -19,15 +19,15 @@ description: "List of platforms supported by the TDengine server, client, and connectors"
The compatibility matrix is as follows:
| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **ARM32** | **MIPS Loongson** | **Alpha Sunway** | **X64 Hygon** |
| ----------- | ------------- | --------- | --------- | ------------- | --------- | --------- | ------------- | -------------- | ------------ |
| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** |
| **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● | ● |
| **JDBC** | ● | ● | ● | ○ | ● | ● | ● | ● | ● |
| **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | ● |
| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● |
| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **MIPS Loongson** | **Alpha Sunway** | **X64 Hygon** |
| ----------- | ------------- | --------- | --------- | ------------- | --------- | ------------- | -------------- | ------------ |
| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** |
| **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● |
| **JDBC** | ● | ● | ● | ○ | ● | ● | ● | ● |
| **Python** | ● | ● | ● | ○ | ● | ● | -- | ● |
| **Go** | ● | ● | ● | ○ | ● | ○ | -- | -- |
| **NodeJs** | ● | ● | ○ | ○ | ● | ○ | -- | -- |
| **C#** | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● |
Note: ● means officially tested and verified, ○ means unofficially tested and verified, and -- means not verified.

View File

@ -3,8 +3,7 @@ title: Schemaless Writing
description: 'The schemaless write method removes the need to create supertables/child tables in advance; the storage structures matching the data are created automatically as data is written'
---
在物联网应用中常会采集比较多的数据项用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级或者设备自身的硬件调整等原因数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作TDengine
从 2.2.0.0 版本开始,提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时Schemaless
在物联网应用中常会采集比较多的数据项用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级或者设备自身的硬件调整等原因数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作TDengine提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时Schemaless
将自动增加必要的数据列,保证用户写入的数据可以被正确存储。
无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别你也可以通过SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。
@ -41,10 +40,10 @@ All data in the tag_set is automatically converted to the nchar data type and does not need
| -------- | -------- | ------------ | -------------- |
| 1 | none or f64 | double | 8 |
| 2 | f32 | float | 4 |
| 3 | i8 | TinyInt | 1 |
| 4 | i16 | SmallInt | 2 |
| 5 | i32 | Int | 4 |
| 6 | i64 或 i | Bigint | 8 |
| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
| 4 | i16/u16 | SmallInt/USmallInt | 2 |
| 5 | i32/u32 | Int/UInt | 4 |
| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
- t, T, true, True, TRUE, f, F, false, and False are treated directly as BOOL values.
@ -69,20 +68,21 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
```
Note that tag_key1 and tag_key2 here do not follow the original order entered by the user; they are the result of sorting the tag names in ascending string order. Therefore, tag_key1 is not the first tag entered in the line protocol.
After sorting, the MD5 hash value "md5_val" of the string is computed, and the result is combined into the table name "t_md5_val", where "t\*" is a fixed prefix carried by every table generated automatically through this mapping.
After sorting, the MD5 hash value "md5_val" of the string is computed, and the result is combined into the table name "t_md5_val", where "t_" is a fixed prefix carried by every table generated automatically through this mapping (a sketch of this mapping follows the list below).
To let users control the generated table name, the smlChildTableName option can be configured. For example, with smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1. Note that if multiple rows have the same tname but different tag_sets, the tag_set specified when the table was first auto-created is used and the others are ignored.
2. If the supertable parsed from the line protocol does not exist, it is created.
2. If the supertable parsed from the line protocol does not exist, it is created (manually creating the supertable is not recommended, as data inserts may otherwise behave abnormally).
3. If the child table parsed from the line protocol does not exist, schemaless writing creates it using the child table name determined in step 1 or 2.
4. If a tag column or regular column specified in a data row does not exist, the corresponding tag or regular column is added to the supertable (columns are only ever added, never removed).
5. If some tag columns or regular columns of the supertable are not given a value in a data row, their values are set to
NULL for that row.
6. For BINARY or NCHAR columns, if the length of a value in a data row exceeds the column type's limit, the maximum character length the column can store is increased automatically (only ever increased) to ensure the data is stored intact.
7. If the specified child table already exists and the tag values specified this time differ from the saved values, the values from the newest data row overwrite the old tag values.
8. Any error encountered during processing aborts the write and returns an error code.
7. Any error encountered during processing aborts the write and returns an error code.
8. To improve write efficiency, it is assumed by default that the order of the field_set within a given supertable is fixed (the first record contains all fields, and subsequent records follow the same order). If the order differs, the smlDataFormat parameter must be set to false; otherwise the data is written assuming an identical order and the data in the database will be corrupted.
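The auto-naming rule from item 1 can be sketched as follows (illustrative only: it assumes the third-party `md5` crate, and the exact string that TDengine hashes may differ from this reconstruction):
```rust
// Illustrative sketch of the "t_" + MD5 child-table-name mapping; not TDengine's code.
fn child_table_name(measurement: &str, tags: &[(&str, &str)]) -> String {
    let mut sorted: Vec<_> = tags.to_vec();
    // tag keys are first sorted in ascending string order
    sorted.sort_by(|a, b| a.0.cmp(b.0));
    let mut s = String::from(measurement);
    for (k, v) in sorted {
        s.push_str(&format!(",{}={}", k, v));
    }
    // the fixed "t_" prefix plus the MD5 of the sorted string
    format!("t_{:x}", md5::compute(s))
}

fn main() {
    // the tag order in the input does not affect the generated name
    let a = child_table_name("st", &[("t2", "4"), ("t1", "3")]);
    let b = child_table_name("st", &[("t1", "3"), ("t2", "4")]);
    assert_eq!(a, b);
    println!("{a}");
}
```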
:::tip
All schemaless processing logic still follows TDengine's underlying limits on data structures, for example that the total length of each data row cannot exceed
48 KB. For the specific limits, see [TAOS SQL Boundary Limits](/taos-sql/limit).
16 KB. For the specific limits, see [TAOS SQL Boundary Limits](/taos-sql/limit).
:::

Binary file not shown.

Before

Width:  |  Height:  |  Size: 8.8 KiB

After

Width:  |  Height:  |  Size: 37 KiB

View File

@ -103,12 +103,12 @@ typedef struct SDataBlockInfo {
int16_t hasVarCol;
uint32_t capacity;
// TODO: optimize and remove following
int64_t version; // used for stream, and need serialization
int64_t ts; // used for stream, and need serialization
int32_t childId; // used for stream, do not serialize
EStreamType type; // used for stream, do not serialize
STimeWindow calWin; // used for stream, do not serialize
TSKEY watermark;// used for stream
int64_t version; // used for stream, and need serialization
int64_t ts; // used for stream, and need serialization
int32_t childId; // used for stream, do not serialize
EStreamType type; // used for stream, do not serialize
STimeWindow calWin; // used for stream, do not serialize
TSKEY watermark; // used for stream
} SDataBlockInfo;
typedef struct SSDataBlock {
@ -268,6 +268,15 @@ typedef struct SSortExecInfo {
int32_t readBytes; // read io bytes
} SSortExecInfo;
// stream special block column
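// (these fixed indexes locate the start/end timestamps, the table uid, the
// group id, and the calculation-window bounds inside such special blocks)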
#define START_TS_COLUMN_INDEX 0
#define END_TS_COLUMN_INDEX 1
#define UID_COLUMN_INDEX 2
#define GROUPID_COLUMN_INDEX 3
#define CALCULATE_START_TS_COLUMN_INDEX 4
#define CALCULATE_END_TS_COLUMN_INDEX 5
#ifdef __cplusplus
}
#endif

View File

@ -199,6 +199,7 @@ bool fmIsUserDefinedFunc(int32_t funcId);
bool fmIsDistExecFunc(int32_t funcId);
bool fmIsForbidFillFunc(int32_t funcId);
bool fmIsForbidStreamFunc(int32_t funcId);
bool fmIsForbidSuperTableFunc(int32_t funcId);
bool fmIsIntervalInterpoFunc(int32_t funcId);
bool fmIsInterpFunc(int32_t funcId);
bool fmIsLastRowFunc(int32_t funcId);

View File

@ -34,6 +34,8 @@ typedef struct SStreamTask SStreamTask;
enum {
STREAM_STATUS__NORMAL = 0,
STREAM_STATUS__STOP,
STREAM_STATUS__FAILED,
STREAM_STATUS__RECOVER,
};

View File

@ -194,6 +194,9 @@ function install_bin() {
${csudo}rm -f ${bin_link_dir}/${serverName} || :
${csudo}rm -f ${bin_link_dir}/${adapterName} || :
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}rm -f ${bin_link_dir}/${demoName} || :
${csudo}rm -f ${bin_link_dir}/${benchmarkName} || :
${csudo}rm -f ${bin_link_dir}/${dumpName} || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
@ -205,7 +208,6 @@ function install_bin() {
[ -x ${install_main_dir}/bin/${adapterName} ] && ${csudo}ln -s ${install_main_dir}/bin/${adapterName} ${bin_link_dir}/${adapterName} || :
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || :
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || :
[ -x ${install_main_dir}/bin/${tmqName} ] && ${csudo}ln -s ${install_main_dir}/bin/${tmqName} ${bin_link_dir}/${tmqName} || :
[ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || :
[ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
@ -964,12 +966,17 @@ function installProduct() {
## ==============================Main program starts from here============================
serverFqdn=$(hostname)
if [ "$verType" == "server" ]; then
# Install server and client
if [ -x ${bin_dir}/${serverName} ]; then
update_flag=1
updateProduct
# Check default 2.x data file.
if [ -e ${data_dir}/dnode/dnodeCfg.json ]; then
echo -e "\033[44;31;5mThe default data directory ${data_dir} contains old data of tdengine 2.x, please clear it before installing!\033[0m"
else
installProduct
# Install server and client
if [ -x ${bin_dir}/${serverName} ]; then
update_flag=1
updateProduct
else
installProduct
fi
fi
elif [ "$verType" == "client" ]; then
interactiveFqdn=no

View File

@ -5,6 +5,10 @@ if (DEFINED GRANT_CFG_INCLUDE_DIR)
add_definitions(-DGRANTS_CFG)
endif()
IF (TD_GRANT)
ADD_DEFINITIONS(-D_GRANT)
ENDIF ()
target_include_directories(
common
PUBLIC "${TD_SOURCE_DIR}/include/common"

View File

@ -135,12 +135,12 @@ static const SSysDbTableSchema streamSchema[] = {
{.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "trigger", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "trigger", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysDbTableSchema userTblsSchema[] = {

View File

@ -405,9 +405,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1);
if (cfgAddInt32(pCfg, "numOfVnodeWriteThreads", tsNumOfVnodeWriteThreads, 1, 1024, 0) != 0) return -1;
// tsNumOfVnodeSyncThreads = tsNumOfCores;
tsNumOfVnodeSyncThreads = 32;
tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 1);
tsNumOfVnodeSyncThreads = tsNumOfCores * 2;
tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16);
if (cfgAddInt32(pCfg, "numOfVnodeSyncThreads", tsNumOfVnodeSyncThreads, 1, 1024, 0) != 0) return -1;
tsNumOfQnodeQueryThreads = tsNumOfCores * 2;

View File

@ -521,7 +521,6 @@ bool tdSTSRowIterNext(STSRowIter *pIter, SCellVal *pVal) {
tdSTSRowIterGetTpVal(pIter, pCol->type, pCol->offset - sizeof(TSKEY), pVal);
} else if (TD_IS_KV_ROW(pIter->pRow)) {
tdSTSRowIterGetKvVal(pIter, pCol->colId, &pIter->kvIdx, pVal);
ASSERT(0);
} else {
ASSERT(0);
}

View File

@ -53,14 +53,27 @@ static bool dmCheckDiskSpace() {
osUpdate();
if (!osDataSpaceAvailable()) {
dError("free disk size: %f GB, too little, require %f GB at least at least , quit", (double)tsDataSpace.size.avail / 1024.0 / 1024.0 / 1024.0, (double)tsDataSpace.reserved / 1024.0 / 1024.0 / 1024.0);
terrno = TSDB_CODE_NO_AVAIL_DISK;
return false;
}
if (!osLogSpaceAvailable()) {
dError("free disk size: %f GB, too little, require %f GB at least at least, quit", (double)tsLogSpace.size.avail / 1024.0 / 1024.0 / 1024.0, (double)tsLogSpace.reserved / 1024.0 / 1024.0 / 1024.0);
terrno = TSDB_CODE_NO_AVAIL_DISK;
return false;
}
if (!osTempSpaceAvailable()) {
dError("free disk size: %f GB, too little, require %f GB at least at least, quit", (double)tsTempSpace.size.avail / 1024.0 / 1024.0 / 1024.0, (double)tsTempSpace.reserved / 1024.0 / 1024.0 / 1024.0);
terrno = TSDB_CODE_NO_AVAIL_DISK;
return false;
}
return true;
}
static bool dmCheckDataDirVersion() {
char checkDataDirJsonFileName[PATH_MAX];
snprintf(checkDataDirJsonFileName, PATH_MAX, "%s/dnode/dnodeCfg.json", tsDataDir);
if (taosCheckExistFile(checkDataDirJsonFileName)) {
dError("The default data directory %s contains old data of tdengine 2.x, please clear it before running!", tsDataDir);
return false;
}
return true;
@ -68,6 +81,7 @@ static bool dmCheckDiskSpace() {
int32_t dmInit(int8_t rtype) {
dInfo("start to init dnode env");
if (!dmCheckDataDirVersion()) return -1;
if (!dmCheckDiskSpace()) return -1;
if (dmCheckRepeatInit(dmInstance()) != 0) return -1;
if (dmInitSystem() != 0) return -1;

View File

@ -15,6 +15,7 @@ target_include_directories(
target_link_libraries(
mnode scheduler sdb wal transport cjson sync monitor executor qworker stream parser
)
IF (TD_GRANT)
TARGET_LINK_LIBRARIES(mnode grant)
ADD_DEFINITIONS(-D_GRANT)

View File

@ -1419,7 +1419,7 @@ const char *mndGetDbStr(const char *src) {
return pos;
}
int64_t getValOfDiffPrecision(int8_t unit, int64_t val) {
static int64_t getValOfDiffPrecision(int8_t unit, int64_t val) {
int64_t v = 0;
switch (unit) {
case 's':
@ -1444,7 +1444,7 @@ int64_t getValOfDiffPrecision(int8_t unit, int64_t val) {
return v;
}
char *buildRetension(SArray *pRetension) {
static char *buildRetension(SArray *pRetension) {
size_t size = taosArrayGetSize(pRetension);
if (size == 0) {
return NULL;

View File

@ -197,6 +197,30 @@ void mndReleaseStream(SMnode *pMnode, SStreamObj *pStream) {
sdbRelease(pSdb, pStream);
}
static void mndShowStreamStatus(char *dst, SStreamObj *pStream) {
int8_t status = atomic_load_8(&pStream->status);
if (status == STREAM_STATUS__NORMAL) {
strcpy(dst, "normal");
} else if (status == STREAM_STATUS__STOP) {
strcpy(dst, "stop");
} else if (status == STREAM_STATUS__FAILED) {
strcpy(dst, "failed");
} else if (status == STREAM_STATUS__RECOVER) {
strcpy(dst, "recover");
}
}
static void mndShowStreamTrigger(char *dst, SStreamObj *pStream) {
int8_t trigger = pStream->trigger;
if (trigger == STREAM_TRIGGER_AT_ONCE) {
strcpy(dst, "at once");
} else if (trigger == STREAM_TRIGGER_WINDOW_CLOSE) {
strcpy(dst, "window close");
} else if (trigger == STREAM_TRIGGER_MAX_DELAY) {
strcpy(dst, "max delay");
}
}
static int32_t mndCheckCreateStreamReq(SCMCreateStreamReq *pCreate) {
if (pCreate->name[0] == 0 || pCreate->sql == NULL || pCreate->sql[0] == 0 || pCreate->sourceDB[0] == 0 ||
pCreate->targetStbFullName[0] == 0) {
@ -926,8 +950,11 @@ static int32_t mndRetrieveStream(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)sql, false);
char status[20 + VARSTR_HEADER_SIZE] = {0};
mndShowStreamStatus(&status[VARSTR_HEADER_SIZE], pStream);
varDataSetLen(status, strlen(varDataVal(status)));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->status, true);
colDataAppend(pColInfo, numOfRows, (const char *)&status, false);
char sourceDB[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
tNameFromString(&n, pStream->sourceDb, T_NAME_ACCT | T_NAME_DB);
@ -958,8 +985,11 @@ static int32_t mndRetrieveStream(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->watermark, false);
char trigger[20 + VARSTR_HEADER_SIZE] = {0};
mndShowStreamTrigger(&trigger[VARSTR_HEADER_SIZE], pStream);
varDataSetLen(trigger, strlen(varDataVal(trigger)));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->trigger, false);
colDataAppend(pColInfo, numOfRows, (const char *)&trigger, false);
numOfRows++;
sdbRelease(pSdb, pStream);

View File

@ -86,6 +86,11 @@ target_link_libraries(
PUBLIC stream
PUBLIC index
)
IF (TD_GRANT)
TARGET_LINK_LIBRARIES(vnode PUBLIC grant)
ENDIF ()
target_compile_definitions(vnode PUBLIC -DMETA_REFACT)
if(${BUILD_WITH_INVERTEDINDEX})

View File

@ -171,7 +171,7 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tsdbGetStbIdList(SMeta* pMeta, int64_t suid, SArray* list);
SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid,
SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid,
const char* stbFullName, int32_t vgId, SBatchDeleteReq* pDeleteReq);
// sma

View File

@ -209,8 +209,8 @@ int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
return 0;
_err:
metaError("vgId:%d, failed to create stb:%s uid:%" PRId64 " since %s", TD_VID(pMeta->pVnode), pReq->name,
pReq->suid, tstrerror(terrno));
metaError("vgId:%d, failed to create stb:%s uid:%" PRId64 " since %s", TD_VID(pMeta->pVnode), pReq->name, pReq->suid,
tstrerror(terrno));
return -1;
}
@ -304,7 +304,8 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
ret = tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
if (ret < 0) {
ASSERT(0);
terrno = TSDB_CODE_TDB_STB_NOT_EXIST;
// ASSERT(0);
return -1;
}
@ -1196,6 +1197,9 @@ static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME) {
goto _exit;
}
metaDebug("vgId:%d, set schema:(%" PRId64 ") sver:%d since %s", TD_VID(pMeta->pVnode), pME->uid, pSW->version,
tstrerror(terrno));
_exit:
taosMemoryFree(pVal);
tEncoderClear(&coder);

View File

@ -201,8 +201,9 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
}
SBatchDeleteReq deleteReq;
SSubmitReq *pSubmitReq = tdBlockToSubmit((const SArray *)msg, pTsmaStat->pTSchema, true, pTsmaStat->pTSma->dstTbUid,
pTsmaStat->pTSma->dstTbName, pTsmaStat->pTSma->dstVgId, &deleteReq);
SSubmitReq *pSubmitReq =
tdBlockToSubmit(pSma->pVnode, (const SArray *)msg, pTsmaStat->pTSchema, true, pTsmaStat->pTSma->dstTbUid,
pTsmaStat->pTSma->dstTbName, pTsmaStat->pTSma->dstVgId, &deleteReq);
if (!pSubmitReq) {
smaError("vgId:%d, failed to gen submit blk while tsma insert for smaIndex %" PRIi64 " since %s", SMA_VID(pSma),

View File

@ -13,10 +13,44 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tcommon.h"
#include "tmsg.h"
#include "tq.h"
SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, bool createTb, int64_t suid,
const char* stbFullName, int32_t vgId, SBatchDeleteReq* deleteReq) {
int32_t tdBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBlock* pDataBlock,
SBatchDeleteReq* deleteReq) {
ASSERT(pDataBlock->info.type == STREAM_DELETE_RESULT);
int32_t totRow = pDataBlock->info.rows;
SColumnInfoData* pTsCol = taosArrayGet(pDataBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pGidCol = taosArrayGet(pDataBlock->pDataBlock, GROUPID_COLUMN_INDEX);
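// For each deleted row: derive the child-table name from the group id, resolve its uid from the
// meta store, and queue a single (ts, uid) delete request.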
for (int32_t row = 0; row < totRow; row++) {
int64_t ts = *(int64_t*)colDataGetData(pTsCol, row);
/*int64_t groupId = *(int64_t*)colDataGetData(pGidCol, row);*/
int64_t groupId = 0;
char* name = buildCtbNameByGroupId(stbFullName, groupId);
tqDebug("stream delete msg: groupId :%ld, name: %s", groupId, name);
SMetaReader mr = {0};
metaReaderInit(&mr, pVnode->pMeta, 0);
if (metaGetTableEntryByName(&mr, name) < 0) {
metaReaderClear(&mr);
taosMemoryFree(name);
return -1;
}
int64_t uid = mr.me.uid;
metaReaderClear(&mr);
taosMemoryFree(name);
SSingleDeleteReq req = {
.ts = ts,
.uid = uid,
};
taosArrayPush(deleteReq->deleteReqs, &req);
}
return 0;
}
SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pTSchema, bool createTb,
int64_t suid, const char* stbFullName, int32_t vgId, SBatchDeleteReq* pDeleteReq) {
SSubmitReq* ret = NULL;
SArray* schemaReqs = NULL;
SArray* schemaReqSz = NULL;
@ -33,9 +67,13 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
schemaReqSz = taosArrayInit(sz, sizeof(int32_t));
for (int32_t i = 0; i < sz; i++) {
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
if (pDataBlock->info.type == STREAM_DELETE_DATA) {
//
if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
int32_t padding1 = 0;
void* padding2 = taosMemoryMalloc(1);
taosArrayPush(schemaReqSz, &padding1);
taosArrayPush(schemaReqs, &padding2);
}
STagVal tagVal = {
.cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1,
.type = TSDB_DATA_TYPE_UBIGINT,
@ -97,7 +135,10 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
int32_t cap = sizeof(SSubmitReq);
for (int32_t i = 0; i < sz; i++) {
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
int32_t rows = pDataBlock->info.rows;
if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
continue;
}
int32_t rows = pDataBlock->info.rows;
// TODO min
int32_t rowSize = pDataBlock->info.rowSize;
int32_t maxLen = TD_ROW_MAX_BYTES_FROM_SCHEMA(pTSchema);
@ -119,6 +160,11 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
SSubmitBlk* blkHead = POINTER_SHIFT(ret, sizeof(SSubmitReq));
for (int32_t i = 0; i < sz; i++) {
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
pDeleteReq->suid = suid;
tdBuildDeleteReq(pVnode, stbFullName, pDataBlock, pDeleteReq);
continue;
}
blkHead->numOfRows = htonl(pDataBlock->info.rows);
blkHead->sversion = htonl(pTSchema->version);
@ -188,7 +234,7 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
ASSERT(pTask->tbSink.pTSchema);
deleteReq.deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq));
SSubmitReq* pReq = tdBlockToSubmit(pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
SSubmitReq* pReq = tdBlockToSubmit(pVnode, pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
pTask->tbSink.stbFullName, pVnode->config.vgId, &deleteReq);
tqDebug("vgId:%d, task %d convert blocks over, put into write-queue", TD_VID(pVnode), pTask->taskId);
@ -201,12 +247,14 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
ASSERT(0);
}
SEncoder encoder;
void* buf = taosMemoryCalloc(1, len + sizeof(SMsgHead));
void* buf = rpcMallocCont(len + sizeof(SMsgHead));
void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
tEncoderInit(&encoder, abuf, len);
tEncodeSBatchDeleteReq(&encoder, &deleteReq);
tEncoderClear(&encoder);
((SMsgHead*)buf)->vgId = pVnode->config.vgId;
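// Send the batch-delete message only when at least one delete request was actually produced.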
if (taosArrayGetSize(deleteReq.deleteReqs) != 0) {
SRpcMsg msg = {
.msgType = TDMT_VND_BATCH_DEL,

View File

@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tsdb.h"
#include "osDef.h"
#include "tsdb.h"
#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC)
typedef enum {
@ -130,8 +130,8 @@ struct STsdbReader {
SBlockLoadSuppInfo suppInfo;
STsdbReadSnap* pReadSnap;
SIOCostSummary cost;
STSchema* pSchema;// the newest version schema
STSchema* pMemSchema;// the previous schema for in-memory data, to avoid load schema too many times
STSchema* pSchema; // the newest version schema
STSchema* pMemSchema; // the previous schema for in-memory data, to avoid load schema too many times
SDataFReader* pFileReader;
SVersionRange verRange;
@ -1213,17 +1213,17 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo*
return code;
}
static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pBlockData, int64_t key, SFileBlockDumpInfo* pDumpInfo) {
static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pBlockData, int64_t key,
SFileBlockDumpInfo* pDumpInfo) {
// opt version
// 1. it is not a border point
// 2. the next point is not a duplicated timestamp
if ((pDumpInfo->rowIndex < pDumpInfo->totalRows - 1 && pReader->order == TSDB_ORDER_ASC) ||
(pDumpInfo->rowIndex > 0 && pReader->order == TSDB_ORDER_DESC)) {
int32_t step = pReader->order == TSDB_ORDER_ASC? 1:-1;
int32_t step = pReader->order == TSDB_ORDER_ASC ? 1 : -1;
int64_t nextKey = pBlockData->aTSKEY[pDumpInfo->rowIndex + step];
if (nextKey != key) { // merge is not needed
if (nextKey != key) { // merge is not needed
doAppendRowFromBlock(pReader->pResBlock, pReader, pBlockData, pDumpInfo->rowIndex);
pDumpInfo->rowIndex += step;
return true;
@ -1239,7 +1239,7 @@ static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader*
pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, uid, -1);
}
if (sversion == pReader->pSchema->version) {
if (pReader->pSchema && sversion == pReader->pSchema->version) {
return pReader->pSchema;
}
@ -1265,10 +1265,10 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
SBlockData* pBlockData = &pReader->status.fileBlockData;
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
TSDBKEY k = TSDBROW_KEY(pRow);
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
SArray* pDelList = pBlockScanInfo->delSkyline;
bool freeTSRow = false;
TSDBKEY k = TSDBROW_KEY(pRow);
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
SArray* pDelList = pBlockScanInfo->delSkyline;
bool freeTSRow = false;
uint64_t uid = pBlockScanInfo->uid;
// ascending order traverse
@ -1547,6 +1547,8 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader, STableBlockScanInfo*
SBlockData* pBlockData = &pReader->status.fileBlockData;
int32_t step = ASCENDING_TRAVERSE(pReader->order) ? 1 : -1;
int32_t numOfSub = 1;
int64_t st = taosGetTimestampUs();
while (1) {
@ -1556,6 +1558,8 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader, STableBlockScanInfo*
pDumpInfo->rowIndex += step;
SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter);
numOfSub = pBlock->nSubBlock;
if (pDumpInfo->rowIndex >= pBlock->nRow || pDumpInfo->rowIndex < 0) {
setBlockAllDumped(pDumpInfo, pBlock, pReader->order);
break;
@ -1585,9 +1589,9 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader, STableBlockScanInfo*
setComposedBlockFlag(pReader, true);
int64_t et = taosGetTimestampUs();
tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64
tsdbDebug("%p uid:%" PRIu64 ", composed data block created, subBlock:%d, brange:%" PRIu64 "-%" PRIu64
" rows:%d, elapsed time:%.2f ms %s",
pReader, pBlockScanInfo->uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
pReader, pBlockScanInfo->uid, numOfSub, pResBlock->info.window.skey, pResBlock->info.window.ekey,
pResBlock->info.rows, (et - st) / 1000.0, pReader->idStr);
return TSDB_CODE_SUCCESS;
@ -2149,7 +2153,7 @@ TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pRea
}
TSDBROW* pRow = tsdbTbDataIterGet(pIter->iter);
TSDBKEY key = {.ts = pRow->pTSRow->ts, .version = pRow->version};
TSDBKEY key = {.ts = pRow->pTSRow->ts, .version = pRow->version};
if (outOfTimeWindow(key.ts, &pReader->window)) {
pIter->hasVal = false;
return NULL;
@ -2182,7 +2186,6 @@ TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pRea
}
}
int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
STsdbReader* pReader) {
while (1) {
@ -2314,9 +2317,8 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc
void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
STsdbReader* pReader, bool* freeTSRow) {
TSDBROW* pNextRow = NULL;
TSDBROW current = *pRow;
TSDBROW current = *pRow;
{ // if the timestamp of the next valid row has a different ts, return current row directly
pIter->hasVal = tsdbTbDataIterNext(pIter->iter);
@ -2346,6 +2348,10 @@ void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDe
// get the correct schema for data in memory
STSchema* pTSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(&current), pReader, uid);
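// Cache the first resolved schema as the reader's base schema so tRowMergerInit2 never sees a NULL base.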
if (pReader->pSchema == NULL) {
pReader->pSchema = pTSchema;
}
tRowMergerInit2(&merge, pReader->pSchema, &current, pTSchema);
STSchema* pTSchema1 = doGetSchemaForTSRow(TSDBROW_SVERSION(pNextRow), pReader, uid);
@ -2386,8 +2392,8 @@ void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlo
tRowMergerGetRow(&merge, pTSRow);
}
int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STSRow** pTSRow,
int64_t endKey, bool* freeTSRow) {
int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STSRow** pTSRow, int64_t endKey,
bool* freeTSRow) {
TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
SArray* pDelList = pBlockScanInfo->delSkyline;
@ -2442,7 +2448,7 @@ int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow*
int32_t numOfCols = (int32_t)taosArrayGetSize(pBlock->pDataBlock);
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
STSchema* pSchema = doGetSchemaForTSRow(pTSRow->sver, pReader, uid);
STSchema* pSchema = doGetSchemaForTSRow(pTSRow->sver, pReader, uid);
SColVal colVal = {0};
int32_t i = 0, j = 0;
@ -2528,7 +2534,7 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e
do {
STSRow* pTSRow = NULL;
bool freeTSRow = false;
bool freeTSRow = false;
tsdbGetNextRowInMem(pBlockScanInfo, pReader, &pTSRow, endKey, &freeTSRow);
if (pTSRow == NULL) {
break;
@ -2577,9 +2583,7 @@ void* tsdbGetIvtIdx(SMeta* pMeta) {
return metaGetIvtIdx(pMeta);
}
uint64_t getReaderMaxVersion(STsdbReader *pReader) {
return pReader->verRange.maxVer;
}
uint64_t getReaderMaxVersion(STsdbReader* pReader) { return pReader->verRange.maxVer; }
/**
* @brief Get all suids since suid
@ -2757,7 +2761,8 @@ void tsdbReaderClose(STsdbReader* pReader) {
SIOCostSummary* pCost = &pReader->cost;
tsdbDebug("%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%" PRId64
" SMA-time:%.2f ms, fileBlocks:%" PRId64 ", fileBlocks-time:%.2f ms, "
" SMA-time:%.2f ms, fileBlocks:%" PRId64
", fileBlocks-time:%.2f ms, "
"build in-memory-block-time:%.2f ms, STableBlockScanInfo size:%.2f Kb %s",
pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaData, pCost->smaLoadTime,
pCost->numOfBlocks, pCost->blockLoadTime, pCost->buildmemBlock,
@ -2765,7 +2770,9 @@ void tsdbReaderClose(STsdbReader* pReader) {
taosMemoryFree(pReader->idStr);
taosMemoryFree(pReader->pSchema);
taosMemoryFree(pReader->pMemSchema);
if (pReader->pMemSchema != pReader->pSchema) {
taosMemoryFree(pReader->pMemSchema);
}
taosMemoryFreeClear(pReader);
}

View File

@ -145,7 +145,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
int32_t len;
int32_t ret;
vTrace("vgId:%d, start to process write request %s, index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType),
vDebug("vgId:%d, start to process write request %s, index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType),
version);
pVnode->state.applied = version;
@ -1071,6 +1071,7 @@ static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t version, void
// TODO
}
}
taosArrayDestroy(deleteReq.deleteReqs);
return 0;
}

View File

@ -883,6 +883,32 @@ int32_t ctgGetVgInfoFromHashValue(SCatalog *pCtg, SDBVgInfo *dbInfo, const SName
CTG_RET(code);
}
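// taosArraySearch comparator: reports whether a table's hash value falls below, inside, or above a
// vgroup's [hashBegin, hashEnd] range.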
int32_t ctgHashValueComp(void const *lp, void const *rp) {
uint32_t *key = (uint32_t *)lp;
SVgroupInfo *pVg = *(SVgroupInfo **)rp;
if (*key < pVg->hashBegin) {
return -1;
} else if (*key > pVg->hashEnd) {
return 1;
}
return 0;
}
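// Sort comparator: orders vgroups by ascending hashBegin so their hash ranges can be binary searched.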
int ctgVgInfoComp(const void* lp, const void* rp) {
SVgroupInfo *pLeft = *(SVgroupInfo **)lp;
SVgroupInfo *pRight = *(SVgroupInfo **)rp;
if (pLeft->hashBegin < pRight->hashBegin) {
return -1;
} else if (pLeft->hashBegin > pRight->hashBegin) {
return 1;
}
return 0;
}
int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo *dbInfo, SCtgTbHashsCtx *pCtx, char* dbFName, SArray* pNames, bool update) {
int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
@ -923,9 +949,19 @@ int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo
}
}
taosHashCancelIterate(dbInfo->vgHash, pIter);
return TSDB_CODE_SUCCESS;
}
SArray* pVgList = taosArrayInit(vgNum, POINTER_BYTES);
void *pIter = taosHashIterate(dbInfo->vgHash, NULL);
while (pIter) {
taosArrayPush(pVgList, &pIter);
pIter = taosHashIterate(dbInfo->vgHash, pIter);
}
taosArraySort(pVgList, ctgVgInfoComp);
char tbFullName[TSDB_TABLE_FNAME_LEN];
sprintf(tbFullName, "%s.", dbFName);
int32_t offset = strlen(tbFullName);
@ -940,25 +976,20 @@ int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo
uint32_t hashValue = (*fp)(tbFullName, (uint32_t)tbNameLen);
void *pIter = taosHashIterate(dbInfo->vgHash, NULL);
while (pIter) {
vgInfo = pIter;
if (hashValue >= vgInfo->hashBegin && hashValue <= vgInfo->hashEnd) {
taosHashCancelIterate(dbInfo->vgHash, pIter);
break;
}
pIter = taosHashIterate(dbInfo->vgHash, pIter);
vgInfo = NULL;
}
SVgroupInfo **p = taosArraySearch(pVgList, &hashValue, ctgHashValueComp, TD_EQ);
if (NULL == vgInfo) {
if (NULL == p) {
ctgError("no hash range found for hash value [%u], db:%s, numOfVgId:%d", hashValue, dbFName, taosHashGetSize(dbInfo->vgHash));
ASSERT(0);
taosArrayDestroy(pVgList);
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
}
vgInfo = *p;
SVgroupInfo* pNewVg = taosMemoryMalloc(sizeof(SVgroupInfo));
if (NULL == pNewVg) {
taosArrayDestroy(pVgList);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
@ -977,6 +1008,8 @@ int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo
}
}
taosArrayDestroy(pVgList);
CTG_RET(code);
}

View File

@ -52,13 +52,6 @@ typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int
#define NEEDTO_COMPRESS_QUERY(size) ((size) > tsCompressColData ? 1 : 0)
#define START_TS_COLUMN_INDEX 0
#define END_TS_COLUMN_INDEX 1
#define UID_COLUMN_INDEX 2
#define GROUPID_COLUMN_INDEX 3
#define CALCULATE_START_TS_COLUMN_INDEX 4
#define CALCULATE_END_TS_COLUMN_INDEX 5
enum {
// when this task starts to execute, this status will set
TASK_NOT_COMPLETED = 0x1u,
@ -682,6 +675,7 @@ typedef struct SWindowRowsSup {
TSKEY prevTs;
int32_t startRowIndex;
int32_t numOfRows;
uint64_t groupId;
} SWindowRowsSup;
typedef struct SSessionAggOperatorInfo {
@ -701,6 +695,7 @@ typedef struct SSessionAggOperatorInfo {
typedef struct SResultWindowInfo {
SResultRowPosition pos;
STimeWindow win;
uint64_t groupId;
bool isOutput;
bool isClosed;
} SResultWindowInfo;
@ -1015,9 +1010,8 @@ SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY star
SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
TSKEY endTs, uint64_t groupId, int64_t gap, int32_t* pIndex);
bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap);
int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs,
TSKEY* pEndTs, int32_t rows, int32_t start, int64_t gap, SHashObj* pStDeleted);
bool functionNeedToExecute(SqlFunctionCtx* pCtx);
bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup);
bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup);
bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup);
void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid);

View File

@ -1277,8 +1277,12 @@ void destroyTableQueryInfoImpl(STableQueryInfo* pTableQueryInfo) {
}
void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset) {
bool init = false;
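// Short-circuit: once 'init' is set, later iterations only bind resultInfo and skip re-initialization.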
for (int32_t i = 0; i < numOfOutput; ++i) {
pCtx[i].resultInfo = getResultEntryInfo(pResult, i, rowEntryInfoOffset);
if (init) {
continue;
}
struct SResultRowEntryInfo* pResInfo = pCtx[i].resultInfo;
if (isRowEntryCompleted(pResInfo) && isRowEntryInitialized(pResInfo)) {
@ -1295,6 +1299,8 @@ void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numO
} else {
pResInfo->initialized = true;
}
} else {
init = true;
}
}
}
@ -1943,6 +1949,7 @@ int32_t loadRemoteDataCallback(void* param, SDataBuf* pMsg, int32_t code) {
SExchangeInfo* pExchangeInfo = taosAcquireRef(exchangeObjRefPool, pWrapper->exchangeId);
if (pExchangeInfo == NULL) {
qWarn("failed to acquire exchange operator, since it may have been released");
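// Free the response payload before the early return so it is not leaked.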
taosMemoryFree(pMsg->pData);
return TSDB_CODE_SUCCESS;
}
@ -1963,6 +1970,7 @@ int32_t loadRemoteDataCallback(void* param, SDataBuf* pMsg, int32_t code) {
qDebug("%s fetch rsp received, index:%d, blocks:%d, rows:%d", pSourceDataInfo->taskId, index, pRsp->numOfBlocks,
pRsp->numOfRows);
} else {
taosMemoryFree(pMsg->pData);
pSourceDataInfo->code = code;
qDebug("%s fetch rsp received, index:%d, error:%d", pSourceDataInfo->taskId, index, tstrerror(code));
}

View File

@ -1174,10 +1174,15 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock
for (int32_t rowId = 0; rowId < pBlock->info.rows; rowId++) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[rowId], &pInfo->interval, TSDB_ORDER_ASC);
bool isClosed = false;
STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
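// Compute the active window and its close state only for overdue rows; fresh rows keep the open-ended default.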
if (isOverdue(tsCol[rowId], &pInfo->twAggSup)) {
win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[rowId], &pInfo->interval, TSDB_ORDER_ASC);
isClosed = isCloseWindow(&win, &pInfo->twAggSup);
}
// must check update info first.
bool update = updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, tsCol[rowId]);
if ((update || (isSignleIntervalWindow(pInfo) && isCloseWindow(&win, &pInfo->twAggSup) &&
if ((update || (isSignleIntervalWindow(pInfo) && isClosed &&
isDeletedWindow(&win, pBlock->info.groupId, pInfo->sessionSup.pIntervalAggSup))) && out) {
appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, &pBlock->info.uid);
}

View File

@ -90,16 +90,18 @@ static void updateTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pWin, b
ts[4] = pWin->ekey + delta; // window end key
}
static void doKeepTuple(SWindowRowsSup* pRowSup, int64_t ts) {
static void doKeepTuple(SWindowRowsSup* pRowSup, int64_t ts, uint64_t groupId) {
pRowSup->win.ekey = ts;
pRowSup->prevTs = ts;
pRowSup->numOfRows += 1;
pRowSup->groupId = groupId;
}
static void doKeepNewWindowStartInfo(SWindowRowsSup* pRowSup, const int64_t* tsList, int32_t rowIndex) {
static void doKeepNewWindowStartInfo(SWindowRowsSup* pRowSup, const int64_t* tsList, int32_t rowIndex, uint64_t groupId) {
pRowSup->startRowIndex = rowIndex;
pRowSup->numOfRows = 0;
pRowSup->win.skey = tsList[rowIndex];
pRowSup->groupId = groupId;
}
static FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey,
@ -851,23 +853,34 @@ static int32_t saveResult(int64_t ts, int32_t pageId, int32_t offset, uint64_t g
return TSDB_CODE_SUCCESS;
}
static int32_t saveWinResult(int64_t ts, int32_t pageId, int32_t offset, uint64_t groupId, SHashObj* pUpdatedMap) {
SResKeyPos* newPos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
if (newPos == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
newPos->groupId = groupId;
newPos->pos = (SResultRowPosition){.pageId = pageId, .offset = offset};
*(int64_t*)newPos->key = ts;
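// Key the map by (ts, groupId) so repeated updates to the same window collapse into a single entry.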
SWinRes key = {.ts = ts, .groupId = groupId};
if (taosHashPut(pUpdatedMap, &key, sizeof(SWinRes), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) {
taosMemoryFree(newPos);
}
return TSDB_CODE_SUCCESS;
}
static int32_t saveWinResultRow(SResultRow* result, uint64_t groupId, SHashObj* pUpdatedMap) {
return saveWinResult(result->win.skey, result->pageId, result->offset, groupId, pUpdatedMap);
}
static int32_t saveResultRow(SResultRow* result, uint64_t groupId, SArray* pUpdated) {
return saveResult(result->win.skey, result->pageId, result->offset, groupId, pUpdated);
}
static void removeResult(SArray* pUpdated, SWinRes* pKey) {
int32_t size = taosArrayGetSize(pUpdated);
int32_t index = binarySearchCom(pUpdated, size, pKey, TSDB_ORDER_DESC, compareResKey);
if (index >= 0 && 0 == compareResKey(pKey, pUpdated, index)) {
taosArrayRemove(pUpdated, index);
}
}
static void removeResults(SArray* pWins, SArray* pUpdated) {
static void removeResults(SArray* pWins, SHashObj* pUpdatedMap) {
int32_t size = taosArrayGetSize(pWins);
for (int32_t i = 0; i < size; i++) {
SWinRes* pW = taosArrayGet(pWins, i);
removeResult(pUpdated, pW);
taosHashRemove(pUpdatedMap, pW, sizeof(SWinRes));
}
}
@ -894,11 +907,14 @@ int32_t compareWinRes(void* pKey, void* data, int32_t index) {
return -1;
}
static void removeDeleteResults(SArray* pUpdated, SArray* pDelWins) {
int32_t upSize = taosArrayGetSize(pUpdated);
static void removeDeleteResults(SHashObj* pUpdatedMap, SArray* pDelWins) {
if (!pUpdatedMap || taosHashGetSize(pUpdatedMap) == 0) {
return;
}
int32_t delSize = taosArrayGetSize(pDelWins);
for (int32_t i = 0; i < upSize; i++) {
SResKeyPos* pResKey = taosArrayGetP(pUpdated, i);
void* pIte = NULL;
while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) {
SResKeyPos* pResKey = (SResKeyPos*)pIte;
int32_t index = binarySearchCom(pDelWins, delSize, pResKey, TSDB_ORDER_DESC, compareWinRes);
if (index >= 0 && 0 == compareWinRes(pResKey, pDelWins, index)) {
taosArrayRemove(pDelWins, index);
@ -914,7 +930,7 @@ bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup) {
bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup) { return isOverdue(pWin->ekey, pSup); }
static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock,
int32_t scanFlag, SArray* pUpdated) {
int32_t scanFlag, SHashObj* pUpdatedMap) {
SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info;
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
@ -940,7 +956,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
}
if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
saveResultRow(pResult, tableGroupId, pUpdated);
saveWinResultRow(pResult, tableGroupId, pUpdatedMap);
setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur);
}
}
@ -997,7 +1013,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
}
if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
saveResultRow(pResult, tableGroupId, pUpdated);
saveWinResultRow(pResult, tableGroupId, pUpdatedMap);
setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur);
}
@ -1142,7 +1158,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
char* val = colDataGetData(pStateColInfoData, j);
if (!pInfo->hasKey) {
if (gid != pRowSup->groupId || !pInfo->hasKey) {
// todo extract method
if (IS_VAR_DATA_TYPE(pInfo->stateKey.type)) {
varDataCopy(pInfo->stateKey.pData, val);
@ -1152,10 +1168,10 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
pInfo->hasKey = true;
doKeepNewWindowStartInfo(pRowSup, tsList, j);
doKeepTuple(pRowSup, tsList[j]);
doKeepNewWindowStartInfo(pRowSup, tsList, j, gid);
doKeepTuple(pRowSup, tsList[j], gid);
} else if (compareVal(val, &pInfo->stateKey)) {
doKeepTuple(pRowSup, tsList[j]);
doKeepTuple(pRowSup, tsList[j], gid);
if (j == 0 && pRowSup->startRowIndex != 0) {
pRowSup->startRowIndex = 0;
}
@ -1177,8 +1193,8 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
pRowSup->numOfRows, NULL, pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC);
// here we start a new session window
doKeepNewWindowStartInfo(pRowSup, tsList, j);
doKeepTuple(pRowSup, tsList[j]);
doKeepNewWindowStartInfo(pRowSup, tsList, j, gid);
doKeepTuple(pRowSup, tsList[j], gid);
// todo extract method
if (IS_VAR_DATA_TYPE(pInfo->stateKey.type)) {
@ -1437,7 +1453,7 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval*
}
}
static int32_t getAllIntervalWindow(SHashObj* pHashMap, SArray* resWins) {
static int32_t getAllIntervalWindow(SHashObj* pHashMap, SHashObj* resWins) {
void* pIte = NULL;
size_t keyLen = 0;
while ((pIte = taosHashIterate(pHashMap, pIte)) != NULL) {
@ -1446,7 +1462,7 @@ static int32_t getAllIntervalWindow(SHashObj* pHashMap, SArray* resWins) {
ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY)));
TSKEY ts = *(int64_t*)((char*)key + sizeof(uint64_t));
SResultRowPosition* pPos = (SResultRowPosition*)pIte;
int32_t code = saveResult(ts, pPos->pageId, pPos->offset, groupId, resWins);
int32_t code = saveWinResult(ts, pPos->pageId, pPos->offset, groupId, resWins);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -1455,7 +1471,7 @@ static int32_t getAllIntervalWindow(SHashObj* pHashMap, SArray* resWins) {
}
static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, SInterval* pInterval,
SHashObj* pPullDataMap, SArray* closeWins, SArray* pRecyPages,
SHashObj* pPullDataMap, SHashObj* closeWins, SArray* pRecyPages,
SDiskbasedBuf* pDiscBuf) {
qDebug("===stream===close interval window");
void* pIte = NULL;
@ -1487,7 +1503,7 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup,
}
SResultRowPosition* pPos = (SResultRowPosition*)pIte;
if (pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
int32_t code = saveResult(ts, pPos->pageId, pPos->offset, groupId, closeWins);
int32_t code = saveWinResult(ts, pPos->pageId, pPos->offset, groupId, closeWins);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -1577,11 +1593,14 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
SOperatorInfo* downstream = pOperator->pDownstream[0];
SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); // SResKeyPos
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP);
SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
while (1) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
break;
}
// qInfo("===stream===%ld", pBlock->info.version);
printDataBlock(pBlock, "single interval recv");
if (pBlock->info.type == STREAM_CLEAR) {
@ -1594,7 +1613,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
doDeleteSpecifyIntervalWindow(&pInfo->aggSup, pBlock, pInfo->pDelWins, &pInfo->interval);
continue;
} else if (pBlock->info.type == STREAM_GET_ALL) {
getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdated);
getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap);
continue;
}
@ -1617,17 +1636,24 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
}
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdated);
hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdatedMap);
}
pOperator->status = OP_RES_TO_RETURN;
closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdated,
closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdatedMap,
pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
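// Drain the dedup hash map into pUpdated and sort it, since the result builder expects an ordered array.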
void* pIte = NULL;
while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) {
taosArrayPush(pUpdated, pIte);
}
taosArraySort(pUpdated, resultrowComparAsc);
finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset);
initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
removeDeleteResults(pUpdated, pInfo->pDelWins);
removeDeleteResults(pUpdatedMap, pInfo->pDelWins);
taosHashCleanup(pUpdatedMap);
doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
if (pInfo->pDelRes->info.rows > 0) {
return pInfo->pDelRes;
@ -1911,7 +1937,7 @@ _error:
return NULL;
}
// todo handle multiple tables cases.
// todo handle multiple timeline cases. assume no timeline interweaving
static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperatorInfo* pInfo, SSDataBlock* pBlock) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SExprSupp* pSup = &pOperator->exprSupp;
@ -1935,12 +1961,13 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator
// In case of an ascending or descending order scan, only one time window needs to be kept for each table.
TSKEY* tsList = (TSKEY*)pColInfoData->pData;
for (int32_t j = 0; j < pBlock->info.rows; ++j) {
if (pInfo->winSup.prevTs == INT64_MIN) {
doKeepNewWindowStartInfo(pRowSup, tsList, j);
doKeepTuple(pRowSup, tsList[j]);
} else if (tsList[j] - pRowSup->prevTs <= gap && (tsList[j] - pRowSup->prevTs) >= 0) {
if (gid != pRowSup->groupId || pInfo->winSup.prevTs == INT64_MIN) {
doKeepNewWindowStartInfo(pRowSup, tsList, j, gid);
doKeepTuple(pRowSup, tsList[j], gid);
} else if (((tsList[j] - pRowSup->prevTs >= 0) && (tsList[j] - pRowSup->prevTs <= gap)) ||
((pRowSup->prevTs - tsList[j] >= 0) && (pRowSup->prevTs - tsList[j] <= gap))) {
// The gap is within the threshold, so the row belongs to the current session window that is already open.
doKeepTuple(pRowSup, tsList[j]);
doKeepTuple(pRowSup, tsList[j], gid);
if (j == 0 && pRowSup->startRowIndex != 0) {
pRowSup->startRowIndex = 0;
}
@ -1963,8 +1990,8 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator
pRowSup->numOfRows, NULL, pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC);
// here we start a new session window
doKeepNewWindowStartInfo(pRowSup, tsList, j);
doKeepTuple(pRowSup, tsList[j]);
doKeepNewWindowStartInfo(pRowSup, tsList, j, gid);
doKeepTuple(pRowSup, tsList[j], gid);
}
}
@ -2867,7 +2894,7 @@ STimeWindow getFinalTimeWindow(int64_t ts, SInterval* pInterval) {
}
static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t tableGroupId,
SArray* pUpdated) {
SHashObj* pUpdatedMap) {
SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)pOperatorInfo->info;
SResultRowInfo* pResultRowInfo = &(pInfo->binfo.resultRowInfo);
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
@ -2949,8 +2976,8 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey,
NULL, TSDB_ORDER_ASC);
}
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pUpdated) {
saveResultRow(pResult, tableGroupId, pUpdated);
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pUpdatedMap) {
saveWinResultRow(pResult, tableGroupId, pUpdatedMap);
setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur);
}
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true);
@ -3056,6 +3083,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SStreamFinalIntervalOperatorInfo* pInfo = pOperator->info;
SOperatorInfo* downstream = pOperator->pDownstream[0];
SArray* pUpdated = taosArrayInit(4, POINTER_BYTES);
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP);
SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
TSKEY maxTs = INT64_MIN;
SExprSupp* pSup = &pOperator->exprSupp;
@ -3113,7 +3142,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
clearSpecialDataBlock(pInfo->pUpdateRes);
removeDeleteResults(pUpdated, pInfo->pDelWins);
removeDeleteResults(pUpdatedMap, pInfo->pDelWins);
pOperator->status = OP_RES_TO_RETURN;
qDebug("%s return data", IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
break;
@ -3140,7 +3169,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
taosArrayDestroy(pUpWins);
continue;
}
removeResults(pUpWins, pUpdated);
removeResults(pUpWins, pUpdatedMap);
copyDataBlock(pInfo->pUpdateRes, pBlock);
// copyUpdateDataBlock(pInfo->pUpdateRes, pBlock, pInfo->primaryTsIndex);
pInfo->returnUpdate = true;
@ -3158,15 +3187,15 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
pOperator->exprSupp.numOfExprs, pOperator->pTaskInfo, pUpdated);
continue;
}
removeResults(pInfo->pDelWins, pUpdated);
removeResults(pInfo->pDelWins, pUpdatedMap);
break;
} else if (pBlock->info.type == STREAM_GET_ALL && IS_FINAL_OP(pInfo)) {
getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdated);
getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap);
continue;
} else if (pBlock->info.type == STREAM_RETRIEVE && !IS_FINAL_OP(pInfo)) {
SArray* pUpWins = taosArrayInit(8, sizeof(SWinRes));
doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock, pUpWins);
removeResults(pUpWins, pUpdated);
removeResults(pUpWins, pUpdatedMap);
taosArrayDestroy(pUpWins);
if (taosArrayGetSize(pUpdated) > 0) {
break;
@ -3182,7 +3211,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
}
setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->order, MAIN_SCAN, true);
doHashInterval(pOperator, pBlock, pBlock->info.groupId, pUpdated);
doHashInterval(pOperator, pBlock, pBlock->info.groupId, pUpdatedMap);
if (IS_FINAL_OP(pInfo)) {
int32_t chIndex = getChildIndex(pBlock);
int32_t size = taosArrayGetSize(pInfo->pChildren);
@ -3207,12 +3236,19 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
if (IS_FINAL_OP(pInfo)) {
closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, pInfo->pPullDataMap,
pUpdated, pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
pUpdatedMap, pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
closeChildIntervalWindow(pInfo->pChildren, pInfo->twAggSup.maxTs);
} else {
pInfo->binfo.pRes->info.watermark = pInfo->twAggSup.maxTs;
}
void* pIte = NULL;
while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) {
taosArrayPush(pUpdated, pIte);
}
taosHashCleanup(pUpdatedMap);
taosArraySort(pUpdated, resultrowComparAsc);
finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset);
initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
@ -3553,9 +3589,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
pInfo->pStDeleted = taosHashInit(64, hashFn, true, HASH_NO_LOCK);
pInfo->pDelIterator = NULL;
// pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
pInfo->pDelRes = createOneDataBlock(pInfo->binfo.pRes, false); // todo(liuyao) for delete
pInfo->pDelRes->info.type = STREAM_DELETE_RESULT; // todo(liuyao) for delete
pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
pInfo->pChildren = NULL;
pInfo->isFinal = false;
pInfo->pPhyNode = pPhyNode;
@ -3701,7 +3735,7 @@ SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY star
return insertNewSessionWindow(pWinInfos, startTs, index + 1);
}
int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TSKEY* pEndTs, int32_t rows,
int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t groupId, int32_t rows,
int32_t start, int64_t gap, SHashObj* pStDeleted) {
for (int32_t i = start; i < rows; ++i) {
if (!isInWindow(pWinInfo, pStartTs[i], gap) && (!pEndTs || !isInWindow(pWinInfo, pEndTs[i], gap))) {
@ -3709,7 +3743,8 @@ int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TS
}
if (pWinInfo->win.skey > pStartTs[i]) {
if (pStDeleted && pWinInfo->isOutput) {
taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &pWinInfo->win.skey, sizeof(TSKEY));
SWinRes res = {.ts = pWinInfo->win.skey, .groupId = groupId};
taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes));
pWinInfo->isOutput = false;
}
pWinInfo->win.skey = pStartTs[i];
@ -3828,7 +3863,8 @@ void compactTimeWindow(SStreamSessionAggOperatorInfo* pInfo, int32_t startIndex,
compactFunctions(pSup->pCtx, pInfo->pDummyCtx, numOfOutput, pTaskInfo);
taosHashRemove(pStUpdated, &pWinInfo->pos, sizeof(SResultRowPosition));
if (pWinInfo->isOutput) {
taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &pWinInfo->win.skey, sizeof(TSKEY));
SWinRes res = {.ts = pWinInfo->win.skey, .groupId = groupId};
taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes));
pWinInfo->isOutput = false;
}
taosArrayRemove(pInfo->streamAggSup.pCurWins, i);
@ -3878,7 +3914,7 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData
int32_t winIndex = 0;
SResultWindowInfo* pCurWin = getSessionTimeWindow(pAggSup, startTsCols[i], endTsCols[i], groupId, gap, &winIndex);
winRows =
updateSessionWindowInfo(pCurWin, startTsCols, endTsCols, pSDataBlock->info.rows, i, pInfo->gap, pStDeleted);
updateSessionWindowInfo(pCurWin, startTsCols, endTsCols, groupId, pSDataBlock->info.rows, i, pInfo->gap, pStDeleted);
code = doOneWindowAgg(pInfo, pSDataBlock, pCurWin, &pResult, i, winRows, numOfOutput, pOperator);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
@ -3927,6 +3963,7 @@ static void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBloc
}
deleteWindow(pAggSup->pCurWins, winIndex, fp);
if (result) {
pCurWin->groupId = gpDatas[i];
taosArrayPush(result, pCurWin);
}
}
@ -3947,7 +3984,7 @@ static void doClearSessionWindows(SStreamAggSupporter* pAggSup, SExprSupp* pSup,
step = 1;
continue;
}
step = updateSessionWindowInfo(pCurWin, tsCols, NULL, pBlock->info.rows, i, gap, NULL);
step = updateSessionWindowInfo(pCurWin, tsCols, NULL, 0, pBlock->info.rows, i, gap, NULL);
ASSERT(isInWindow(pCurWin, tsCols[i], gap));
doClearWindowImpl(&pCurWin->pos, pAggSup->pResultBuf, pSup, numOfOutput);
if (result) {
@ -3984,12 +4021,11 @@ void doBuildDeleteDataBlock(SHashObj* pStDeleted, SSDataBlock* pBlock, void** It
blockDataEnsureCapacity(pBlock, size);
size_t keyLen = 0;
while (((*Ite) = taosHashIterate(pStDeleted, *Ite)) != NULL) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
colDataAppend(pColInfoData, pBlock->info.rows, *Ite, false);
for (int32_t i = 1; i < taosArrayGetSize(pBlock->pDataBlock); i++) {
pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
colDataAppendNULL(pColInfoData, pBlock->info.rows);
}
SWinRes* res = *Ite;
SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
colDataAppend(pTsCol, pBlock->info.rows, (const char*)&res->ts, false);
SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
colDataAppend(pGpCol, pBlock->info.rows, (const char*)&res->groupId, false);
pBlock->info.rows += 1;
if (pBlock->info.rows + 1 >= pBlock->info.capacity) {
break;
@ -4116,7 +4152,8 @@ static void copyDeleteWindowInfo(SArray* pResWins, SHashObj* pStDeleted) {
int32_t size = taosArrayGetSize(pResWins);
for (int32_t i = 0; i < size; i++) {
SResultWindowInfo* pWinInfo = taosArrayGet(pResWins, i);
taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &pWinInfo->win.skey, sizeof(TSKEY));
SWinRes res = {.ts = pWinInfo->win.skey, .groupId = pWinInfo->groupId};
taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes));
}
}

View File

@ -49,6 +49,7 @@ extern "C" {
#define FUNC_MGT_MULTI_ROWS_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(20)
#define FUNC_MGT_KEEP_ORDER_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(21)
#define FUNC_MGT_CUMULATIVE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(22)
#define FUNC_MGT_FORBID_STABLE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(23)
#define FUNC_MGT_TEST_MASK(val, mask) (((val) & (mask)) != 0)

View File

@ -2287,7 +2287,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "interp",
.type = FUNCTION_TYPE_INTERP,
.classification = FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
.classification = FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC |
FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_STABLE_FUNC,
.translateFunc = translateInterp,
.getEnvFunc = getSelectivityFuncEnv,
.initFunc = functionSetup,

View File

@ -498,8 +498,7 @@ int32_t functionFinalizeWithResultBuf(SqlFunctionCtx* pCtx, SSDataBlock* pBlock,
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
pResInfo->isNullRes = (pResInfo->numOfRes == 0) ? 1 : 0;
cleanupResultRowEntry(pResInfo);
pResInfo->isNullRes = (pResInfo->isNullRes == 1) ? 1 : (pResInfo->numOfRes == 0);
char* in = finalResult;
colDataAppend(pCol, pBlock->info.rows, in, pResInfo->isNullRes);
@ -749,6 +748,7 @@ int32_t sumCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
pDBuf->dsum += pSBuf->dsum;
}
pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
pDResInfo->isNullRes &= pSResInfo->isNullRes;
return TSDB_CODE_SUCCESS;
}
@ -1747,6 +1747,7 @@ int32_t minMaxCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx, int3
}
}
pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
pDResInfo->isNullRes &= pSResInfo->isNullRes;
return TSDB_CODE_SUCCESS;
}
@ -2122,6 +2123,7 @@ int32_t stddevCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
}
pDBuf->count += pSBuf->count;
pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
pDResInfo->isNullRes &= pSResInfo->isNullRes;
return TSDB_CODE_SUCCESS;
}
@ -2312,6 +2314,7 @@ int32_t leastSQRCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
pDparam[1][2] += pSparam[1][2];
pDBuf->num += pSBuf->num;
pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
pDResInfo->isNullRes &= pSResInfo->isNullRes;
return TSDB_CODE_SUCCESS;
}
@ -2708,6 +2711,7 @@ int32_t apercentileCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx)
apercentileTransferInfo(pSBuf, pDBuf);
pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
pDResInfo->isNullRes &= pSResInfo->isNullRes;
return TSDB_CODE_SUCCESS;
}
@ -3891,6 +3895,7 @@ int32_t spreadCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
SSpreadInfo* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
spreadTransferInfo(pSBuf, pDBuf);
pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
pDResInfo->isNullRes &= pSResInfo->isNullRes;
return TSDB_CODE_SUCCESS;
}
@ -4063,6 +4068,7 @@ int32_t elapsedCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
elapsedTransferInfo(pSBuf, pDBuf);
pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
pDResInfo->isNullRes &= pSResInfo->isNullRes;
return TSDB_CODE_SUCCESS;
}
@ -4380,6 +4386,7 @@ int32_t histogramCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
histogramTransferInfo(pSBuf, pDBuf);
pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
pDResInfo->isNullRes &= pSResInfo->isNullRes;
return TSDB_CODE_SUCCESS;
}
@ -4577,6 +4584,7 @@ int32_t hllCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
hllTransferInfo(pSBuf, pDBuf);
pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
pDResInfo->isNullRes &= pSResInfo->isNullRes;
return TSDB_CODE_SUCCESS;
}

View File

@ -212,6 +212,8 @@ bool fmIsKeepOrderFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, F
bool fmIsCumulativeFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_CUMULATIVE_FUNC); }
bool fmIsForbidSuperTableFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_FORBID_STABLE_FUNC); }
bool fmIsInterpFunc(int32_t funcId) {
if (funcId < 0 || funcId >= funcMgtBuiltinsNum) {
return false;

View File

@ -37,17 +37,6 @@ int32_t getNumOfResult(SqlFunctionCtx* pCtx, int32_t num, SSDataBlock* pResBlock
int32_t maxRows = 0;
for (int32_t j = 0; j < num; ++j) {
#if 0
int32_t id = pCtx[j].functionId;
/*
* ts, tag, tagprj function can not decide the output number of current query
* the number of output result is decided by main output
*/
if (id == FUNCTION_TS || id == FUNCTION_TAG || id == FUNCTION_TAGPRJ) {
continue;
}
#endif
SResultRowEntryInfo *pResInfo = GET_RES_INFO(&pCtx[j]);
if (pResInfo != NULL && maxRows < pResInfo->numOfRes) {
maxRows = pResInfo->numOfRes;

View File

@ -1467,6 +1467,25 @@ static int32_t translateRepeatScanFunc(STranslateContext* pCxt, SFunctionNode* p
return TSDB_CODE_SUCCESS;
}
static int32_t translateForbidSuperTableFunc(STranslateContext* pCxt, SFunctionNode* pFunc) {
if (!fmIsForbidSuperTableFunc(pFunc->funcId)) {
return TSDB_CODE_SUCCESS;
}
if (!isSelectStmt(pCxt->pCurrStmt)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE,
"%s is only supported in single table query", pFunc->functionName);
}
SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt;
SNode* pTable = pSelect->pFromTable;
if ((NULL != pTable && (QUERY_NODE_REAL_TABLE != nodeType(pTable) ||
(TSDB_CHILD_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType &&
TSDB_NORMAL_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType)))) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE,
"%s is only supported in single table query", pFunc->functionName);
}
return TSDB_CODE_SUCCESS;
}
static bool isStar(SNode* pNode) {
return (QUERY_NODE_COLUMN == nodeType(pNode)) && ('\0' == ((SColumnNode*)pNode)->tableAlias[0]) &&
(0 == strcmp(((SColumnNode*)pNode)->colName, "*"));
@ -1624,6 +1643,9 @@ static int32_t rewriteSystemInfoFunc(STranslateContext* pCxt, SNode** pNode) {
static int32_t translateNoramlFunction(STranslateContext* pCxt, SFunctionNode* pFunc) {
int32_t code = translateAggFunc(pCxt, pFunc);
if (TSDB_CODE_SUCCESS == code) {
code = translateForbidSuperTableFunc(pCxt, pFunc);
}
if (TSDB_CODE_SUCCESS == code) {
code = translateScanPseudoColumnFunc(pCxt, pFunc);
}

View File

@ -3265,7 +3265,7 @@ bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg **pDataStatis, int32_t
int32_t index = -1;
SFilterRangeCtx *ctx = info->colRange[k];
for(int32_t i = 0; i < numOfCols; ++i) {
if (pDataStatis[i]->colId == ctx->colId) {
if (pDataStatis[i] != NULL && pDataStatis[i]->colId == ctx->colId) {
index = i;
break;
}

View File

@ -124,7 +124,7 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma
}
pInfo->numBuckets = DEFAULT_BUCKET_SIZE;
pInfo->pCloseWinSBF = NULL;
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT);
pInfo->pMap = taosHashInit(DEFAULT_MAP_CAPACITY, hashFn, true, HASH_NO_LOCK);
pInfo->maxVersion = 0;
pInfo->scanGroupId = 0;

View File

@ -29,7 +29,30 @@ extern "C" {
#define CONFIG_FILE_LEN 2048
#define MAX_CONFIG_INDEX_COUNT 512
#define MAX_CONFIG_INDEX_COUNT 256
// SRaftCfgIndex ------------------------------------------
typedef struct SRaftCfgIndex {
TdFilePtr pFile;
char path[TSDB_FILENAME_LEN * 2];
SyncIndex configIndexArr[MAX_CONFIG_INDEX_COUNT];
int32_t configIndexCount;
} SRaftCfgIndex;
SRaftCfgIndex *raftCfgIndexOpen(const char *path);
int32_t raftCfgIndexClose(SRaftCfgIndex *pRaftCfgIndex);
int32_t raftCfgIndexPersist(SRaftCfgIndex *pRaftCfgIndex);
int32_t raftCfgIndexAddConfigIndex(SRaftCfgIndex *pRaftCfgIndex, SyncIndex configIndex);
cJSON *raftCfgIndex2Json(SRaftCfgIndex *pRaftCfgIndex);
char *raftCfgIndex2Str(SRaftCfgIndex *pRaftCfgIndex);
int32_t raftCfgIndexFromJson(const cJSON *pRoot, SRaftCfgIndex *pRaftCfgIndex);
int32_t raftCfgIndexFromStr(const char *s, SRaftCfgIndex *pRaftCfgIndex);
int32_t raftCfgIndexCreateFile(const char *path);
// ---------------------------------------------------------
typedef struct SRaftCfg {
SSyncCfg cfg;
@ -50,14 +73,14 @@ int32_t raftCfgClose(SRaftCfg *pRaftCfg);
int32_t raftCfgPersist(SRaftCfg *pRaftCfg);
int32_t raftCfgAddConfigIndex(SRaftCfg *pRaftCfg, SyncIndex configIndex);
cJSON * syncCfg2Json(SSyncCfg *pSyncCfg);
char * syncCfg2Str(SSyncCfg *pSyncCfg);
char * syncCfg2SimpleStr(SSyncCfg *pSyncCfg);
cJSON *syncCfg2Json(SSyncCfg *pSyncCfg);
char *syncCfg2Str(SSyncCfg *pSyncCfg);
char *syncCfg2SimpleStr(SSyncCfg *pSyncCfg);
int32_t syncCfgFromJson(const cJSON *pRoot, SSyncCfg *pSyncCfg);
int32_t syncCfgFromStr(const char *s, SSyncCfg *pSyncCfg);
cJSON * raftCfg2Json(SRaftCfg *pRaftCfg);
char * raftCfg2Str(SRaftCfg *pRaftCfg);
cJSON *raftCfg2Json(SRaftCfg *pRaftCfg);
char *raftCfg2Str(SRaftCfg *pRaftCfg);
int32_t raftCfgFromJson(const cJSON *pRoot, SRaftCfg *pRaftCfg);
int32_t raftCfgFromStr(const char *s, SRaftCfg *pRaftCfg);
@ -82,6 +105,11 @@ void raftCfgPrint2(char *s, SRaftCfg *pCfg);
void raftCfgLog(SRaftCfg *pCfg);
void raftCfgLog2(char *s, SRaftCfg *pCfg);
void raftCfgIndexPrint(SRaftCfgIndex *pCfg);
void raftCfgIndexPrint2(char *s, SRaftCfgIndex *pCfg);
void raftCfgIndexLog(SRaftCfgIndex *pCfg);
void raftCfgIndexLog2(char *s, SRaftCfgIndex *pCfg);
#ifdef __cplusplus
}
#endif

View File

@ -1132,9 +1132,11 @@ void syncNodeStart(SSyncNode* pSyncNode) {
syncNodeBecomeFollower(pSyncNode, "first start");
}
int32_t ret = 0;
// ret = syncNodeStartPingTimer(pSyncNode);
ASSERT(ret == 0);
if (pSyncNode->vgId == 1) {
int32_t ret = 0;
ret = syncNodeStartPingTimer(pSyncNode);
ASSERT(ret == 0);
}
}
void syncNodeStartStandBy(SSyncNode* pSyncNode) {
@ -1146,6 +1148,12 @@ void syncNodeStartStandBy(SSyncNode* pSyncNode) {
int32_t electMS = TIMER_MAX_MS;
int32_t ret = syncNodeRestartElectTimer(pSyncNode, electMS);
ASSERT(ret == 0);
if (pSyncNode->vgId == 1) {
int32_t ret = 0;
ret = syncNodeStartPingTimer(pSyncNode);
ASSERT(ret == 0);
}
}
void syncNodeClose(SSyncNode* pSyncNode) {

View File

@ -18,6 +18,149 @@
#include "syncEnv.h"
#include "syncUtil.h"
// file must already exist!
SRaftCfgIndex *raftCfgIndexOpen(const char *path) {
SRaftCfgIndex *pRaftCfgIndex = taosMemoryMalloc(sizeof(SRaftCfgIndex));
snprintf(pRaftCfgIndex->path, sizeof(pRaftCfgIndex->path), "%s", path);
pRaftCfgIndex->pFile = taosOpenFile(pRaftCfgIndex->path, TD_FILE_READ | TD_FILE_WRITE);
ASSERT(pRaftCfgIndex->pFile != NULL);
taosLSeekFile(pRaftCfgIndex->pFile, 0, SEEK_SET);
int32_t bufLen = MAX_CONFIG_INDEX_COUNT * 16;
char *pBuf = taosMemoryMalloc(bufLen);
memset(pBuf, 0, bufLen);
int64_t len = taosReadFile(pRaftCfgIndex->pFile, pBuf, bufLen);
ASSERT(len > 0);
int32_t ret = raftCfgIndexFromStr(pBuf, pRaftCfgIndex);
ASSERT(ret == 0);
taosMemoryFree(pBuf);
return pRaftCfgIndex;
}
int32_t raftCfgIndexClose(SRaftCfgIndex *pRaftCfgIndex) {
if (pRaftCfgIndex != NULL) {
int64_t ret = taosCloseFile(&(pRaftCfgIndex->pFile));
ASSERT(ret == 0);
taosMemoryFree(pRaftCfgIndex);
}
return 0;
}
int32_t raftCfgIndexPersist(SRaftCfgIndex *pRaftCfgIndex) {
ASSERT(pRaftCfgIndex != NULL);
char *s = raftCfgIndex2Str(pRaftCfgIndex);
taosLSeekFile(pRaftCfgIndex->pFile, 0, SEEK_SET);
int64_t ret = taosWriteFile(pRaftCfgIndex->pFile, s, strlen(s) + 1);
ASSERT(ret == strlen(s) + 1);
taosMemoryFree(s);
taosFsyncFile(pRaftCfgIndex->pFile);
return 0;
}
int32_t raftCfgIndexAddConfigIndex(SRaftCfgIndex *pRaftCfgIndex, SyncIndex configIndex) {
ASSERT(pRaftCfgIndex->configIndexCount <= MAX_CONFIG_INDEX_COUNT);
(pRaftCfgIndex->configIndexArr)[pRaftCfgIndex->configIndexCount] = configIndex;
++(pRaftCfgIndex->configIndexCount);
return 0;
}
cJSON *raftCfgIndex2Json(SRaftCfgIndex *pRaftCfgIndex) {
cJSON *pRoot = cJSON_CreateObject();
cJSON_AddNumberToObject(pRoot, "configIndexCount", pRaftCfgIndex->configIndexCount);
cJSON *pIndexArr = cJSON_CreateArray();
cJSON_AddItemToObject(pRoot, "configIndexArr", pIndexArr);
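// Store each 64-bit index as a string: cJSON keeps numbers as double, which cannot represent every int64 exactly.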
for (int i = 0; i < pRaftCfgIndex->configIndexCount; ++i) {
char buf64[128];
snprintf(buf64, sizeof(buf64), "%" PRId64, (pRaftCfgIndex->configIndexArr)[i]);
cJSON *pIndexObj = cJSON_CreateObject();
cJSON_AddStringToObject(pIndexObj, "index", buf64);
cJSON_AddItemToArray(pIndexArr, pIndexObj);
}
cJSON *pJson = cJSON_CreateObject();
cJSON_AddItemToObject(pJson, "SRaftCfgIndex", pRoot);
return pJson;
}
char *raftCfgIndex2Str(SRaftCfgIndex *pRaftCfgIndex) {
cJSON *pJson = raftCfgIndex2Json(pRaftCfgIndex);
char *serialized = cJSON_Print(pJson);
cJSON_Delete(pJson);
return serialized;
}
int32_t raftCfgIndexFromJson(const cJSON *pRoot, SRaftCfgIndex *pRaftCfgIndex) {
cJSON *pJson = cJSON_GetObjectItem(pRoot, "SRaftCfgIndex");
cJSON *pJsonConfigIndexCount = cJSON_GetObjectItem(pJson, "configIndexCount");
pRaftCfgIndex->configIndexCount = cJSON_GetNumberValue(pJsonConfigIndexCount);
cJSON *pIndexArr = cJSON_GetObjectItem(pJson, "configIndexArr");
int arraySize = cJSON_GetArraySize(pIndexArr);
ASSERT(arraySize == pRaftCfgIndex->configIndexCount);
memset(pRaftCfgIndex->configIndexArr, 0, sizeof(pRaftCfgIndex->configIndexArr));
for (int i = 0; i < arraySize; ++i) {
cJSON *pIndexObj = cJSON_GetArrayItem(pIndexArr, i);
ASSERT(pIndexObj != NULL);
cJSON *pIndex = cJSON_GetObjectItem(pIndexObj, "index");
ASSERT(cJSON_IsString(pIndex));
(pRaftCfgIndex->configIndexArr)[i] = atoll(pIndex->valuestring);
}
return 0;
}
int32_t raftCfgIndexFromStr(const char *s, SRaftCfgIndex *pRaftCfgIndex) {
cJSON *pRoot = cJSON_Parse(s);
ASSERT(pRoot != NULL);
int32_t ret = raftCfgIndexFromJson(pRoot, pRaftCfgIndex);
ASSERT(ret == 0);
cJSON_Delete(pRoot);
return 0;
}
int32_t raftCfgIndexCreateFile(const char *path) {
TdFilePtr pFile = taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE);
if (pFile == NULL) {
int32_t err = terrno;
const char *errStr = tstrerror(err);
int32_t sysErr = errno;
const char *sysErrStr = strerror(errno);
sError("create raft cfg index file error, err:%d %X, msg:%s, syserr:%d, sysmsg:%s", err, err, errStr, sysErr,
sysErrStr);
ASSERT(0);
return -1;
}
SRaftCfgIndex raftCfgIndex;
memset(raftCfgIndex.configIndexArr, 0, sizeof(raftCfgIndex.configIndexArr));
raftCfgIndex.configIndexCount = 1;
raftCfgIndex.configIndexArr[0] = -1;
char *s = raftCfgIndex2Str(&raftCfgIndex);
int64_t ret = taosWriteFile(pFile, s, strlen(s) + 1);
ASSERT(ret == strlen(s) + 1);
taosMemoryFree(s);
taosCloseFile(&pFile);
return 0;
}
// ---------------------------------------
// file must already exist!
SRaftCfg *raftCfgOpen(const char *path) {
SRaftCfg *pCfg = taosMemoryMalloc(sizeof(SRaftCfg));
@ -101,7 +244,7 @@ cJSON *syncCfg2Json(SSyncCfg *pSyncCfg) {
char *syncCfg2Str(SSyncCfg *pSyncCfg) {
cJSON *pJson = syncCfg2Json(pSyncCfg);
char * serialized = cJSON_Print(pJson);
char *serialized = cJSON_Print(pJson);
cJSON_Delete(pJson);
return serialized;
}
@ -109,7 +252,7 @@ char *syncCfg2Str(SSyncCfg *pSyncCfg) {
char *syncCfg2SimpleStr(SSyncCfg *pSyncCfg) {
if (pSyncCfg != NULL) {
int32_t len = 512;
char * s = taosMemoryMalloc(len);
char *s = taosMemoryMalloc(len);
memset(s, 0, len);
snprintf(s, len, "{r-num:%d, my:%d, ", pSyncCfg->replicaNum, pSyncCfg->myIndex);
@ -206,7 +349,7 @@ cJSON *raftCfg2Json(SRaftCfg *pRaftCfg) {
char *raftCfg2Str(SRaftCfg *pRaftCfg) {
cJSON *pJson = raftCfg2Json(pRaftCfg);
char * serialized = cJSON_Print(pJson);
char *serialized = cJSON_Print(pJson);
cJSON_Delete(pJson);
return serialized;
}
@ -285,7 +428,7 @@ int32_t raftCfgFromJson(const cJSON *pRoot, SRaftCfg *pRaftCfg) {
(pRaftCfg->configIndexArr)[i] = atoll(pIndex->valuestring);
}
cJSON * pJsonSyncCfg = cJSON_GetObjectItem(pJson, "SSyncCfg");
cJSON *pJsonSyncCfg = cJSON_GetObjectItem(pJson, "SSyncCfg");
int32_t code = syncCfgFromJson(pJsonSyncCfg, &(pRaftCfg->cfg));
ASSERT(code == 0);
@ -361,3 +504,30 @@ void raftCfgLog2(char *s, SRaftCfg *pCfg) {
sTrace("raftCfgLog2 | len:%" PRIu64 " | %s | %s", strlen(serialized), s, serialized);
taosMemoryFree(serialized);
}
// ---------
void raftCfgIndexPrint(SRaftCfgIndex *pCfg) {
char *serialized = raftCfgIndex2Str(pCfg);
printf("raftCfgIndexPrint | len:%" PRIu64 " | %s \n", strlen(serialized), serialized);
fflush(NULL);
taosMemoryFree(serialized);
}
void raftCfgIndexPrint2(char *s, SRaftCfgIndex *pCfg) {
char *serialized = raftCfgIndex2Str(pCfg);
printf("raftCfgIndexPrint2 | len:%" PRIu64 " | %s | %s \n", strlen(serialized), s, serialized);
fflush(NULL);
taosMemoryFree(serialized);
}
void raftCfgIndexLog(SRaftCfgIndex *pCfg) {
char *serialized = raftCfgIndex2Str(pCfg);
sTrace("raftCfgIndexLog | len:%" PRIu64 " | %s", strlen(serialized), serialized);
taosMemoryFree(serialized);
}
void raftCfgIndexLog2(char *s, SRaftCfgIndex *pCfg) {
char *serialized = raftCfgIndex2Str(pCfg);
sTrace("raftCfgIndexLog2 | len:%" PRIu64 " | %s | %s", strlen(serialized), s, serialized);
taosMemoryFree(serialized);
}

View File

@ -15,12 +15,55 @@
#include "syncTimeout.h"
#include "syncElection.h"
#include "syncRaftCfg.h"
#include "syncReplication.h"
#include "syncRespMgr.h"
static void syncNodeCleanConfigIndex(SSyncNode* ths) {
int32_t newArrIndex = 0;
SyncIndex newConfigIndexArr[MAX_CONFIG_INDEX_COUNT];
memset(newConfigIndexArr, 0, sizeof(newConfigIndexArr));
SSnapshot snapshot = {0};
if (ths->pFsm != NULL && ths->pFsm->FpGetSnapshotInfo != NULL) {
ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &snapshot);
}
if (snapshot.lastApplyIndex != SYNC_INDEX_INVALID) {
    for (int i = 0; i < ths->pRaftCfg->configIndexCount; ++i) {
      if (ths->pRaftCfg->configIndexArr[i] >= snapshot.lastConfigIndex) {
        // keep: this config index is not yet covered by the snapshot
        newConfigIndexArr[newArrIndex] = ths->pRaftCfg->configIndexArr[i];
        ++newArrIndex;
      }
    }
int32_t oldCnt = ths->pRaftCfg->configIndexCount;
ths->pRaftCfg->configIndexCount = newArrIndex;
memcpy(ths->pRaftCfg->configIndexArr, newConfigIndexArr, sizeof(newConfigIndexArr));
int32_t code = raftCfgPersist(ths->pRaftCfg);
ASSERT(code == 0);
do {
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "clean config index arr, old-cnt:%d, new-cnt:%d", oldCnt,
ths->pRaftCfg->configIndexCount);
syncNodeEventLog(ths, logBuf);
} while (0);
}
}
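The retention rule above keeps only config indexes at or above the snapshot's lastConfigIndex and compacts them to the front of the array; a self-contained illustration with hypothetical values:

#include <stdio.h>

int main(void) {
  long long cfgIdx[] = {-1, 120, 340, 560, 780};
  long long lastConfigIndex = 340;  // from the latest snapshot
  int       n = 5, k = 0;
  for (int i = 0; i < n; ++i) {
    if (cfgIdx[i] >= lastConfigIndex) cfgIdx[k++] = cfgIdx[i];  // keep
  }
  for (int i = 0; i < k; ++i) printf("%lld ", cfgIdx[i]);  // prints: 340 560 780
  return 0;
}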
int32_t syncNodeTimerRoutine(SSyncNode* ths) {
syncNodeEventLog(ths, "timer routines");
if (ths->vgId == 1) {
syncNodeCleanConfigIndex(ths);
}
#if 0
if (ths->vgId != 1) {
syncRespClean(ths->pSyncRespMgr);
@ -41,7 +84,7 @@ int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) {
// syncNodePingAll(ths);
// syncNodePingPeers(ths);
sTrace("vgId:%d, sync timeout, type:ping count:%d", ths->vgId, ths->pingTimerCounter);
// sTrace("vgId:%d, sync timeout, type:ping count:%d", ths->vgId, ths->pingTimerCounter);
syncNodeTimerRoutine(ths);
}

View File

@ -56,6 +56,7 @@ add_executable(syncRaftLogTest3 "")
add_executable(syncLeaderTransferTest "")
add_executable(syncReconfigFinishTest "")
add_executable(syncRestoreFromSnapshot "")
add_executable(syncRaftCfgIndexTest "")
target_sources(syncTest
@ -290,6 +291,10 @@ target_sources(syncRestoreFromSnapshot
PRIVATE
"syncRestoreFromSnapshot.cpp"
)
target_sources(syncRaftCfgIndexTest
PRIVATE
"syncRaftCfgIndexTest.cpp"
)
target_include_directories(syncTest
@ -582,6 +587,11 @@ target_include_directories(syncRestoreFromSnapshot
"${TD_SOURCE_DIR}/include/libs/sync"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
target_include_directories(syncRaftCfgIndexTest
PUBLIC
"${TD_SOURCE_DIR}/include/libs/sync"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
target_link_libraries(syncTest
@ -816,6 +826,10 @@ target_link_libraries(syncRestoreFromSnapshot
sync
gtest_main
)
target_link_libraries(syncRaftCfgIndexTest
sync
gtest_main
)
enable_testing()

View File

@ -0,0 +1,96 @@
#include "syncRaftStore.h"
//#include <gtest/gtest.h>
#include <stdio.h>
#include "syncIO.h"
#include "syncInt.h"
#include "syncRaftCfg.h"
#include "syncUtil.h"
void logTest() {
sTrace("--- sync log test: trace");
sDebug("--- sync log test: debug");
sInfo("--- sync log test: info");
sWarn("--- sync log test: warn");
sError("--- sync log test: error");
sFatal("--- sync log test: fatal");
}
SRaftCfg* createRaftCfg() {
SRaftCfg* pCfg = (SRaftCfg*)taosMemoryMalloc(sizeof(SRaftCfg));
memset(pCfg, 0, sizeof(SRaftCfg));
pCfg->cfg.replicaNum = 3;
pCfg->cfg.myIndex = 1;
for (int i = 0; i < pCfg->cfg.replicaNum; ++i) {
((pCfg->cfg.nodeInfo)[i]).nodePort = i * 100;
snprintf(((pCfg->cfg.nodeInfo)[i]).nodeFqdn, sizeof(((pCfg->cfg.nodeInfo)[i]).nodeFqdn), "100.200.300.%d", i);
}
pCfg->isStandBy = taosGetTimestampSec() % 100;
pCfg->batchSize = taosGetTimestampSec() % 100;
pCfg->configIndexCount = 5;
for (int i = 0; i < MAX_CONFIG_INDEX_COUNT; ++i) {
(pCfg->configIndexArr)[i] = -1;
}
for (int i = 0; i < pCfg->configIndexCount; ++i) {
(pCfg->configIndexArr)[i] = i * 100;
}
return pCfg;
}
SSyncCfg* createSyncCfg() {
SSyncCfg* pCfg = (SSyncCfg*)taosMemoryMalloc(sizeof(SSyncCfg));
memset(pCfg, 0, sizeof(SSyncCfg));
pCfg->replicaNum = 3;
pCfg->myIndex = 1;
for (int i = 0; i < pCfg->replicaNum; ++i) {
((pCfg->nodeInfo)[i]).nodePort = i * 100;
snprintf(((pCfg->nodeInfo)[i]).nodeFqdn, sizeof(((pCfg->nodeInfo)[i]).nodeFqdn), "100.200.300.%d", i);
}
return pCfg;
}
const char *pFile = "./raft_config_index.json";
void test1() {
int32_t code = raftCfgIndexCreateFile(pFile);
ASSERT(code == 0);
SRaftCfgIndex *pRaftCfgIndex = raftCfgIndexOpen(pFile);
raftCfgIndexLog2((char*)"==test1==", pRaftCfgIndex);
raftCfgIndexClose(pRaftCfgIndex);
}
void test2() {
SRaftCfgIndex *pRaftCfgIndex = raftCfgIndexOpen(pFile);
for (int i = 0; i < 500; ++i) {
raftCfgIndexAddConfigIndex(pRaftCfgIndex, i);
}
raftCfgIndexPersist(pRaftCfgIndex);
raftCfgIndexLog2((char*)"==test2==", pRaftCfgIndex);
raftCfgIndexClose(pRaftCfgIndex);
}
void test3() {
SRaftCfgIndex *pRaftCfgIndex = raftCfgIndexOpen(pFile);
raftCfgIndexLog2((char*)"==test3==", pRaftCfgIndex);
raftCfgIndexClose(pRaftCfgIndex);
}
int main() {
tsAsyncLog = 0;
sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE;
logTest();
test1();
test2();
test3();
return 0;
}

View File

@ -211,27 +211,27 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) {
#define CONN_PERSIST_TIME(para) ((para) <= 90000 ? 90000 : (para))
#define CONN_GET_HOST_THREAD(conn) (conn ? ((SCliConn*)conn)->hostThrd : NULL)
#define CONN_GET_INST_LABEL(conn) (((STrans*)(((SCliThrd*)(conn)->hostThrd)->pTransInst))->label)
#define CONN_SHOULD_RELEASE(conn, head) \
do { \
if ((head)->release == 1 && (head->msgLen) == sizeof(*head)) { \
uint64_t ahandle = head->ahandle; \
CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle); \
transClearBuffer(&conn->readBuf); \
transFreeMsg(transContFromHead((char*)head)); \
if (transQueueSize(&conn->cliMsgs) > 0 && ahandle == 0) { \
SCliMsg* cliMsg = transQueueGet(&conn->cliMsgs, 0); \
if (cliMsg->type == Release) return; \
} \
tDebug("%s conn %p receive release request, ref:%d", CONN_GET_INST_LABEL(conn), conn, T_REF_VAL_GET(conn)); \
if (T_REF_VAL_GET(conn) > 1) { \
transUnrefCliHandle(conn); \
} \
destroyCmsg(pMsg); \
cliReleaseUnfinishedMsg(conn); \
transQueueClear(&conn->cliMsgs); \
addConnToPool(((SCliThrd*)conn->hostThrd)->pool, conn); \
return; \
} \
#define CONN_SHOULD_RELEASE(conn, head) \
do { \
if ((head)->release == 1 && (head->msgLen) == sizeof(*head)) { \
uint64_t ahandle = head->ahandle; \
CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle); \
transClearBuffer(&conn->readBuf); \
transFreeMsg(transContFromHead((char*)head)); \
if (transQueueSize(&conn->cliMsgs) > 0 && ahandle == 0) { \
SCliMsg* cliMsg = transQueueGet(&conn->cliMsgs, 0); \
if (cliMsg->type == Release) return; \
} \
tDebug("%s conn %p receive release request, refId:%" PRId64 "", CONN_GET_INST_LABEL(conn), conn, conn->refId); \
if (T_REF_VAL_GET(conn) > 1) { \
transUnrefCliHandle(conn); \
} \
destroyCmsg(pMsg); \
cliReleaseUnfinishedMsg(conn); \
transQueueClear(&conn->cliMsgs); \
addConnToPool(((SCliThrd*)conn->hostThrd)->pool, conn); \
return; \
} \
} while (0)
#define CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle) \
@ -890,8 +890,8 @@ SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrd* pThrd, bool* ignore) {
if (refId != 0) {
SExHandle* exh = transAcquireExHandle(transGetRefMgt(), refId);
if (exh == NULL) {
tError("failed to get conn, refId: %" PRId64 "", refId);
*ignore = true;
destroyCmsg(pMsg);
return NULL;
} else {
conn = exh->handle;
@ -937,7 +937,16 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
bool ignore = false;
SCliConn* conn = cliGetConn(pMsg, pThrd, &ignore);
if (ignore == true) {
tError("ignore msg");
    // the persistent conn was already released by the server; reply locally with a broken-link error
STransMsg resp = {0};
resp.code = TSDB_CODE_RPC_BROKEN_LINK;
resp.msgType = pMsg->msg.msgType + 1;
resp.info.ahandle = pMsg && pMsg->ctx ? pMsg->ctx->ahandle : NULL;
resp.info.traceId = pMsg->msg.info.traceId;
pTransInst->cfp(pTransInst->parent, &resp, NULL);
destroyCmsg(pMsg);
return;
}
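On this path the request never reaches the network: the transport synthesizes a reply carrying TSDB_CODE_RPC_BROKEN_LINK. A hypothetical upper-layer callback is sketched below; the (parent, msg, epSet) shape is inferred from the pTransInst->cfp(pTransInst->parent, &resp, NULL) call above, and all other names are illustrative:

static void demoRpcCb(void *parent, STransMsg *pResp, void *pEpSet) {
  if (pResp->code == TSDB_CODE_RPC_BROKEN_LINK) {
    // the server already released this persistent connection;
    // surface the error or retry instead of waiting for a reply
  }
}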

View File

@ -1,62 +0,0 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"create_table_thread_count": 4,
"result_file":"./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"cache": 50,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100000,
"childtable_limit": -1,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1,
"timestamp_step": 1000,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/sample.csv",
"tags_file": "",
"columns": [{"type": "INT", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BOOL"}],
"tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":1}]
}]
}]
}

View File

@ -1,81 +0,0 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
        buildPath = ""  # default when taosd is not found below
        for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
        # insert: create one or multiple tables per sql and insert multiple rows per sql
os.system("%staosdemo -f query/nestedQuery/insertData.json -y " % binPath)
tdSql.execute("use db")
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 1000)
tdSql.query("select count (tbname) from stb1")
tdSql.checkData(0, 0, 1000)
tdSql.query("select count(*) from stb00_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 100000)
tdSql.query("select count(*) from stb01_1")
tdSql.checkData(0, 0, 200)
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 200000)
testcaseFilename = os.path.split(__file__)[-1]
os.system("rm -rf ./insert_res.txt")
os.system("rm -rf query/nestedQuery/%s.sql" % testcaseFilename )
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -1,3 +0,0 @@
1
2
3

File diff suppressed because it is too large

View File

@ -1,100 +0,0 @@
8.855,"binary_str0" ,1626870128248246976
8.75,"binary_str1" ,1626870128249060032
5.44,"binary_str2" ,1626870128249067968
8.45,"binary_str3" ,1626870128249072064
4.07,"binary_str4" ,1626870128249075904
6.97,"binary_str5" ,1626870128249078976
6.86,"binary_str6" ,1626870128249082048
1.585,"binary_str7" ,1626870128249085120
1.4,"binary_str8" ,1626870128249087936
5.135,"binary_str9" ,1626870128249092032
3.15,"binary_str10" ,1626870128249095104
1.765,"binary_str11" ,1626870128249097920
7.71,"binary_str12" ,1626870128249100992
3.91,"binary_str13" ,1626870128249104064
5.615,"binary_str14" ,1626870128249106880
9.495,"binary_str15" ,1626870128249109952
3.825,"binary_str16" ,1626870128249113024
1.94,"binary_str17" ,1626870128249117120
5.385,"binary_str18" ,1626870128249119936
7.075,"binary_str19" ,1626870128249123008
5.715,"binary_str20" ,1626870128249126080
1.83,"binary_str21" ,1626870128249128896
6.365,"binary_str22" ,1626870128249131968
6.55,"binary_str23" ,1626870128249135040
6.315,"binary_str24" ,1626870128249138112
3.82,"binary_str25" ,1626870128249140928
2.455,"binary_str26" ,1626870128249145024
7.795,"binary_str27" ,1626870128249148096
2.47,"binary_str28" ,1626870128249150912
1.37,"binary_str29" ,1626870128249155008
5.39,"binary_str30" ,1626870128249158080
5.13,"binary_str31" ,1626870128249160896
4.09,"binary_str32" ,1626870128249163968
5.855,"binary_str33" ,1626870128249167040
0.17,"binary_str34" ,1626870128249170112
1.955,"binary_str35" ,1626870128249173952
0.585,"binary_str36" ,1626870128249178048
0.33,"binary_str37" ,1626870128249181120
7.925,"binary_str38" ,1626870128249183936
9.685,"binary_str39" ,1626870128249187008
2.6,"binary_str40" ,1626870128249191104
5.705,"binary_str41" ,1626870128249193920
3.965,"binary_str42" ,1626870128249196992
4.43,"binary_str43" ,1626870128249200064
8.73,"binary_str44" ,1626870128249202880
3.105,"binary_str45" ,1626870128249205952
9.39,"binary_str46" ,1626870128249209024
2.825,"binary_str47" ,1626870128249212096
9.675,"binary_str48" ,1626870128249214912
9.99,"binary_str49" ,1626870128249217984
4.51,"binary_str50" ,1626870128249221056
4.94,"binary_str51" ,1626870128249223872
7.72,"binary_str52" ,1626870128249226944
4.135,"binary_str53" ,1626870128249231040
2.325,"binary_str54" ,1626870128249234112
4.585,"binary_str55" ,1626870128249236928
8.76,"binary_str56" ,1626870128249240000
4.715,"binary_str57" ,1626870128249243072
0.56,"binary_str58" ,1626870128249245888
5.35,"binary_str59" ,1626870128249249984
5.075,"binary_str60" ,1626870128249253056
6.665,"binary_str61" ,1626870128249256128
7.13,"binary_str62" ,1626870128249258944
2.775,"binary_str63" ,1626870128249262016
5.775,"binary_str64" ,1626870128249265088
1.62,"binary_str65" ,1626870128249267904
1.625,"binary_str66" ,1626870128249270976
8.15,"binary_str67" ,1626870128249274048
0.75,"binary_str68" ,1626870128249277120
3.265,"binary_str69" ,1626870128249280960
8.585,"binary_str70" ,1626870128249284032
1.88,"binary_str71" ,1626870128249287104
8.44,"binary_str72" ,1626870128249289920
5.12,"binary_str73" ,1626870128249295040
2.58,"binary_str74" ,1626870128249298112
9.42,"binary_str75" ,1626870128249300928
1.765,"binary_str76" ,1626870128249304000
2.66,"binary_str77" ,1626870128249308096
1.405,"binary_str78" ,1626870128249310912
5.595,"binary_str79" ,1626870128249315008
2.28,"binary_str80" ,1626870128249318080
9.24,"binary_str81" ,1626870128249320896
9.03,"binary_str82" ,1626870128249323968
6.055,"binary_str83" ,1626870128249327040
1.74,"binary_str84" ,1626870128249330112
5.77,"binary_str85" ,1626870128249332928
1.97,"binary_str86" ,1626870128249336000
0.3,"binary_str87" ,1626870128249339072
7.145,"binary_str88" ,1626870128249342912
0.88,"binary_str89" ,1626870128249345984
8.025,"binary_str90" ,1626870128249349056
4.81,"binary_str91" ,1626870128249351872
0.725,"binary_str92" ,1626870128249355968
3.85,"binary_str93" ,1626870128249359040
9.455,"binary_str94" ,1626870128249362112
2.265,"binary_str95" ,1626870128249364928
3.985,"binary_str96" ,1626870128249368000
9.375,"binary_str97" ,1626870128249371072
0.2,"binary_str98" ,1626870128249373888
6.95,"binary_str99" ,1626870128249377984

View File

@ -1,100 +0,0 @@
"string0",7,8.615
"string1",4,9.895
"string2",3,2.92
"string3",3,5.62
"string4",7,1.615
"string5",6,1.45
"string6",5,7.48
"string7",7,3.01
"string8",5,4.76
"string9",10,7.09
"string10",2,8.38
"string11",7,8.65
"string12",5,5.025
"string13",10,5.765
"string14",2,4.57
"string15",2,1.03
"string16",7,6.98
"string17",10,0.23
"string18",7,5.815
"string19",1,2.37
"string20",10,8.865
"string21",3,1.235
"string22",2,8.62
"string23",9,1.045
"string24",8,4.34
"string25",1,5.455
"string26",2,4.475
"string27",1,6.95
"string28",2,3.39
"string29",3,6.79
"string30",7,9.735
"string31",1,9.79
"string32",10,9.955
"string33",1,5.095
"string34",3,3.86
"string35",9,5.105
"string36",10,4.22
"string37",1,2.78
"string38",9,6.345
"string39",1,0.975
"string40",5,6.16
"string41",4,7.735
"string42",5,6.6
"string43",8,2.845
"string44",1,0.655
"string45",3,2.995
"string46",9,3.6
"string47",8,3.47
"string48",3,7.98
"string49",6,2.225
"string50",9,5.44
"string51",4,6.335
"string52",3,2.955
"string53",1,0.565
"string54",6,5.575
"string55",6,9.905
"string56",9,6.025
"string57",8,0.94
"string58",10,0.15
"string59",8,1.555
"string60",4,2.28
"string61",2,8.29
"string62",9,6.22
"string63",6,3.35
"string64",10,6.7
"string65",3,9.345
"string66",7,9.815
"string67",1,5.365
"string68",10,3.81
"string69",1,6.405
"string70",8,2.715
"string71",3,8.58
"string72",8,6.34
"string73",2,7.49
"string74",4,8.64
"string75",3,8.995
"string76",7,3.465
"string77",1,7.64
"string78",6,3.65
"string79",6,1.4
"string80",6,5.875
"string81",2,1.22
"string82",5,7.87
"string83",9,8.41
"string84",9,8.9
"string85",9,3.89
"string86",2,5.0
"string87",2,4.495
"string88",4,2.835
"string89",3,5.895
"string90",7,8.41
"string91",5,5.125
"string92",7,9.165
"string93",5,8.315
"string94",10,7.485
"string95",7,4.635
"string96",2,6.015
"string97",8,0.595
"string98",3,8.79
"string99",4,1.72

View File

@ -1,63 +0,0 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"create_table_thread_count": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "testdb3",
"drop": "yes",
"replica": 1,
"cache": 50,
"precision": "ms",
"keep": 3600,
"minRows": 100,
"maxRows": 4096,
"comp":2
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
}]
}]
}

View File

@ -1,63 +0,0 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"create_table_thread_count": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "testdb1",
"drop": "yes",
"replica": 1,
"cache": 50,
"precision": "ns",
"keep": 3600,
"minRows": 100,
"maxRows": 4096,
"comp":2
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
}]
}]
}

View File

@ -1,63 +0,0 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"create_table_thread_count": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "testdb2",
"drop": "yes",
"replica": 1,
"cache": 50,
"precision": "us",
"keep": 3600,
"minRows": 100,
"maxRows": 4096,
"comp":2
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
}]
}]
}

View File

@ -1,115 +0,0 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
        buildPath = ""  # default when taosd is not found below
        for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
        # insert: create one or multiple tables per sql and insert multiple rows per sql
        # check taosdemo's time_step parameter with nanosecond precision
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json -y " % binPath)
tdSql.execute("use testdb1")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb0_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.getData(9, 1)
tdSql.checkDataType(9, 1,"TIMESTAMP")
tdSql.query("select last(ts) from stb0")
tdSql.checkData(0, 0,"2021-07-01 00:00:00.000099000")
        # check taosdemo's time_step parameter with microsecond precision
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json -y " % binPath)
tdSql.execute("use testdb2")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb0_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.getData(9, 1)
tdSql.checkDataType(9, 1,"TIMESTAMP")
tdSql.query("select last(ts) from stb0")
tdSql.checkData(0, 0,"2021-07-01 00:00:00.099000")
        # check taosdemo's time_step parameter with millisecond precision
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json -y " % binPath)
tdSql.execute("use testdb3")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb0_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.checkDataType(9, 1,"TIMESTAMP")
tdSql.query("select last(ts) from stb0")
tdSql.checkData(0, 0,"2021-07-01 00:01:39.000")
os.system("rm -rf ./res.txt")
os.system("rm -rf ./*.py.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -1,88 +0,0 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"create_table_thread_count": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "nsdb",
"drop": "yes",
"replica": 1,
"cache": 50,
"precision": "ns",
"keep": 3600,
"minRows": 100,
"maxRows": 4096,
"comp":2
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb1_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 10,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
}]
}]
}

View File

@ -1,84 +0,0 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"create_table_thread_count": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "subnsdb",
"drop": "yes",
"replica": 1,
"cache": 50,
"precision": "ns",
"keep": 3600,
"minRows": 100,
"maxRows": 4096,
"comp":2
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "samples",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/NanoTestCase/nano_samples.csv",
"tags_file": "./tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv",
"columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}],
"tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "tb1_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "samples",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 10,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/NanoTestCase/nano_samples.csv",
"tags_file": "./tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv",
"columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}],
"tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}]
}]
}]
}

View File

@ -1,62 +0,0 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"create_table_thread_count": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "nsdb2",
"drop": "yes",
"replica": 1,
"cache": 50,
"precision": "ns",
"keep": 3600,
"minRows": 100,
"maxRows": 4096,
"comp":2
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10,
"start_timestamp": "now",
"sample_format": "",
"sample_file": "",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":2}, {"type": "BINARY", "len": 32, "count":2},
{"type": "TIMESTAMP"}, {"type": "BIGINT", "count":3},{"type": "FLOAT", "count":1},{"type": "SMALLINT", "count":1},{"type": "TINYINT", "count":1},
{"type": "BOOL"},{"type": "NCHAR","len":16}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5},{"type": "NCHAR","len":16, "count":1}]
}]
}]
}

View File

@ -1,84 +0,0 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"create_table_thread_count": 10,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "nsdbcsv",
"drop": "yes",
"replica": 1,
"cache": 50,
"precision": "ns",
"keep": 3600,
"minRows": 100,
"maxRows": 4096,
"comp":2
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb0_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "samples",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/NanoTestCase/nano_samples.csv",
"tags_file": "./tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv",
"columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}],
"tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "tb1_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "samples",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 10,
"disorder_range": 1000,
"timestamp_step": 10000000,
"start_timestamp": "2021-07-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/NanoTestCase/nano_samples.csv",
"tags_file": "./tools/taosdemoAllTest/NanoTestCase/nano_sampletags.csv",
"columns": [{"type": "DOUBLE"}, {"type": "BINARY", "len": 64, "count":1}, {"type": "TIMESTAMP", "count":1}],
"tags": [{"type": "BINARY", "len": 16, "count":1},{"type": "INT"},{"type": "DOUBLE"}]
}]
}]
}

View File

@ -1,169 +0,0 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
        buildPath = ""  # default when taosd is not found below
        for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
        # insert: create one or multiple tables per sql and insert multiple rows per sql
        # insert data starting from a specific timestamp
        # check stable stb0
os.system(
"%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " %
binPath)
tdSql.execute("use nsdb")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb0_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.checkDataType(9, 1, "TIMESTAMP")
tdSql.query("select last(ts) from stb0")
tdSql.checkData(0, 0, "2021-07-01 00:00:00.990000000")
        # check stable stb1, which is inserted with disorder
tdSql.query("select count (tbname) from stb1")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb1_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 10000)
        # check that c8 is a nano timestamp
tdSql.query("describe stb1")
tdSql.checkDataType(9, 1, "TIMESTAMP")
        # check that the insert timestamp_step is in nanoseconds
tdSql.query("select last(ts) from stb1")
tdSql.checkData(0, 0, "2021-07-01 00:00:00.990000000")
        # insert data starting from the current time
# check stable stb0
os.system(
"%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json -y " %
binPath)
tdSql.execute("use nsdb2")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from tb0_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
        # check that c8 is a nano timestamp
tdSql.query("describe stb0")
tdSql.checkDataType(9, 1, "TIMESTAMP")
        # insert from csv files where timestamps are long ints and strings
        # appear in ts and cols
os.system(
"%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " %
binPath)
tdSql.execute("use nsdbcsv")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.checkDataType(3, 1, "TIMESTAMP")
tdSql.query(
"select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"")
tdSql.checkData(0, 0, 5000)
tdSql.query("select count(*) from stb0 where ts < 1626918583000000000")
tdSql.checkData(0, 0, 10000)
os.system("rm -rf ./insert_res.txt")
os.system(
"rm -rf tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNano*.py.sql")
        # taosdemo insert test driven by command-line parameters; see
        # taosdemo --help for details
os.system(
"%staosdemo -u root -ptaosdata -P 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " %
binPath)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 600)
# check taosdemo -s
sqls_ls = [
'drop database if exists nsdbsql;',
'create database nsdbsql precision "ns" keep 3600 duration 6 update 1;',
'use nsdbsql;',
'CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);',
'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);',
'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);',
'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76);']
with open("./taosdemoTestNanoCreateDB.sql", mode="a") as sql_files:
for sql in sqls_ls:
sql_files.write(sql + "\n")
sql_files.close()
sleep(10)
os.system("%staosdemo -s taosdemoTestNanoCreateDB.sql -y " % binPath)
tdSql.query("select count(*) from nsdbsql.meters")
tdSql.checkData(0, 0, 2)
os.system("rm -rf ./res.txt")
os.system("rm -rf ./*.py.sql")
os.system("rm -rf ./taosdemoTestNanoCreateDB.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -1,92 +0,0 @@
{
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "nsdb",
"query_times": 10,
"query_mode": "taosc",
"specified_table_query": {
"query_interval": 1,
"concurrent": 2,
"sqls": [
{
"sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000 \" ;",
"result": "./query_res0.txt"
},
{
"sql": "select count(*) from stb0 where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;",
"result": "./query_res1.txt"
},
{
"sql": "select count(*) from stb0 where ts>now-20d ;",
"result": "./query_res2.txt"
},
{
"sql": "select max(c10) from stb0;",
"result": "./query_res3.txt"
},
{
"sql": "select min(c1) from stb0;",
"result": "./query_res4.txt"
},
{
"sql": "select avg(c1) from stb0;",
"result": "./query_res5.txt"
},
{
"sql":"select count(*) from stb0 group by tbname;",
"result":"./query_res6.txt"
}
]
},
"super_table_query": {
"stblname": "stb0",
"query_interval": 0,
"threads": 4,
"sqls": [
{
"sql": "select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000 \" ;",
"result": "./query_res_tb0.txt"
},
{
"sql":"select count(*) from xxxx where ts>\"2021-07-01 00:01:00.000000000\" and ts <=\"2021-07-01 00:01:10.000000000\" ;",
"result": "./query_res_tb1.txt"
},
{
"sql":"select first(*) from xxxx ;",
"result": "./query_res_tb2.txt"
},
{
"sql":"select last(*) from xxxx;",
"result": "./query_res_tb3.txt"
},
{
"sql":"select last_row(*) from xxxx ;",
"result": "./query_res_tb4.txt"
},
{
"sql":"select max(c10) from xxxx ;",
"result": "./query_res_tb5.txt"
},
{
"sql":"select min(c1) from xxxx ;",
"result": "./query_res_tb6.txt"
},
{
"sql":"select avg(c10) from xxxx ;",
"result": "./query_res_tb7.txt"
}
]
}
}

View File

@ -1,157 +0,0 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
        buildPath = ""  # default when taosd is not found below
        for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
        # query: nanosecond query test covering where, max/min, group by, and order
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % binPath)
tdSql.execute("use nsdb")
# use where to filter
tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.590000000 \" ")
tdSql.checkData(0, 0, 4000)
tdSql.query("select count(*) from stb0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ")
tdSql.checkData(0, 0, 5900)
tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.590000000 \" ;")
tdSql.checkData(0, 0, 40)
tdSql.query("select count(*) from tb0_0 where ts>\"2021-07-01 00:00:00.000000000\" and ts <=\"2021-07-01 00:00:00.590000000\" ")
tdSql.checkData(0, 0, 59)
        # select max/min/avg from specific columns
tdSql.query("select max(c10) from stb0;")
print("select max(c10) from stb0 : " , tdSql.getData(0, 0))
tdSql.query("select max(c10) from tb0_0;")
print("select max(c10) from tb0_0 : " , tdSql.getData(0, 0))
tdSql.query("select min(c1) from stb0;")
print( "select min(c1) from stb0 : " , tdSql.getData(0, 0))
tdSql.query("select min(c1) from tb0_0;")
print( "select min(c1) from tb0_0 : " , tdSql.getData(0, 0))
tdSql.query("select avg(c1) from stb0;")
print( "select avg(c1) from stb0 : " , tdSql.getData(0, 0))
tdSql.query("select avg(c1) from tb0_0;")
print( "select avg(c1) from tb0_0 : " , tdSql.getData(0, 0))
tdSql.query("select count(*) from stb0 group by tbname;")
tdSql.checkData(0, 0, 100)
tdSql.checkData(10, 0, 100)
        # query: run the sqls above via taosdemo continuously
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuery.json -y " % binPath)
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % binPath)
tdSql.execute("use nsdbcsv")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.checkDataType(3, 1, "TIMESTAMP")
tdSql.query("select count(*) from stb0 where ts >\"2021-07-01 00:00:00.490000000\"")
tdSql.checkData(0, 0, 5000)
tdSql.query("select count(*) from stb0 where ts <now -1d-1h-3s")
tdSql.checkData(0, 0, 10000)
tdSql.query("select count(*) from stb0 where ts < 1626918583000000000")
tdSql.checkData(0, 0, 10000)
tdSql.execute('select count(*) from stb0 where c2 > 162687012800000000')
tdSql.execute('select count(*) from stb0 where c2 < 162687012800000000')
tdSql.execute('select count(*) from stb0 where c2 = 162687012800000000')
tdSql.execute('select count(*) from stb0 where c2 != 162687012800000000')
tdSql.execute('select count(*) from stb0 where c2 <> 162687012800000000')
tdSql.execute('select count(*) from stb0 where c2 > "2021-07-21 20:22:08.248246976"')
tdSql.execute('select count(*) from stb0 where c2 < "2021-07-21 20:22:08.248246976"')
tdSql.execute('select count(*) from stb0 where c2 = "2021-07-21 20:22:08.248246976"')
tdSql.execute('select count(*) from stb0 where c2 != "2021-07-21 20:22:08.248246976"')
tdSql.execute('select count(*) from stb0 where c2 <> "2021-07-21 20:22:08.248246976"')
tdSql.execute('select count(*) from stb0 where ts between "2021-07-01 00:00:00.000000000" and "2021-07-01 00:00:00.990000000"')
tdSql.execute('select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000')
tdSql.query('select avg(c0) from stb0 interval(5000000000b)')
tdSql.checkRows(1)
tdSql.query('select avg(c0) from stb0 interval(100000000b)')
tdSql.checkRows(10)
tdSql.error('select avg(c0) from stb0 interval(1b)')
tdSql.error('select avg(c0) from stb0 interval(999b)')
tdSql.query('select avg(c0) from stb0 interval(1000b)')
tdSql.checkRows(100)
tdSql.query('select avg(c0) from stb0 interval(1u)')
tdSql.checkRows(100)
tdSql.query('select avg(c0) from stb0 interval(100000000b) sliding (100000000b)')
tdSql.checkRows(10)
        # query: run the sqls above via taosdemo continuously
os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoQuerycsv.json -y " % binPath)
os.system("rm -rf ./query_res*.txt*")
os.system("rm -rf tools/taosdemoAllTest/NanoTestCase/*.py.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -1,110 +0,0 @@
{
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "nsdbcsv",
"query_times": 10,
"query_mode": "taosc",
"specified_table_query": {
"query_interval": 1,
"concurrent": 2,
"sqls": [
{
"sql": "select count(*) from stb0 where ts> \"2021-07-01 00:00:00.490000000\" ;",
"result": "./query_res0.txt"
},
{
"sql": "select count(*) from stb0 where ts < now -22d-1h-3s ;",
"result": "./query_res1.txt"
},
{
"sql": "select count(*) from stb0 where ts < 1626918583000000000 ;",
"result": "./query_res2.txt"
},
{
"sql": "select count(*) from stb0 where c2 <> 162687012800000000;",
"result": "./query_res3.txt"
},
{
"sql": "select count(*) from stb0 where c2 != \"2021-07-21 20:22:08.248246976\";",
"result": "./query_res4.txt"
},
{
"sql": "select count(*) from stb0 where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\";",
"result": "./query_res5.txt"
},
{
"sql":"select count(*) from stb0 group by tbname;",
"result":"./query_res6.txt"
},
{
"sql":"select count(*) from stb0 where ts between 1625068800000000000 and 1625068801000000000;",
"result":"./query_res7.txt"
},
{
"sql":"select avg(c0) from stb0 interval(5000000000b);",
"result":"./query_res8.txt"
},
{
"sql":"select avg(c0) from stb0 interval(100000000b) sliding (100000000b);",
"result":"./query_res9.txt"
}
]
},
"super_table_query": {
"stblname": "stb0",
"query_interval": 0,
"threads": 4,
"sqls": [
{
"sql": "select count(*) from xxxx where ts > \"2021-07-01 00:00:00.490000000\" ;",
"result": "./query_res_tb0.txt"
},
{
"sql":"select count(*) from xxxx where ts between \"2021-07-01 00:00:00.000000000\" and \"2021-07-01 00:00:00.990000000\" ;",
"result": "./query_res_tb1.txt"
},
{
"sql":"select first(*) from xxxx ;",
"result": "./query_res_tb2.txt"
},
{
"sql":"select last(*) from xxxx;",
"result": "./query_res_tb3.txt"
},
{
"sql":"select last_row(*) from xxxx ;",
"result": "./query_res_tb4.txt"
},
{
"sql":"select max(c0) from xxxx ;",
"result": "./query_res_tb5.txt"
},
{
"sql":"select min(c0) from xxxx ;",
"result": "./query_res_tb6.txt"
},
{
"sql":"select avg(c0) from xxxx ;",
"result": "./query_res_tb7.txt"
},
{
"sql":"select avg(c0) from xxxx interval(100000000b) sliding (100000000b) ;",
"result": "./query_res_tb8.txt"
}
]
}
}

View File

@ -1,32 +0,0 @@
{
"filetype":"subscribe",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"databases": "subnsdb",
"confirm_parameter_prompt": "no",
"specified_table_query":
{
"concurrent":2,
"mode":"sync",
"interval":10000,
"restart":"yes",
"keepProgress":"yes",
"sqls": [
{
"sql": "select * from stb0 where ts>= \"2021-07-01 00:00:00.000000000\" ;",
"result": "./subscribe_res0.txt"
},
{
"sql": "select * from stb0 where ts < now -2d-1h-3s ;",
"result": "./subscribe_res1.txt"
},
{
"sql": "select * from stb0 where ts < 1626918583000000000 ;",
"result": "./subscribe_res2.txt"
}]
}
}

Some files were not shown because too many files have changed in this diff