diff --git a/README-CN.md b/README-CN.md index 2b1790f4bb..4931c0177e 100644 --- a/README-CN.md +++ b/README-CN.md @@ -45,7 +45,7 @@ TDengine 目前可以在 Linux、 Windows、macOS 等平台上安装和运行。 TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBenchmark(曾命名为 taosdemo)和 taosdump 两个软件。默认 TDengine 编译不包含 taosTools, 您可以在编译 TDengine 时使用`cmake .. -DBUILD_TOOLS=true` 来同时编译 taosTools。 -为了构建TDengine, 请使用 [CMake](https://cmake.org/) 3.0.2 或者更高版本。 +为了构建TDengine, 请使用 [CMake](https://cmake.org/) 3.13.0 或者更高版本。 ## 安装工具 @@ -124,7 +124,7 @@ brew install argp-standalone gflags pkgconfig TDengine 包含数个使用 Go 语言开发的组件,比如taosAdapter, 请参考 golang.org 官方文档设置 go 开发环境。 -请使用 1.14 及以上版本。对于中国用户,我们建议使用代理来加速软件包下载。 +请使用 1.20 及以上版本。对于中国用户,我们建议使用代理来加速软件包下载。 ``` go env -w GO111MODULE=on diff --git a/README.md b/README.md index a8c20ea3f6..31d3a8bf67 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ You can choose to install through source code, [container](https://docs.tdengine TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to make them be compiled with TDengine. -To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in the project directory. +To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or higher versions in the project directory. ## Install build tools @@ -131,7 +131,7 @@ brew install argp-standalone gflags pkgconfig TDengine includes a few components like taosAdapter developed by Go language. Please refer to golang.org official documentation for golang environment setup. -Please use version 1.14+. For the user in China, we recommend using a proxy to accelerate package downloading. +Please use version 1.20+. For the user in China, we recommend using a proxy to accelerate package downloading. ``` go env -w GO111MODULE=on diff --git a/cmake/cmake.define b/cmake/cmake.define index 73f9497809..12e1b50539 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.0) -set(CMAKE_VERBOSE_MAKEFILE TRUE) +set(CMAKE_VERBOSE_MAKEFILE FALSE) set(TD_BUILD_TAOSA_INTERNAL FALSE) #set output directory diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in index ef6ed4af1d..13826a1a74 100644 --- a/cmake/taosadapter_CMakeLists.txt.in +++ b/cmake/taosadapter_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosadapter ExternalProject_Add(taosadapter GIT_REPOSITORY https://github.com/taosdata/taosadapter.git - GIT_TAG 3.0 + GIT_TAG main SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/docs/en/01-index.md b/docs/en/01-index.md index 9fe5e7800a..54271659ec 100644 --- a/docs/en/01-index.md +++ b/docs/en/01-index.md @@ -19,7 +19,9 @@ TDengine uses ubiquitous SQL as its query language, which greatly reduces learni If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the [Administration](./operation) section. -If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the [Reference](./reference) chapter. +If you want to know more about TDengine tools and the REST API, please see the [Reference](./reference) chapter. 
+ +For information about connecting to TDengine with different programming languages, see [Client Libraries](./client-libraries/). If you are very interested in the internal design of TDengine, please read the chapter [Inside TDengine](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully. diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md index f9fe68b47a..28b94a5236 100644 --- a/docs/en/02-intro/index.md +++ b/docs/en/02-intro/index.md @@ -37,27 +37,26 @@ The major features are listed below: - Provides an interactive [Command Line Interface (CLI)](../reference/taos-shell) for management, maintenance and ad-hoc queries. - Provides a tool [taosBenchmark](../reference/taosbenchmark/) for testing the performance of TDengine. 10. Programming - - Provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages. + - Provides [client libraries](../client-libraries/) for [C/C++](../client-libraries/cpp), [Java](../client-libraries/java), [Python](../client-libraries/python), [Go](../client-libraries/go), [Rust](../client-libraries/rust), [Node.js](../client-libraries/node) and other programming languages. - Provides a [REST API](../reference/rest-api/). For more details on features, please read through the entire documentation. ## Competitive Advantages -By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb/), with the following advantages. +By making full use of [characteristics of time series data](https://tdengine.com/characteristics-of-time-series-data/), TDengine differentiates itself from other time series databases with the following advantages. -- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression. +- **[High-Performance](https://tdengine.com/high-performance/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression. -- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly. +- **[Simplified Solution](https://tdengine.com/comprehensive-industrial-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly. 
-- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for Kubernetes deployment and full observability, TDengine is a cloud native Time-series Database and can be deployed on public, private or hybrid clouds. +- **[Cloud Native](https://tdengine.com/cloud-native/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for Kubernetes deployment and full observability, TDengine is a cloud native Time-series Database and can be deployed on public, private or hybrid clouds. -- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to[ - ](https://tdengine.com/tdengine/easy-time-series-data-platform/) deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access. +- **[Ease of Use](https://tdengine.com/easy-to-use/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access. -- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way. +- **[Easy Data Analytics](https://tdengine.com/simplifying-time-series-analysis-for-data-scientists/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way. -- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine's core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide. +- **[Open Source](https://tdengine.com/open-source/)**: TDengine's core modules, including cluster feature, are all available under open source licenses. It has gathered over 22k stars on GitHub. There is an active developer community, and over 400k running instances worldwide. With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. @@ -125,16 +124,9 @@ As a high-performance, scalable and SQL supported time-series database, TDengine - [TDengine vs. InfluxDB](https://tdengine.com/tsdb-comparison-influxdb-vs-tdengine/) - [TDengine vs. TimescaleDB](https://tdengine.com/tsdb-comparison-timescaledb-vs-tdengine/) -- [TDengine vs. OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/) -- [TDengine vs. 
Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/) - -## More readings -- [Introduction to Time-Series Database](https://tdengine.com/tsdb/) -- [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/) - ## Products -There are two products offered by TDengine: TDengine Enterprise and TDengine Cloud, for details please refer to -- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro) -- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn) +For information about our paid offerings, see: +- [TDengine Enterprise](https://tdengine.com/enterprise/) +- [TDengine Cloud](https://cloud.tdengine.com) \ No newline at end of file diff --git a/docs/en/05-get-started/01-docker.md b/docs/en/05-get-started/01-docker.md index 723194a325..761faf4a05 100644 --- a/docs/en/05-get-started/01-docker.md +++ b/docs/en/05-get-started/01-docker.md @@ -30,7 +30,7 @@ And then run the following command: docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine ``` -Note that TDengine Server 3.0 uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed. +Note that TDengine Server 3.0 uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connections. You can open these ports as needed. If you need to persist data to a specific directory on your local machine, please run the following command: ```shell diff --git a/docs/en/05-get-started/03-package.md b/docs/en/05-get-started/03-package.md index d34df2c970..c6f3e60932 100644 --- a/docs/en/05-get-started/03-package.md +++ b/docs/en/05-get-started/03-package.md @@ -14,9 +14,9 @@ This document describes how to install TDengine on Linux/Windows/macOS and perfo - To get started with TDengine on Docker, see [Quick Install on Docker](../../get-started/docker). - If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine). -The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface (CLI, taos), and some tools. Note that taosAdapter supports Linux only. In addition to connectors for multiple languages, TDengine also provides a [REST API](../../reference/rest-api) through [taosAdapter](../../reference/taosadapter). +The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface (CLI, taos), and some tools. Note that taosAdapter supports Linux only. In addition to client libraries for multiple languages, TDengine also provides a [REST API](../../reference/rest-api) through [taosAdapter](../../reference/taosadapter). -The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download the Lite package that includes only `taosd` and the C/C++ connector. +The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download the Lite package that includes only `taosd` and the C/C++ client library. 
TDengine OSS is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.tz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on x64 Windows and x64/m1 macOS. diff --git a/docs/en/05-get-started/_pkg_install.mdx b/docs/en/05-get-started/_pkg_install.mdx index 2372d2ff26..768cc7cd74 100644 --- a/docs/en/05-get-started/_pkg_install.mdx +++ b/docs/en/05-get-started/_pkg_install.mdx @@ -2,7 +2,7 @@ import PkgList from "/components/PkgList"; TDengine is easy to download and install. -The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download a lite package that includes only `taosd` and the C/C++ connector. +The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download a lite package that includes only `taosd` and the C/C++ client library. You can download the TDengine installation package in .rpm, .deb, or .tar.gz format. The .tar.tz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the .deb or .rpm package, download and install taosTools separately. @@ -10,7 +10,7 @@ Between official releases, beta versions may be released that contain new featur -For information about installing TDengine, see [Install and Uninstall](../../operation/pkg-install). +For information about installing TDengine, see [Install and Uninstall](../operation/pkg-install). For information about TDengine releases, see [All Downloads](https://tdengine.com/all-downloads) diff --git a/docs/en/05-get-started/index.md b/docs/en/05-get-started/index.md index 5c4ab59f75..697f98af15 100644 --- a/docs/en/05-get-started/index.md +++ b/docs/en/05-get-started/index.md @@ -12,7 +12,7 @@ import StackOverflowSVG from './stackoverflow.svg' You can install and run TDengine on Linux/Windows/macOS machines as well as Docker containers. You can also deploy TDengine as a managed service with TDengine Cloud. -The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](../reference/rest-api) through [taosAdapter](../reference/taosadapter). +The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to client libraries for multiple languages, TDengine also provides a [RESTful interface](../reference/rest-api) through [taosAdapter](../reference/taosadapter). 
```mdx-code-block import DocCardList from '@theme/DocCardList'; diff --git a/docs/en/07-develop/01-connect/_connect_java.mdx b/docs/en/07-develop/01-connect/_connect_java.mdx index fda86f2221..4d29e24911 100644 --- a/docs/en/07-develop/01-connect/_connect_java.mdx +++ b/docs/en/07-develop/01-connect/_connect_java.mdx @@ -12,4 +12,4 @@ When using REST connection, the feature of bulk pulling can be enabled if the si {{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}} ``` -More configuration about connection, please refer to [Java Connector](../../reference/connector/java) +More configuration about connection, please refer to [Java Client Library](../../client-libraries/java) diff --git a/docs/en/07-develop/01-connect/_connect_rust.mdx b/docs/en/07-develop/01-connect/_connect_rust.mdx index 80ac1f4ff4..5746968263 100644 --- a/docs/en/07-develop/01-connect/_connect_rust.mdx +++ b/docs/en/07-develop/01-connect/_connect_rust.mdx @@ -3,6 +3,6 @@ ``` :::note -For Rust connector, the connection depends on the feature being used. If "rest" feature is enabled, then only the implementation for "rest" is compiled and packaged. +For Rust client library, the connection depends on the feature being used. If "rest" feature is enabled, then only the implementation for "rest" is compiled and packaged. ::: diff --git a/docs/en/07-develop/01-connect/index.md b/docs/en/07-develop/01-connect/index.md index 11375bd060..2f826920ad 100644 --- a/docs/en/07-develop/01-connect/index.md +++ b/docs/en/07-develop/01-connect/index.md @@ -1,7 +1,7 @@ --- title: Connect to TDengine sidebar_label: Connect -description: This document describes how to establish connections to TDengine and how to install and use TDengine connectors. +description: This document describes how to establish connections to TDengine and how to install and use TDengine client libraries. --- import Tabs from "@theme/Tabs"; @@ -15,28 +15,28 @@ import ConnCSNative from "./_connect_cs.mdx"; import ConnC from "./_connect_c.mdx"; import ConnR from "./_connect_r.mdx"; import ConnPHP from "./_connect_php.mdx"; -import InstallOnLinux from "../../14-reference/03-connector/_linux_install.mdx"; -import InstallOnWindows from "../../14-reference/03-connector/_windows_install.mdx"; -import InstallOnMacOS from "../../14-reference/03-connector/_macos_install.mdx"; -import VerifyLinux from "../../14-reference/03-connector/_verify_linux.mdx"; -import VerifyWindows from "../../14-reference/03-connector/_verify_windows.mdx"; -import VerifyMacOS from "../../14-reference/03-connector/_verify_macos.mdx"; +import InstallOnLinux from "../../08-client-libraries/_linux_install.mdx"; +import InstallOnWindows from "../../08-client-libraries/_windows_install.mdx"; +import InstallOnMacOS from "../../08-client-libraries/_macos_install.mdx"; +import VerifyLinux from "../../08-client-libraries/_verify_linux.mdx"; +import VerifyWindows from "../../08-client-libraries/_verify_windows.mdx"; +import VerifyMacOS from "../../08-client-libraries/_verify_macos.mdx"; -Any application running on any platform can access TDengine through the REST API provided by TDengine. For information, see [REST API](../../reference/rest-api/). Applications can also use the connectors for various programming languages, including C/C++, Java, Python, Go, Node.js, C#, and Rust, to access TDengine. These connectors support connecting to TDengine clusters using both native interfaces (taosc). Some connectors also support connecting over a REST interface. 
Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector. +Any application running on any platform can access TDengine through the REST API provided by TDengine. For information, see [REST API](../../reference/rest-api/). Applications can also use the client libraries for various programming languages, including C/C++, Java, Python, Go, Node.js, C#, and Rust, to access TDengine. These client libraries support connecting to TDengine clusters using both native interfaces (taosc). Some client libraries also support connecting over a REST interface. Community developers have also contributed several unofficial client libraries, such as the ADO.NET, Lua, and PHP libraries. ## Establish Connection -There are two ways for a connector to establish connections to TDengine: +There are two ways for a client library to establish connections to TDengine: 1. REST connection through the REST API provided by the taosAdapter component. 2. Native connection through the TDengine client driver (taosc). -For REST and native connections, connectors provide similar APIs for performing operations and running SQL statements on your databases. The main difference is the method of establishing the connection, which is not visible to users. +For REST and native connections, client libraries provide similar APIs for performing operations and running SQL statements on your databases. The main difference is the method of establishing the connection, which is not visible to users. Key differences: 3. The REST connection is more accessible with cross-platform support, however it results in a 30% performance downgrade. -1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](../../reference/connector/cpp#parameter-binding-api), [Subscription](../../reference/connector/cpp#subscription-and-consumption-api), etc. +1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](../../client-libraries/cpp#parameter-binding-api), [Subscription](../../client-libraries/cpp#subscription-and-consumption-api), etc. ## Install Client Driver taosc @@ -72,7 +72,7 @@ After the above installation and configuration are done and making sure TDengine -## Install Connectors +## Install Client Library @@ -131,7 +131,7 @@ libtaos = { version = "0.4.2"} ``` :::info -Rust connector uses different features to distinguish the way to establish connection. To establish REST connection, please enable `rest` feature. +Rust client library uses different features to distinguish the way to establish connection. To establish REST connection, please enable `rest` feature. ```toml libtaos = { version = "*", features = ["rest"] } @@ -142,9 +142,9 @@ libtaos = { version = "*", features = ["rest"] } -Node.js connector provides different ways of establishing connections by providing different packages. +Node.js client library provides different ways of establishing connections by providing different packages. -1. Install Node.js Native Connector +1. Install Node.js Native Client Library ``` npm install @tdengine/client @@ -154,7 +154,7 @@ npm install @tdengine/client It's recommend to use Node whose version is between `node-v12.8.0` and `node-v13.0.0`. ::: -2. Install Node.js REST Connector +2. 
Install Node.js REST Client Library ``` npm install @tdengine/rest @@ -207,7 +207,7 @@ install.packages("RJDBC") -If the client driver (taosc) is already installed, then the C connector is already available. +If the client driver (taosc) is already installed, then the C client library is already available.
diff --git a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx index 1e719283cc..b9ec36e3ac 100644 --- a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx +++ b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx @@ -24,7 +24,7 @@ import PhpStmt from "./_php_stmt.mdx"; ## Introduction -Application programs can execute `INSERT` statement through connectors to insert rows. The TDengine CLI can also be used to manually insert data. +Application programs can execute `INSERT` statement through client libraries to insert rows. The TDengine CLI can also be used to manually insert data. ### Insert Single Row The below SQL statement is used to insert one row into table "d1001". diff --git a/docs/en/07-develop/03-insert-data/60-high-volume.md b/docs/en/07-develop/03-insert-data/60-high-volume.md index c15b3213bb..8e9a788d22 100644 --- a/docs/en/07-develop/03-insert-data/60-high-volume.md +++ b/docs/en/07-develop/03-insert-data/60-high-volume.md @@ -377,7 +377,7 @@ SQLWriter class encapsulates the logic of composing SQL and writing data. Please - TDengine client driver has been installed - Python3 has been installed, the the version >= 3.8 - - TDengine Python connector `taospy` has been installed + - TDengine Python client library `taospy` has been installed 2. Install faster-fifo to replace python builtin multiprocessing.Queue @@ -434,7 +434,7 @@ SQLWriter class encapsulates the logic of composing SQL and writing data. Please :::note -Don't establish connection to TDengine in the parent process if using Python connector in multi-process way, otherwise all the connections in child processes are blocked always. This is a known issue. +Don't establish connection to TDengine in the parent process if using Python client library in multi-process way, otherwise all the connections in child processes are blocked always. This is a known issue. ::: diff --git a/docs/en/07-develop/04-query-data/index.mdx b/docs/en/07-develop/04-query-data/index.mdx index 70b1140748..e44161d397 100644 --- a/docs/en/07-develop/04-query-data/index.mdx +++ b/docs/en/07-develop/04-query-data/index.mdx @@ -1,6 +1,6 @@ --- title: Query Data -description: This document describes how to query data in TDengine and how to perform synchronous and asynchronous queries using connectors. +description: This document describes how to query data in TDengine and how to perform synchronous and asynchronous queries using client libraries. --- import Tabs from "@theme/Tabs"; @@ -19,7 +19,7 @@ import CAsync from "./_c_async.mdx"; ## Introduction -SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine: +SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or client libraries. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. 
Here is the list of major query functionalities supported by TDengine: - Query on single column or multiple columns - Filter on tags or data columns: >, <, =, <\>, like diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx index b920d61a59..281e9e6750 100644 --- a/docs/en/07-develop/07-tmq.mdx +++ b/docs/en/07-develop/07-tmq.mdx @@ -23,7 +23,7 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers. -The following are some explanations about data subscription, which require some understanding of the architecture of TDengine and the use of various language linker interfaces. +The following are some explanations about data subscription, which require some understanding of the architecture of TDengine and the use of various language linker interfaces(you can learn it when you need it). - A consumption group consumes all data under the same topic, and different consumption groups are independent of each other; - A consumption group consumes all vgroups of the same topic, which can be composed of multiple consumers, but a vgroup is only consumed by one consumer. If the number of consumers exceeds the number of vgroups, the excess consumers do not consume data; - On the server side, only one offset is saved for each vgroup, and the offsets for each vgroup are monotonically increasing, but not necessarily continuous. There is no correlation between the offsets of various vgroups; @@ -45,12 +45,13 @@ The following are some explanations about data subscription, which require some This document does not provide any further introduction to the knowledge of message queues themselves. If you need to know more, please search for it yourself. +Note: Starting from version 3.2.0.0, data subscription supports vnode migration and splitting. Due to the dependence of data subscription on wal files, wal does not synchronize during vnode migration and splitting. Therefore, after migration or splitting, wal data that has not been consumed before cannot be consumed. So please ensure that all data has been consumed before proceeding with vnode migration or splitting, otherwise data loss may occur during consumption. ## Data Schema and API -The related schemas and APIs in various languages are described as follows: +The related schemas and APIs in various languages are described as follows(Note that the consumer structure is not thread safe. 
When using a consumer on one thread, do not close the consumer on another thread): diff --git a/docs/en/07-develop/index.md b/docs/en/07-develop/index.md index 4ed5e8c19f..da020d53d5 100644 --- a/docs/en/07-develop/index.md +++ b/docs/en/07-develop/index.md @@ -5,7 +5,7 @@ description: This document describes how to use the various components of TDengi Before creating an application to process time-series data with TDengine, consider the following: -1. Choose the method to connect to TDengine. TDengine offers a REST API that can be used with any programming language. It also has connectors for a variety of languages. +1. Choose the method to connect to TDengine. TDengine offers a REST API that can be used with any programming language. It also has client libraries for a variety of languages. 2. Design the data model based on your own use cases. Consider the main [concepts](../concept/) of TDengine, including "one table per data collection point" and the supertable. Learn about static labels, collected metrics, and subtables. Depending on the characteristics of your data and your requirements, you decide to create one or more databases and design a supertable schema that fit your data. 3. Decide how you will insert data. TDengine supports writing using standard SQL, but also supports schemaless writing, so that data can be written directly without creating tables manually. 4. Based on business requirements, find out what SQL query statements need to be written. You may be able to repurpose any existing SQL. @@ -14,7 +14,7 @@ Before creating an application to process time-series data with TDengine, consid 7. In many use cases (such as fleet management), the application needs to obtain the latest status of each data collection point. It is recommended that you use the cache function of TDengine instead of deploying Redis separately. 8. If you find that the SQL functions of TDengine cannot meet your requirements, then you can use user-defined functions to solve the problem. -This section is organized in the order described above. For ease of understanding, TDengine provides sample code for each supported programming language for each function. If you want to learn more about the use of SQL, please read the [SQL manual](../taos-sql/). For a more in-depth understanding of the use of each connector, please read the [Connector Reference Guide](../reference/connector/). If you also want to integrate TDengine with third-party systems, such as Grafana, please refer to the [third-party tools](../third-party/). +This section is organized in the order described above. For ease of understanding, TDengine provides sample code for each supported programming language for each function. If you want to learn more about the use of SQL, please read the [SQL manual](../taos-sql/). For a more in-depth understanding of the use of each client library, please read the [Client Library Reference Guide](../client-libraries/). If you also want to integrate TDengine with third-party systems, such as Grafana, please refer to the [third-party tools](../third-party/). If you encounter any problems during the development process, please click ["Submit an issue"](https://github.com/taosdata/TDengine/issues/new/choose) at the bottom of each page and submit it on GitHub right away. 
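The development workflow outlined in the docs/en/07-develop/index.md hunk above (choose a connection method, model the data with supertables, insert, then query) can be summarized in one short end-to-end sketch. The snippet below uses the Python client library (`taospy`) with its PEP 249 cursor interface; the database, supertable, and column names are illustrative only and are not taken from the files being changed.

```python
# A minimal end-to-end sketch of the workflow described above, using the
# Python client library (taospy). Names such as "power", "meters" and
# "d1001" are illustrative; adapt them to your own data model.
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
cursor = conn.cursor()

# Data model: one supertable, one subtable per data collection point.
cursor.execute("CREATE DATABASE IF NOT EXISTS power")
cursor.execute("USE power")
cursor.execute(
    "CREATE STABLE IF NOT EXISTS meters "
    "(ts TIMESTAMP, current FLOAT, voltage INT) TAGS (location BINARY(64))"
)

# Insert: the subtable d1001 is created automatically by the USING clause.
cursor.execute(
    "INSERT INTO d1001 USING meters TAGS ('California.SanFrancisco') "
    "VALUES (NOW, 10.3, 219)"
)

# Query: standard SQL over the supertable aggregates across all subtables.
cursor.execute("SELECT COUNT(*), AVG(current) FROM meters")
print(cursor.fetchall())

conn.close()
```

The supertable plus automatically created subtables mirror the "one table per data collection point" model referenced in step 2 of the workflow.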
diff --git a/docs/en/14-reference/03-connector/03-cpp.mdx b/docs/en/08-client-libraries/03-cpp.mdx similarity index 97% rename from docs/en/14-reference/03-connector/03-cpp.mdx rename to docs/en/08-client-libraries/03-cpp.mdx index 27adb58c12..80014ef3bf 100644 --- a/docs/en/14-reference/03-connector/03-cpp.mdx +++ b/docs/en/08-client-libraries/03-cpp.mdx @@ -1,10 +1,10 @@ --- -title: C/C++ Connector +title: C/C++ Client Library sidebar_label: C/C++ -description: This document describes the TDengine C/C++ connector. +description: This document describes the TDengine C/C++ client library. --- -C/C++ developers can use TDengine's client driver and the C/C++ connector, to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use the C/C++ connector you must include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs. The application also needs to link to the corresponding dynamic libraries on the platform where it is located. +C/C++ developers can use TDengine's client driver and the C/C++ client library, to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use the C/C++ client library you must include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs. The application also needs to link to the corresponding dynamic libraries on the platform where it is located. ```c #include @@ -263,7 +263,7 @@ typedef struct taosField { Get the reason for the last API call failure. The return value is the error code. :::note -TDengine version 2.0 and above recommends that each thread of a database application create a separate connection or a connection pool based on threads. It is not recommended to pass the connection (TAOS\*) structure to different threads for shared use in the application. Queries, writes, and other operations issued that are based on TAOS structures are multi-thread safe, but state quantities such as the "USE statement" may interfere between threads. In addition, the C connector can dynamically create new database-oriented connections on demand (this procedure is not visible to the user), and it is recommended that `taos_close()` be called only at the final exit of the program to close the connection. +TDengine version 2.0 and above recommends that each thread of a database application create a separate connection or a connection pool based on threads. It is not recommended to pass the connection (TAOS\*) structure to different threads for shared use in the application. Queries, writes, and other operations issued that are based on TAOS structures are multi-thread safe, but state quantities such as the "USE statement" may interfere between threads. In addition, the C client library can dynamically create new database-oriented connections on demand (this procedure is not visible to the user), and it is recommended that `taos_close()` be called only at the final exit of the program to close the connection. ::: @@ -394,7 +394,7 @@ The specific functions related to the interface are as follows (see also the [pr ### Schemaless Writing API -In addition to writing data using the SQL method or the parameter binding API, writing can also be done using schemaless writing, which eliminates the need to create a super table/data sub-table structure in advance and writes the data directly. 
The TDengine system automatically creates and maintains the required table structure based on the written data content. The use of schemaless writing is described in the chapter [Schemaless Writing](../../schemaless/), and the C/C++ API used with it is described here. +In addition to writing data using the SQL method or the parameter binding API, writing can also be done using schemaless writing, which eliminates the need to create a super table/data sub-table structure in advance and writes the data directly. The TDengine system automatically creates and maintains the required table structure based on the written data content. The use of schemaless writing is described in the chapter [Schemaless Writing](../../reference/schemaless/), and the C/C++ API used with it is described here. - `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)` diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/08-client-libraries/04-java.mdx similarity index 96% rename from docs/en/14-reference/03-connector/04-java.mdx rename to docs/en/08-client-libraries/04-java.mdx index 039395cc30..9feab378e0 100644 --- a/docs/en/14-reference/03-connector/04-java.mdx +++ b/docs/en/08-client-libraries/04-java.mdx @@ -1,16 +1,16 @@ --- -title: TDengine Java Connector +title: TDengine Java Client Library sidebar_label: Java -description: This document describes the TDengine Java Connector. +description: This document describes the TDengine Java client library. toc_max_heading_level: 4 --- import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -`taos-jdbcdriver` is the official Java connector for TDengine. Java developers can use it to develop applications that access data in TDengine. `taos-jdbcdriver` implements standard JDBC driver interfaces and two connection methods: One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The second is **REST connection** which is implemented through taosAdapter. The set of features implemented by the REST connection differs slightly from those implemented by the native connection. +`taos-jdbcdriver` is the official Java client library for TDengine. Java developers can use it to develop applications that access data in TDengine. `taos-jdbcdriver` implements standard JDBC driver interfaces and two connection methods: One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The second is **REST connection** which is implemented through taosAdapter. The set of features implemented by the REST connection differs slightly from those implemented by the native connection. -![TDengine Database Connector Java](tdengine-jdbc-connector.webp) +![TDengine Java client library](tdengine-jdbc-connector.webp) The preceding figure shows the two ways in which a Java application can access TDengine. 
@@ -72,7 +72,7 @@ try (Statement statement = connection.createStatement()) { } ``` -There are four types of error codes that the JDBC connector can report: +There are four types of error codes that the JDBC client library can report: - Error code of the JDBC driver itself (error code between 0x2301 and 0x2350), - Error code of the native connection method (error code between 0x2351 and 0x2360) @@ -123,7 +123,7 @@ For specific error codes, please refer to. | 0x2379 | seek offset must not be a negative number | The seek interface parameter cannot be negative. Use the correct parameter | | 0x237a | vGroup not found in result set | subscription is not bound to the VGroup due to the rebalance mechanism | -- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) +- [TDengine Java client library](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java) ## TDengine DataType vs. Java DataType @@ -148,21 +148,21 @@ TDengine currently supports timestamp, number, character, Boolean type, and the **Note**: Only TAG supports JSON types Due to historical reasons, the BINARY type data in TDengine is not truly binary data and is no longer recommended for use. Please use VARBINARY type instead. -GEOMETRY type is binary data in little endian byte order, which complies with the WKB specification. For detailed information, please refer to [Data Type](../../../taos-sql/data-type/) -For WKB specifications, please refer to [Well Known Binary (WKB)]( https://libgeos.org/specifications/wkb/ ) -For Java connector, the jts library can be used to easily create GEOMETRY type objects, serialize them, and write them to TDengine. Here is an example [Geometry example](https://github.com/taosdata/TDengine/blob/3.0/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/GeometryDemo.java) +GEOMETRY type is binary data in little endian byte order, which complies with the WKB specification. For detailed information, please refer to [Data Type](../../taos-sql/data-type/) +For WKB specifications, please refer to [Well Known Binary (WKB)](https://libgeos.org/specifications/wkb/) +For Java connector, the jts library can be used to easily create GEOMETRY type objects, serialize them, and write them to TDengine. Here is an example [Geometry example](https://github.com/taosdata/TDengine/blob/3.0/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/GeometryDemo.java) ## Installation Steps ### Pre-installation preparation -Before using Java Connector to connect to the database, the following conditions are required. +Before using Java client library to connect to the database, the following conditions are required. 
- Java 1.8 or above runtime environment and Maven 3.6 or above installed - TDengine client driver installed (required for native connections, not required for REST connections), please refer to [Installing Client Driver](../#Install-Client-Driver) -### Install the connectors +### Install the client library @@ -186,7 +186,7 @@ Add following dependency in the `pom.xml` file of your Maven project: -You can build Java connector from source code after cloning the TDengine project: +You can build Java client library from source code after cloning the TDengine project: ```shell git clone https://github.com/taosdata/taos-connector-jdbc.git @@ -269,7 +269,7 @@ In the above example, JDBC uses the client's configuration file to establish a c In TDengine, as long as one node in firstEp and secondEp is valid, the connection to the cluster can be established normally. -The configuration file here refers to the configuration file on the machine where the application that calls the JDBC Connector is located, the default path is `/etc/taos/taos.cfg` on Linux, the default path is `C://TDengine/cfg/taos.cfg` on Windows, and the default path is `/etc/taos/taos.cfg` on macOS. +The configuration file here refers to the configuration file on the machine where the application that calls the JDBC client library is located, the default path is `/etc/taos/taos.cfg` on Linux, the default path is `C://TDengine/cfg/taos.cfg` on Windows, and the default path is `/etc/taos/taos.cfg` on macOS. @@ -368,7 +368,7 @@ The configuration parameters in properties are as follows. - TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true. - TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. It only takes effect when using JDBC REST connection. - TSDBDriver.HTTP_POOL_SIZE: size of REST concurrent requests. The default value is 20. - For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](../../config/#Client-Only). + For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](../../reference/config/#Client-Only). ### Priority of configuration parameters @@ -977,7 +977,7 @@ public void setTagGeometry(int index, byte[] value) ### Schemaless Writing -TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless). +TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../reference/schemaless/). @@ -1057,7 +1057,7 @@ writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO ### Data Subscription -The TDengine Java Connector supports subscription functionality with the following application API. +The TDengine Java client library supports subscription functionality with the following application API. 
#### Create a Topic @@ -1094,7 +1094,7 @@ TaosConsumer consumer = new TaosConsumer<>(config); - httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type. - messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type. - httpPoolSize: Maximum number of concurrent requests on the a connection。It only takes effect when using WebSocket type. -- For more information, see [Consumer Parameters](../../../develop/tmq). Note that the default value of auto.offset.reset in data subscription on the TDengine server has changed since version 3.2.0.0. +- For more information, see [Consumer Parameters](../../develop/tmq). Note that the default value of auto.offset.reset in data subscription on the TDengine server has changed since version 3.2.0.0. #### Subscribe to consume data @@ -1172,14 +1172,14 @@ consumer.unsubscribe(); consumer.close() ``` -For more information, see [Data Subscription](../../../develop/tmq). +For more information, see [Data Subscription](../../develop/tmq). #### Full Sample Code -In addition to the native connection, the Java Connector also supports subscribing via websocket. +In addition to the native connection, the Java client library also supports subscribing via websocket. ```java public abstract class ConsumerLoop { @@ -1461,7 +1461,7 @@ The source code of the sample application is under `TDengine/examples/JDBC`: **Solution**: Use taos-jdbcdriver 3.0.2. -For additional troubleshooting, see [FAQ](../../../train-faq/faq). +For additional troubleshooting, see [FAQ](../../train-faq/faq). ## API Reference diff --git a/docs/en/14-reference/03-connector/05-go.mdx b/docs/en/08-client-libraries/05-go.mdx similarity index 97% rename from docs/en/14-reference/03-connector/05-go.mdx rename to docs/en/08-client-libraries/05-go.mdx index 33f7a93439..11930e2429 100644 --- a/docs/en/14-reference/03-connector/05-go.mdx +++ b/docs/en/08-client-libraries/05-go.mdx @@ -1,20 +1,20 @@ --- -title: TDengine Go Connector +title: TDengine Go Client Library sidebar_label: Go -description: This document describes the TDengine Go connector. +description: This document describes the TDengine Go client library. toc_max_heading_level: 4 --- import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx" -import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx" -import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx" -import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx" -import GoQuery from "../../07-develop/04-query-data/_go.mdx" +import GoInsert from "../07-develop/03-insert-data/_go_sql.mdx" +import GoInfluxLine from "../07-develop/03-insert-data/_go_line.mdx" +import GoOpenTSDBTelnet from "../07-develop/03-insert-data/_go_opts_telnet.mdx" +import GoOpenTSDBJson from "../07-develop/03-insert-data/_go_opts_json.mdx" +import GoQuery from "../07-develop/04-query-data/_go.mdx" -`driver-go` is the official Go language connector for TDengine. It implements the [database/sql](https://golang.org/pkg/database/sql/) package, the generic Go language interface to SQL databases. Go developers can use it to develop applications that access TDengine cluster data. +`driver-go` is the official Go language client library for TDengine. 
It implements the [database/sql](https://golang.org/pkg/database/sql/) package, the generic Go language interface to SQL databases. Go developers can use it to develop applications that access TDengine cluster data. `driver-go` provides two ways to establish connections. One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The set of features implemented by the REST connection differs slightly from those implemented by the native connection. @@ -81,7 +81,7 @@ Configure the environment variables and check the command. * ```go env``` * ```gcc -v``` -### Install the connectors +### Install the client library 1. Initialize the project with the `go mod` command. @@ -222,11 +222,11 @@ func main() { ### Specify the URL and Properties to get the connection -The Go connector does not support this feature +The Go client library does not support this feature ### Priority of configuration parameters -The Go connector does not support this feature +The Go client library does not support this feature ## Usage examples @@ -769,7 +769,7 @@ You can get the unique id by `common.GetReqID()`. ### Data Subscription -The TDengine Go Connector supports subscription functionality with the following application API. +The TDengine Go client library supports subscription functionality with the following application API. #### Create a Topic diff --git a/docs/en/14-reference/03-connector/06-rust.mdx b/docs/en/08-client-libraries/06-rust.mdx similarity index 93% rename from docs/en/14-reference/03-connector/06-rust.mdx rename to docs/en/08-client-libraries/06-rust.mdx index 0981df6724..8fa5c946aa 100644 --- a/docs/en/14-reference/03-connector/06-rust.mdx +++ b/docs/en/08-client-libraries/06-rust.mdx @@ -1,7 +1,7 @@ --- -title: TDengine Rust Connector +title: TDengine Rust Client Library sidebar_label: Rust -description: This document describes the TDengine Rust connector. +description: This document describes the TDengine Rust client library. toc_max_heading_level: 4 --- @@ -9,18 +9,18 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import Preparition from "./_preparation.mdx" -import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx" -import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx" -import RustSml from "../../07-develop/03-insert-data/_rust_schemaless.mdx" -import RustQuery from "../../07-develop/04-query-data/_rust.mdx" +import RustInsert from "../07-develop/03-insert-data/_rust_sql.mdx" +import RustBind from "../07-develop/03-insert-data/_rust_stmt.mdx" +import RustSml from "../07-develop/03-insert-data/_rust_schemaless.mdx" +import RustQuery from "../07-develop/04-query-data/_rust.mdx" [![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos) -`taos` is the official Rust connector for TDengine. Rust developers can develop applications to access the TDengine instance data. +`taos` is the official Rust client library for TDengine. Rust developers can develop applications to access the TDengine instance data. `taos` provides two ways to establish connections. 
One is the **Native Connection**, which connects to TDengine instances via the TDengine client driver (taosc). The other is the **WebSocket connection**, which connects to TDengine instances via the WebSocket interface provided by taosAdapter. You can specify a connection type with Cargo features. By default, both types are supported. The Websocket connection can be used on any platform. The native connection can be used on any platform that the TDengine Client supports. -The source code for the Rust connectors is located on [GitHub](https://github.com/taosdata/taos-connector-rust). +The source code for the Rust client library is located on [GitHub](https://github.com/taosdata/taos-connector-rust). ## Supported platforms @@ -37,7 +37,7 @@ Websocket connections are supported on all platforms that can run Go. | v0.7.6 | 3.0.3.0 | Support req_id in query. | | v0.6.0 | 3.0.0.0 | Base features. | -The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues. +The Rust client library is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues. ## Handling exceptions @@ -82,7 +82,7 @@ Note: Only TAG supports JSON types * Install the Rust development toolchain * If using the native connection, please install the TDengine client driver. Please refer to [install client driver](../#install-client-driver) -### Install the connectors +### Install the client library Depending on the connection method, add the [taos][taos] dependency in your Rust project as follows: @@ -154,8 +154,8 @@ The structure of the DSN description string is as follows: The parameters are described as follows: -- **driver**: Specify a driver name so that the connector can choose which method to use to establish the connection. Supported driver names are as follows: - - **taos**: Table names use the TDengine connector driver. +- **driver**: Specify a driver name so that the client library can choose which method to use to establish the connection. Supported driver names are as follows: + - **taos**: Table names use the TDengine native connection driver. - **tmq**: Use the TMQ to subscribe to data. - **http/ws**: Use Websocket to establish connections. - **https/wss**: Use Websocket to establish connections, and enable SSL/TLS. @@ -327,7 +327,7 @@ Parameter binding details see [API Reference](#stmt-api) ### Schemaless Writing -TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless). +TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../reference/schemaless/). @@ -347,7 +347,7 @@ client.put(&sml_data)? ### Data Subscription -TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/). +TDengine starts subscriptions through [TMQ](../../taos-sql/tmq/). #### Create a Topic @@ -361,7 +361,7 @@ taos.exec_many([ #### Create a Consumer -You create a TMQ connector by using a DSN. +You create a TMQ connection by using a DSN. ```rust let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; @@ -442,7 +442,7 @@ The following parameters can be configured for the TMQ DSN. 
Only `group.id` is m - `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis. - `client.id`: Subscriber client ID. -- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default value varies depending on the TDengine version. For details, see [Data Subscription](https://docs.tdengine.com/develop/tmq/). Note: This parameter is set per consumer group. +- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default value varies depending on the TDengine version. For details, see [Data Subscription](../../develop/tmq/). Note: This parameter is set per consumer group. - `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential. - `auto.commit.interval.ms`: Interval for automatic commits. @@ -488,7 +488,7 @@ The source code of the sample application is under `TDengine/examples/rust` : ## Frequently Asked Questions -For additional troubleshooting, see [FAQ](../../../train-faq/faq). +For additional troubleshooting, see [FAQ](../../train-faq/faq). ## API Reference diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/08-client-libraries/07-python.mdx similarity index 91% rename from docs/en/14-reference/03-connector/07-python.mdx rename to docs/en/08-client-libraries/07-python.mdx index ccc270d3be..4a06c42c12 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/08-client-libraries/07-python.mdx @@ -1,20 +1,20 @@ --- -title: TDengine Python Connector +title: TDengine Python Client Library sidebar_label: Python -description: This document describes taospy, the TDengine Python connector. +description: This document describes taospy, the TDengine Python client library. --- import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; -`taospy` is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](../cpp) and [REST interface](../../rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively. +`taospy` is the official Python client library for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](../cpp) and [REST interface](../../reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively. In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/). `taos-ws-py` is an optional package to enable using WebSocket to connect TDengine. The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST or WebSocket interface provided by taosAdapter is referred to hereinafter as a "REST connection" or "WebSocket connection". 
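As a quick illustration of the terminology in the paragraph above, the following sketch opens one native connection and one REST connection using the `taos` and `taosrest` modules mentioned there. The host, port, credentials, and keyword arguments are assumptions for a default local install, not code from the files being changed; check the Python client library reference for your version.

```python
# A minimal sketch of the two connection styles described above. Host,
# port and credential values are assumptions for a default local install.
import taos      # native connection through the client driver (taosc)
import taosrest  # REST connection through taosAdapter (default port 6041)

# Native connection: requires the TDengine client driver on this machine.
native_conn = taos.connect(host="localhost", user="root", password="taosdata")
cursor = native_conn.cursor()
cursor.execute("SELECT SERVER_VERSION()")
print("native:", cursor.fetchall())

# REST connection: only needs HTTP access to taosAdapter, no client driver.
rest_conn = taosrest.connect(url="http://localhost:6041",
                             user="root", password="taosdata")
rest_cursor = rest_conn.cursor()
rest_cursor.execute("SELECT SERVER_VERSION()")
print("rest:", rest_cursor.fetchall())
```

A WebSocket connection follows the same pattern once the optional `taos-ws-py` package noted above is installed.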
-The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python). +The source code for the Python client library is hosted on [GitHub](https://github.com/taosdata/taos-connector-python). ## Supported platforms - The [supported platforms](../#supported-platforms) for the native connection are the same as the ones supported by the TDengine client. @@ -29,13 +29,13 @@ The source code for the Python connector is hosted on [GitHub](https://github.co We recommend using the latest version of `taospy`, regardless of the version of TDengine. -|Python Connector Version|major changes| +|Python Client Library Version|major changes| |:-------------------:|:----:| |2.7.12|1. added support for `varbinary` type (STMT does not yet support)
2. improved query performance (thanks to contributor [hadrianl](https://github.com/taosdata/taos-connector-python/pull/209))| |2.7.9|support for getting assignment and seek function on subscription| |2.7.8|add `execute_many` method| -|Python Websocket Connector Version|major changes| +|Python Websocket Connection Version|major changes| |:----------------------------:|:-----:| |0.2.9|bugs fixes| |0.2.5|1. support for getting assignment and seek function on subscription
2. support schemaless
3. support STMT| @@ -43,9 +43,9 @@ We recommend using the latest version of `taospy`, regardless of the version of ## Handling Exceptions -There are 4 types of exception in python connector. +There are 4 types of exception in python client library. -- The exception of Python Connector itself. +- The exception of Python client library itself. - The exception of native library. - The exception of websocket - The exception of subscription. @@ -55,7 +55,7 @@ There are 4 types of exception in python connector. |:--------:|:---------:|:---------------:| |InterfaceError|the native library is too old that it cannot support the function|please check the TDengine client version| |ConnectionError|connection error|please check TDengine's status and the connection params| -|DatabaseError|database error|please upgrade Python connector to latest| +|DatabaseError|database error|please upgrade Python client library to latest| |OperationalError|operation error|| |ProgrammingError||| |StatementError|the exception of stmt|| @@ -65,7 +65,7 @@ There are 4 types of exception in python connector. It usually uses try-expect to handle exceptions in python. For exception handling, please refer to [Python Errors and Exceptions Documentation](https://docs.python.org/3/tutorial/errors.html). -All exceptions from the Python Connector are thrown directly. Applications should handle these exceptions. For example: +All exceptions from the Python client library are thrown directly. Applications should handle these exceptions. For example: ```python {{#include docs/examples/python/handle_exception.py}} @@ -101,14 +101,14 @@ If you use a native connection, you will also need to [Install Client Driver](.. #### Uninstalling an older version -If you have installed an older version of the Python Connector, please uninstall it beforehand. +If you have installed an older version of the Python client library, please uninstall it beforehand. ``` pip3 uninstall taos taospy ``` :::note -Earlier TDengine client software includes the Python connector. If the Python connector is installed from the client package's installation directory, the corresponding Python package name is `taos`. So the above uninstall command includes `taos`, and it doesn't matter if it doesn't exist. +Earlier TDengine client software includes the Python client library. If the Python client library is installed from the client package's installation directory, the corresponding Python package name is `taos`. So the above uninstall command includes `taos`, and it doesn't matter if it doesn't exist. ::: @@ -160,7 +160,7 @@ pip3 install taos-ws-py -For native connection, you need to verify that both the client driver and the Python connector itself are installed correctly. The client driver and Python connector have been installed properly if you can successfully import the `taos` module. In the Python Interactive Shell, you can type. +For native connection, you need to verify that both the client driver and the Python client library itself are installed correctly. The client driver and Python client library have been installed properly if you can successfully import the `taos` module. In the Python Interactive Shell, you can type. ```python import taos @@ -202,7 +202,7 @@ Requirement already satisfied: taospy in c:\users\username\appdata\local\program ### Connectivity testing -Before establishing a connection with the connector, we recommend testing the connectivity of the local TDengine CLI to the TDengine cluster. 
+Before establishing a connection with the client library, we recommend testing the connectivity of the local TDengine CLI to the TDengine cluster. @@ -444,7 +444,7 @@ The best practice for TaosCursor is to create a cursor at the beginning of a que ##### Use of the RestClient class -The `RestClient` class is a direct wrapper for the [REST API](../../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result. +The `RestClient` class is a direct wrapper for the [REST API](../../reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result. ```python title="Use of RestClient" {{#include docs/examples/python/rest_client_example.py}} @@ -501,7 +501,7 @@ The queried results can only be fetched once. For example, only one of `fetch_al -The `RestClient` class is a direct wrapper for the [REST API](../../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result. +The `RestClient` class is a direct wrapper for the [REST API](../../reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result. ```python {{#include docs/examples/python/rest_client_example.py}} @@ -561,7 +561,7 @@ The `TaosConnection` class and the `TaosResult` class already implement all the ##### Use of the RestClient class -The `RestClient` class is a direct wrapper for the [REST API](../../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result. +The `RestClient` class is a direct wrapper for the [REST API](../../reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result. ```python title="Use of RestClient" {{#include docs/examples/python/rest_client_with_req_id_example.py}} @@ -624,7 +624,7 @@ As the way to connect introduced above but add `req_id` argument. ### Writing data via parameter binding -The Python connector provides a parameter binding api for inserting data. Similar to most databases, TDengine currently only supports the question mark `?` to indicate the parameters to be bound. +The Python client library provides a parameter binding api for inserting data. Similar to most databases, TDengine currently only supports the question mark `?` to indicate the parameters to be bound. @@ -755,7 +755,7 @@ stmt.close() ### Schemaless Writing -Connector support schemaless insert. +Client library support schemaless insert. @@ -817,11 +817,11 @@ There is a optional parameter called `req_id` in `schemaless_insert` and `schema ### Data Subscription -Connector support data subscription. For more information about subscroption, please refer to [Data Subscription](../../../develop/tmq/). +Client library support data subscription. For more information about subscroption, please refer to [Data Subscription](../../develop/tmq/). #### Create a Topic -To create topic, please refer to [Data Subscription](../../../develop/tmq/#create-a-topic). +To create topic, please refer to [Data Subscription](../../develop/tmq/#create-a-topic). #### Create a Consumer @@ -829,7 +829,7 @@ To create topic, please refer to [Data Subscription](../../../develop/tmq/#creat -The consumer in the connector contains the subscription api. The syntax for creating a consumer is consumer = Consumer(configs). For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer). 
+The consumer in the client library contains the subscription api. The syntax for creating a consumer is consumer = Consumer(configs). For more subscription api parameters, please refer to [Data Subscription](../../develop/tmq/#create-a-consumer). ```python from taos.tmq import Consumer @@ -840,9 +840,9 @@ consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"}) -In addition to native connections, the connector also supports subscriptions via websockets. +In addition to native connections, the client library also supports subscriptions via websockets. -The syntax for creating a consumer is "consumer = consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer). +The syntax for creating a consumer is "consumer = consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription api parameters, please refer to [Data Subscription](../../develop/tmq/#create-a-consumer). ```python import taosws @@ -1025,7 +1025,7 @@ consumer.close() ### About nanoseconds -Due to the current imperfection of Python's nanosecond support (see link below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms` and `us`, which application developers will need to handle on their own. And it is recommended to use pandas' to_datetime(). The Python Connector may modify the interface in the future if Python officially supports nanoseconds in full. +Due to the current imperfection of Python's nanosecond support (see link below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms` and `us`, which application developers will need to handle on their own. And it is recommended to use pandas' to_datetime(). The Python client library may modify the interface in the future if Python officially supports nanoseconds in full. 1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds 2. https://www.python.org/dev/peps/pep-0564/ diff --git a/docs/en/14-reference/03-connector/08-node.mdx b/docs/en/08-client-libraries/08-node.mdx similarity index 83% rename from docs/en/14-reference/03-connector/08-node.mdx rename to docs/en/08-client-libraries/08-node.mdx index bed06477f1..71f360c6d1 100644 --- a/docs/en/14-reference/03-connector/08-node.mdx +++ b/docs/en/08-client-libraries/08-node.mdx @@ -1,7 +1,7 @@ --- -title: TDengine Node.js Connector +title: TDengine Node.js Client Library sidebar_label: Node.js -description: This document describes the TDengine Node.js connector. +description: This document describes the TDengine Node.js client library. 
toc_max_heading_level: 4 --- @@ -9,22 +9,22 @@ import Tabs from "@theme/Tabs"; import TabItem from "@theme/TabItem"; import Preparition from "./_preparation.mdx"; -import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx"; -import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx"; -import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx"; -import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx"; -import NodeQuery from "../../07-develop/04-query-data/_js.mdx"; +import NodeInsert from "../07-develop/03-insert-data/_js_sql.mdx"; +import NodeInfluxLine from "../07-develop/03-insert-data/_js_line.mdx"; +import NodeOpenTSDBTelnet from "../07-develop/03-insert-data/_js_opts_telnet.mdx"; +import NodeOpenTSDBJson from "../07-develop/03-insert-data/_js_opts_json.mdx"; +import NodeQuery from "../07-develop/04-query-data/_js.mdx"; -`@tdengine/client` and `@tdengine/rest` are the official Node.js connectors. Node.js developers can develop applications to access TDengine instance data. Note: The connectors for TDengine 3.0 are different than those for TDengine 2.x. The new connectors do not support TDengine 2.x. +`@tdengine/client` and `@tdengine/rest` are the official Node.js client libraries. Node.js developers can develop applications to access TDengine instance data. Note: The client libraries for TDengine 3.0 are different than those for TDengine 2.x. The new client libraries do not support TDengine 2.x. -`@tdengine/client` is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. `@tdengine/rest` is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The REST connector can run on any platform, but performance is slightly degraded, and the interface implements a somewhat different set of functional features than the native interface. +`@tdengine/client` is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. `@tdengine/rest` is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The REST client library can run on any platform, but performance is slightly degraded, and the interface implements a somewhat different set of functional features than the native interface. -The source code for the Node.js connectors is located on [GitHub](https://github.com/taosdata/taos-connector-node/tree/3.0). +The source code for the Node.js client libraries is located on [GitHub](https://github.com/taosdata/taos-connector-node/tree/3.0). ## Supported platforms -The platforms supported by the native connector are the same as those supported by the TDengine client driver. -The REST connector supports all platforms that can run Node.js. +The platforms supported by the native client library are the same as those supported by the TDengine client driver. +The REST client library supports all platforms that can run Node.js. ## Version support @@ -34,7 +34,7 @@ Please refer to [version support list](../#version-support) - + 1. Connection Management 2. General Query @@ -44,7 +44,7 @@ Please refer to [version support list](../#version-support) 6. Schemaless - + 1. Connection Management 2. 
General Query @@ -58,7 +58,7 @@ Please refer to [version support list](../#version-support) ### Pre-installation preparation - Install the Node.js development environment -- If you are using the REST connector, skip this step. However, if you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](../#Install-Client-Driver) for more details. We use [node-gyp](https://github.com/nodejs/node-gyp) to interact with TDengine instances and also need to install some dependencies mentioned below depending on the specific OS. +- If you are using the REST client library, skip this step. However, if you use the native client library, please install the TDengine client driver. Please refer to [Install Client Driver](../#Install-Client-Driver) for more details. We use [node-gyp](https://github.com/nodejs/node-gyp) to interact with TDengine instances and also need to install some dependencies mentioned below depending on the specific OS. @@ -103,14 +103,14 @@ If using ARM64 Node.js on Windows 10 ARM, you must add "Visual C++ compilers and ### Install via npm - + ```bash npm install @tdengine/client ``` - + ```bash npm install @tdengine/rest @@ -122,7 +122,7 @@ npm install @tdengine/rest ### Verify - + After installing the TDengine client, use the `nodejsChecker.js` program to verify that the current environment supports Node.js access to TDengine. @@ -141,7 +141,7 @@ node nodejsChecker.js host=localhost - After executing the above steps, the command-line will output the result of `nodejsChecker.js` connecting to the TDengine instance and performing a simple insert and query. - + After installing the TDengine client, use the `restChecker.js` program to verify that the current environment supports Node.js access to TDengine. @@ -164,7 +164,7 @@ node restChecker.js ## Establishing a connection -Please choose to use one of the connectors. +Please choose to use one of the client libraries. @@ -288,7 +288,7 @@ let cursor = conn.cursor(); | [schemless insert](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/schemaless.js) | Schemaless insert | | [TMQ](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/tmq.js) | Using data subscription | | [asyncQuery](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/asyncQueryExample.js) | Using asynchronous queries | -| [REST](https://github.com/taosdata/taos-connector-node/blob/3.0/typescript-rest/example/example.ts) | Using TypeScript with the REST connector | +| [REST](https://github.com/taosdata/taos-connector-node/blob/3.0/typescript-rest/example/example.ts) | Using TypeScript with the REST client library | ## Usage limitations @@ -316,7 +316,7 @@ let cursor = conn.cursor(); ## Important update records -### Native connectors +### Native client library | package name | version | TDengine version | Description | |------------------|---------|---------------------|------------------------------------------------------------------| @@ -324,7 +324,7 @@ let cursor = conn.cursor(); | td2.0-connector | 2.0.12 | 2.4.x; 2.5.x; 2.6.x | Fixed cursor.close() bug. 
| | td2.0-connector | 2.0.11 | 2.4.x; 2.5.x; 2.6.x | Supports parameter binding, JSON tags and schemaless interface | | td2.0-connector | 2.0.10 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription | -### REST Connector +### REST client library | package name | version | TDengine version | Description | |----------------------|---------|---------------------|---------------------------------------------------------------------------| @@ -334,6 +334,3 @@ let cursor = conn.cursor(); | td2.0-rest-connector | 1.0.5 | 2.4.x; 2.5.x; 2.6.x | Support cloud token | | td2.0-rest-connector | 1.0.3 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries | -## API Reference - -[API Reference](https://docs.taosdata.com/api/td2.0-connector/) diff --git a/docs/en/14-reference/03-connector/09-csharp.mdx b/docs/en/08-client-libraries/09-csharp.mdx similarity index 99% rename from docs/en/14-reference/03-connector/09-csharp.mdx rename to docs/en/08-client-libraries/09-csharp.mdx index 25bef1cf51..71cbacb515 100644 --- a/docs/en/14-reference/03-connector/09-csharp.mdx +++ b/docs/en/08-client-libraries/09-csharp.mdx @@ -1,7 +1,7 @@ --- -title: TDengine C# Connector +title: TDengine C# Client Library sidebar_label: C# -description: This document describes the TDengine C# connector. +description: This document describes the TDengine C# client library. toc_max_heading_level: 4 --- @@ -509,7 +509,7 @@ namespace NativeStmt stmt.AddBatch(); stmt.Exec(); var affected = stmt.Affected(); - Console.WriteLine($"affected rows: {affected}"); + Console.WriteLine($"affected rows: {affected}"); } } catch (Exception e) @@ -554,7 +554,7 @@ namespace WSStmt stmt.AddBatch(); stmt.Exec(); var affected = stmt.Affected(); - Console.WriteLine($"affected rows: {affected}"); + Console.WriteLine($"affected rows: {affected}"); } } catch (Exception e) @@ -950,7 +950,7 @@ namespace NativeSubscription } } } - + static void InsertData() { var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata"); @@ -1033,7 +1033,7 @@ namespace WSSubscription } } } - + static void InsertData() { var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata"); diff --git a/docs/en/08-client-libraries/50-odbc.mdx b/docs/en/08-client-libraries/50-odbc.mdx new file mode 100644 index 0000000000..08b2c031c6 --- /dev/null +++ b/docs/en/08-client-libraries/50-odbc.mdx @@ -0,0 +1,75 @@ +--- +sidebar_label: ODBC +title: TDengine ODBC +--- + + +## Introduction + +TDengine ODBC driver is a driver specifically designed for TDengine based on the ODBC standard. It can be used by ODBC based applications on Windows to access a local or remote TDengine cluster or TDengine cloud service, like [PowerBI](https://powerbi.microsoft.com). + +TDengine ODBC provides two kinds of connections, native connection and WebSocket connection. You can choose to use either one for your convenience, WebSocket is recommded choice and you must use WebSocket if you are trying to access TDengine cloud service. + +Note: TDengine ODBC driver can only be run on 64-bit system, and can only be invoked by 64-bit applications. + + +## Install + +1. TDengine ODBC driver supports only Windows platform. To run on Windows, VisualStudio C Runtime library is required. 
If the VisualStudio C Runtime Library is missing on your platform, you can download and install it from [VC Runtime Library](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170).
+
+2. Install the TDengine client package for Windows. The version should be 3.2.1.0 or above. The client package includes both the TDengine ODBC driver and some other necessary libraries that are used by either the native connection or the WebSocket connection.
+
+## Configure Data Source
+
+### Connection Types
+
+The TDengine ODBC driver supports two kinds of connections to a TDengine cluster, native connection and WebSocket connection. Here are the major differences between them.
+
+1. Only WebSocket can connect to TDengine cloud service.
+
+2. A WebSocket connection is more compatible with different TDengine server versions; normally you don't need to upgrade the client package together with the server side.
+
+3. A native connection normally has better performance, but you need to keep its version aligned with the server side.
+
+4. For most users, it's recommended to use the **WebSocket** connection, which has much better compatibility and almost the same performance as the native connection.
+
+### WebSocket Connection
+
+1. Click the "Start" menu, search for "ODBC", and choose "ODBC Data Source (64-bit)" (Note: don't choose 32-bit).
+
+2. Select the "User DSN" tab, and click "Add" to enter the "Create Data Source" page.
+
+3. Choose the data source to be added; here we choose "TDengine".
+
+4. Click "Finish", enter the configuration page for "TDengine ODBC Data Source", and fill in the required fields.
+
+    ![ODBC websocket connection config](./assets/odbc-ws-config-en.webp)
+
+    4.1 [DSN]: Data Source Name, required field, to name the new ODBC data source.
+
+    4.2 [Connection Type]: required field, we choose "WebSocket".
+
+    4.3 [URL]: required field, the URL for the ODBC data source. For example, `http://localhost:6041` is the URL for a local TDengine cluster, and `https://gw.cloud.taosdata.com?token=your_token` is the URL for a TDengine cloud service.
+
+    4.4 [Database]: optional field, the default database to access.
+
+    4.5 [User]: optional field, only used for connection testing in step 5; if left blank, the "root" user is used by default.
+
+    4.6 [Password]: optional field, only used for connection testing in step 5.
+
+5. Click "Test Connecting" to test whether the data source can be connected; if successful, it will prompt "connecting success".
+
+6. Click "OK" to save the configuration and exit.
+
+7. You can also select an already configured data source name in step 2 to change an existing configuration.
+
+### Native Connection
+
+Please note that the native connection cannot be used to access a TDengine cloud service.
+
+The steps are exactly the same as for the "WebSocket" connection, except that you choose "Native" in step 4.2.
+
+
+## PowerBI
+
+As an example, you can use Power BI, which invokes the TDengine ODBC driver, to access TDengine. Please refer to [Power BI](../../third-party/powerbi) for more details.
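+
+## Example: Using the Data Source from Code
+
+Beyond Power BI, any ODBC-capable application can reuse the data source configured above. The following is a minimal sketch of doing so from Python with the third-party `pyodbc` package; the DSN name "TDengine", the default root/taosdata credentials, and the sample query are assumptions taken from the example configuration above, not requirements of the driver.
+
+```python
+# Minimal sketch: query TDengine through the ODBC data source configured above.
+# Assumptions: a user DSN named "TDengine" exists, the default root/taosdata
+# credentials are valid, and the third-party pyodbc package is installed.
+import pyodbc
+
+conn = pyodbc.connect("DSN=TDengine;UID=root;PWD=taosdata", autocommit=True)
+cursor = conn.cursor()
+cursor.execute("SELECT SERVER_VERSION()")  # any SQL accepted by TDengine works here
+print(cursor.fetchone())
+cursor.close()
+conn.close()
+```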
diff --git a/docs/en/14-reference/03-connector/60-r-lang.mdx b/docs/en/08-client-libraries/60-r-lang.mdx similarity index 97% rename from docs/en/14-reference/03-connector/60-r-lang.mdx rename to docs/en/08-client-libraries/60-r-lang.mdx index f1cbb89f7b..881644c7f4 100644 --- a/docs/en/14-reference/03-connector/60-r-lang.mdx +++ b/docs/en/08-client-libraries/60-r-lang.mdx @@ -1,13 +1,13 @@ --- toc_max_heading_level: 4 sidebar_label: R -title: R Language Connector +title: R Language Client Library --- import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -import Rdemo from "../../07-develop/01-connect/_connect_r.mdx" +import Rdemo from "../07-develop/01-connect/_connect_r.mdx" By using the RJDBC library in R, you can enable R programs to access TDengine data. Here are the installation process, configuration steps, and an example code in R. diff --git a/docs/en/14-reference/03-connector/80-php.mdx b/docs/en/08-client-libraries/80-php.mdx similarity index 89% rename from docs/en/14-reference/03-connector/80-php.mdx rename to docs/en/08-client-libraries/80-php.mdx index bff9e8e5d5..ccaa2f8d55 100644 --- a/docs/en/14-reference/03-connector/80-php.mdx +++ b/docs/en/08-client-libraries/80-php.mdx @@ -1,12 +1,12 @@ --- -title: PHP Connector +title: PHP Client Library sidebar_label: PHP -description: This document describes the TDengine PHP connector. +description: This document describes the TDengine PHP client library. --- -`php-tdengine` is the TDengine PHP connector provided by TDengine community. In particular, it supports Swoole coroutine. +`php-tdengine` is the TDengine PHP client library provided by TDengine community. In particular, it supports Swoole coroutine. -PHP Connector relies on TDengine client driver. +PHP client library relies on TDengine client driver. Project Repository: @@ -81,7 +81,7 @@ Option Two: Use CLI `php -dextension=tdengine test.php`. ## Sample Programs -In this section a few sample programs which use TDengine PHP connector to access TDengine cluster are demonstrated. +In this section a few sample programs which use TDengine PHP client library to access TDengine cluster are demonstrated. 
> Any error would throw exception: `TDengine\Exception\TDengineException` diff --git a/docs/en/08-client-libraries/_category_.yml b/docs/en/08-client-libraries/_category_.yml new file mode 100644 index 0000000000..a70a33caa6 --- /dev/null +++ b/docs/en/08-client-libraries/_category_.yml @@ -0,0 +1 @@ +label: "Client Libraries" diff --git a/docs/en/14-reference/03-connector/_linux_install.mdx b/docs/en/08-client-libraries/_linux_install.mdx similarity index 100% rename from docs/en/14-reference/03-connector/_linux_install.mdx rename to docs/en/08-client-libraries/_linux_install.mdx diff --git a/docs/en/14-reference/03-connector/_macos_install.mdx b/docs/en/08-client-libraries/_macos_install.mdx similarity index 100% rename from docs/en/14-reference/03-connector/_macos_install.mdx rename to docs/en/08-client-libraries/_macos_install.mdx diff --git a/docs/en/14-reference/03-connector/_preparation.mdx b/docs/en/08-client-libraries/_preparation.mdx similarity index 85% rename from docs/en/14-reference/03-connector/_preparation.mdx rename to docs/en/08-client-libraries/_preparation.mdx index 99887ac36b..ba282b0bfb 100644 --- a/docs/en/14-reference/03-connector/_preparation.mdx +++ b/docs/en/08-client-libraries/_preparation.mdx @@ -2,7 +2,7 @@ :::info -Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](../../get-started/). For Windows development, you need to install the corresponding Windows client, please refer to [Install TDengine](../../get-started/package). +Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](../get-started/). For Windows development, you need to install the corresponding Windows client, please refer to [Install TDengine](../../get-started/package). - libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately. - taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately. 
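+
+As a minimal sketch, you can confirm from a script that the client driver shared library described above is loadable before building an application against it; the library file names below are the defaults mentioned above and may differ if your installation uses a custom location.
+
+```python
+# Minimal sketch: check that the TDengine client driver shared library can be loaded.
+# Assumes the default library names described above (libtaos.so on Linux, taos.dll on Windows).
+import ctypes
+import platform
+
+lib_name = "taos.dll" if platform.system() == "Windows" else "libtaos.so"
+try:
+    ctypes.CDLL(lib_name)
+    print(f"client driver {lib_name} loaded successfully")
+except OSError as err:
+    print(f"client driver {lib_name} could not be loaded: {err}")
+```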
diff --git a/docs/en/14-reference/03-connector/_verify_linux.mdx b/docs/en/08-client-libraries/_verify_linux.mdx similarity index 100% rename from docs/en/14-reference/03-connector/_verify_linux.mdx rename to docs/en/08-client-libraries/_verify_linux.mdx diff --git a/docs/en/14-reference/03-connector/_verify_macos.mdx b/docs/en/08-client-libraries/_verify_macos.mdx similarity index 100% rename from docs/en/14-reference/03-connector/_verify_macos.mdx rename to docs/en/08-client-libraries/_verify_macos.mdx diff --git a/docs/en/14-reference/03-connector/_verify_windows.mdx b/docs/en/08-client-libraries/_verify_windows.mdx similarity index 100% rename from docs/en/14-reference/03-connector/_verify_windows.mdx rename to docs/en/08-client-libraries/_verify_windows.mdx diff --git a/docs/en/14-reference/03-connector/_windows_install.mdx b/docs/en/08-client-libraries/_windows_install.mdx similarity index 100% rename from docs/en/14-reference/03-connector/_windows_install.mdx rename to docs/en/08-client-libraries/_windows_install.mdx diff --git a/docs/en/08-client-libraries/assets/odbc-ws-config-en.webp b/docs/en/08-client-libraries/assets/odbc-ws-config-en.webp new file mode 100644 index 0000000000..aaca2e99b4 Binary files /dev/null and b/docs/en/08-client-libraries/assets/odbc-ws-config-en.webp differ diff --git a/docs/en/14-reference/03-connector/connector.webp b/docs/en/08-client-libraries/connector.webp similarity index 100% rename from docs/en/14-reference/03-connector/connector.webp rename to docs/en/08-client-libraries/connector.webp diff --git a/docs/en/14-reference/03-connector/index.mdx b/docs/en/08-client-libraries/index.mdx similarity index 78% rename from docs/en/14-reference/03-connector/index.mdx rename to docs/en/08-client-libraries/index.mdx index 4a3e9195d6..7cf2839609 100644 --- a/docs/en/14-reference/03-connector/index.mdx +++ b/docs/en/08-client-libraries/index.mdx @@ -1,15 +1,15 @@ --- -title: Connector -description: This document describes the connectors that TDengine provides to interface with various programming languages. +title: Client Libraries +description: This document describes the client libraries that TDengine provides to interface with various programming languages. --- -TDengine provides a rich set of APIs (application development interface). To facilitate users to develop their applications quickly, TDengine supports connectors for multiple programming languages, including official connectors for C/C++, Java, Python, Go, Node.js, C#, and Rust. These connectors support connecting to TDengine clusters using both native interfaces (taosc) and REST interfaces (not supported in a few languages yet). Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector. +TDengine provides a rich set of APIs (application development interface). To facilitate users to develop their applications quickly, TDengine supports client libraries for multiple programming languages, including official libraries for C/C++, Java, Python, Go, Node.js, C#, and Rust. These client libraries support connecting to TDengine clusters using both native interfaces (taosc) and REST interfaces (not supported in a few languages yet). Community developers have also contributed several unofficial client libraries, such as the ADO.NET, Lua, and PHP libraries. 
-![TDengine Database image-connector](./connector.webp) +![TDengine client library connections](./connector.webp) ## Supported platforms -Currently, TDengine's native interface connectors can support platforms such as x64 and ARM hardware platforms and Linux/Windows/macOS development environments. The comparison matrix is as follows. +Currently, TDengine's native interface client libraries can support platforms such as x64 and ARM hardware platforms and Linux/Windows/macOS development environments. The comparison matrix is as follows. | **CPU** | **OS** | **Java** | **Python** | **Go** | **Node.js** | **C#** | **Rust** | C/C++ | | -------------- | --------- | -------- | ---------- | ------ | ----------- | ------ | -------- | ----- | @@ -25,7 +25,7 @@ Using REST connection can support a broader range of operating systems as it doe ## Version support -TDengine version updates often add new features, and the connector versions in the list are the best-fit versions of the connector. +TDengine version updates often add new features, and the client library versions in the list are the best-fit versions of the library. | **TDengine Versions** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** | | --------------------------- | -------------- | -------------- | -------------- | ------------- | --------------- | --------------- | @@ -37,7 +37,7 @@ TDengine version updates often add new features, and the connector versions in t ## Functional Features -Comparing the connector support for TDengine functional features as follows. +Comparing the client library support for TDengine functional features as follows. ### Using the native interface (taosc) @@ -66,7 +66,7 @@ The different database framework specifications for various programming language :::warning -- Regardless of the programming language chosen for the connector, TDengine versions 2.0 and above recommend that each thread of a database application create a separate connection. Or create a connection pool based on threads to avoid interference between threads with the "USE statement" state within a connection (but the connection's query and write operations are thread-safe). +- Regardless of the programming language chosen for the client library, TDengine versions 2.0 and above recommend that each thread of a database application create a separate connection. Or create a connection pool based on threads to avoid interference between threads with the "USE statement" state within a connection (but the connection's query and write operations are thread-safe). ::: @@ -82,7 +82,7 @@ import VerifyMacOS from "./_verify_macos.mdx"; ## Install Client Driver :::info -The client driver needs to be installed if you use the native interface connector on a system that does not have the TDengine server software installed. +The client driver needs to be installed if you use the native interface connection on a system that does not have the TDengine server software installed. 
::: diff --git a/docs/en/14-reference/03-connector/tdengine-jdbc-connector.webp b/docs/en/08-client-libraries/tdengine-jdbc-connector.webp similarity index 100% rename from docs/en/14-reference/03-connector/tdengine-jdbc-connector.webp rename to docs/en/08-client-libraries/tdengine-jdbc-connector.webp diff --git a/docs/en/10-deployment/02-docker.md b/docs/en/10-deployment/02-docker.md index 63153f3033..2a4511c7b1 100644 --- a/docs/en/10-deployment/02-docker.md +++ b/docs/en/10-deployment/02-docker.md @@ -21,6 +21,7 @@ docker run -d --name tdengine \ * /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. And also you can modify ~/data/taos/dnode/data to your any other local emtpy data directory * /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. And also you can modify ~/data/taos/dnode/log to your any other local empty log directory + ::: The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command. @@ -42,7 +43,7 @@ taos> show databases; Query OK, 2 row(s) in set (0.002843s) ``` -The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from outside the container using TDengine CLI or various connectors for complex scenarios. +The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various client libraries (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from outside the container using TDengine CLI or various client libraries for complex scenarios. ## Start TDengine on the host network @@ -83,7 +84,7 @@ Next, ensure the hostname "tdengine" is resolvable in `/etc/hosts`. echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts ``` -Finally, the TDengine service can be accessed from the TDengine CLI or any connector with "tdengine" as the server address. +Finally, the TDengine service can be accessed from the TDengine CLI or any client library with "tdengine" as the server address. ```shell taos -h tdengine -P 6030 @@ -324,6 +325,7 @@ services: - The `VERSION` environment variable is used to set the tdengine image tag - `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time + ::: 2. Start the cluster diff --git a/docs/en/12-taos-sql/02-database.md b/docs/en/12-taos-sql/02-database.md index a0e036664a..ccf340b511 100644 --- a/docs/en/12-taos-sql/02-database.md +++ b/docs/en/12-taos-sql/02-database.md @@ -46,6 +46,7 @@ database_option: { - last_row: The last row of each subtable is cached. 
This option significantly improves the performance of the LAST_ROW function.
 - last_value: The last non-null value of each column in each subtable is cached. This option significantly improves the performance of the LAST function under normal circumstances, such as statements including the WHERE, ORDER BY, GROUP BY, and INTERVAL keywords.
 - both: The last row of each subtable and the last non-null value of each column in each subtable are cached.
+  Note: If you turn cachemodel on, then off, and then on again, the results of last/last_row may be wrong. Avoid toggling the option in this way; it is strongly recommended to keep the cache enabled by always using "both".
 - CACHESIZE: specifies the amount (in MB) of memory used for subtable caching on each vnode. Enter a value between 1 and 65536. The default value is 1.
 - COMP: specifies how databases are compressed. The default value is 2.
   - 0: Compression is disabled.
@@ -137,6 +138,10 @@ You can use `show <db_name>.vgroups;` to check the value of cacheload.
 
 If the value of `cacheload` is very close to the value of `cachesize`, then it's very probably that `cachesize` is too small. If the value of `cacheload` is much smaller than the value of `cachesize`, then `cachesize` is big enough. You can use this simple principle to determine. Depending on how much memory is available in your system, you can choose to double `cachesize` or incrase it by even 5 or more times.
 
+4. stt_trigger
+
+Please make sure to stop data writing before trying to alter the stt_trigger parameter.
+
 :::note
 Other parameters cannot be modified after the database has been created.
 
diff --git a/docs/en/12-taos-sql/05-insert.md b/docs/en/12-taos-sql/05-insert.md
index f6e39a9734..57e15746cc 100644
--- a/docs/en/12-taos-sql/05-insert.md
+++ b/docs/en/12-taos-sql/05-insert.md
@@ -158,8 +158,8 @@ Automatically creating table and the table name is specified through the `tbname
 
 ```sql
 INSERT INTO meters(tbname, location, groupId, ts, current, phase)
-    values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 0.32)
-    ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 0.33)
-    ('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 0.33)
+    values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32)
+    ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33)
+    ('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33)
 ```
 
diff --git a/docs/en/12-taos-sql/06-select.md b/docs/en/12-taos-sql/06-select.md
index f1e19a5449..2606b183b8 100755
--- a/docs/en/12-taos-sql/06-select.md
+++ b/docs/en/12-taos-sql/06-select.md
@@ -259,7 +259,11 @@ The GROUP BY clause does not guarantee that the results are ordered. If you want
 
 ## PARTITION BY
 
-The PARTITION BY clause is a TDengine-specific extension to standard SQL. This clause partitions data based on the part_list and performs computations per partition.
+The PARTITION BY clause is a TDengine-specific extension to standard SQL introduced in TDengine 3.0. This clause partitions data based on the part_list and performs computations per partition.
+
+PARTITION BY and GROUP BY have similar meanings: both group data according to a specified list and then perform calculations. The difference is that PARTITION BY does not impose the restrictions that GROUP BY places on the SELECT list; any operation (constants, aggregations, scalars, expressions, etc.) can be performed within a group. Therefore, PARTITION BY is fully compatible with GROUP BY in terms of usage, and all places that use the GROUP BY clause can be replaced with PARTITION BY.
+
+Because PARTITION BY does not require returning a single row of aggregated data, it can also support various window operations after grouping and slicing. All window operations that require grouping can only use the PARTITION BY clause. For more information, see TDengine Extensions.
 
 
diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md
index 4f8ccc418b..851ef86b67 100644
--- a/docs/en/12-taos-sql/10-function.md
+++ b/docs/en/12-taos-sql/10-function.md
@@ -626,9 +626,9 @@ TIMEDIFF(expr1, expr2 [, time_unit])
 #### TIMETRUNCATE
 
 ```sql
-TIMETRUNCATE(expr, time_unit [, ignore_timezone])
+TIMETRUNCATE(expr, time_unit [, use_current_timezone])
 
-ignore_timezone: {
+use_current_timezone: {
     0
   | 1
 }
 
@@ -647,10 +647,11 @@ ignore_timezone: {
    1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
 - The precision of the returned timestamp is same as the precision set for the current data base in use
 - If the input data is not formatted as a timestamp, the returned value is null.
-- If `1d` is used as `time_unit` to truncate the timestamp, `ignore_timezone` option can be set to indicate if the returned result is affected by client timezone or not.
-  For example, if client timezone is set to UTC+0800, TIMETRUNCATE('2020-01-01 23:00:00', 1d, 0) will return '2020-01-01 08:00:00'.
-  Otherwise, TIMETRUNCATE('2020-01-01 23:00:00', 1d, 1) will return '2020-01-01 00:00:00'.
-  If `ignore_timezone` option is omitted, the default value is set to 1.
+- When using 1d/1w as the time unit to truncate a timestamp, you can specify whether to truncate based on the current time zone by setting the use_current_timezone parameter.
+  The value 0 indicates truncation using the UTC time zone, and the value 1 indicates truncation using the current time zone.
+  For example, if the time zone configured on the client is UTC+0800, TIMETRUNCATE('2020-01-01 23:00:00', 1d, 0) returns '2020-01-01 08:00:00'.
+  TIMETRUNCATE('2020-01-01 23:00:00', 1d, 1) returns '2020-01-01 00:00:00'.
+  When use_current_timezone is not specified, it defaults to 1.
 
 #### TIMEZONE
 
diff --git a/docs/en/12-taos-sql/12-distinguished.md b/docs/en/12-taos-sql/12-distinguished.md
index 502b7562f3..90237a54f5 100644
--- a/docs/en/12-taos-sql/12-distinguished.md
+++ b/docs/en/12-taos-sql/12-distinguished.md
@@ -16,7 +16,10 @@ When you query a supertable, you may need to partition the supertable by some di
 PARTITION BY part_list
 ```
 
-part_list can be any scalar expression, such as a column, constant, scalar function, or a combination of the preceding items.
+part_list can be any scalar expression, such as a column, constant, scalar function, or a combination of the preceding items. For example, you can group data by the tag location and take the average voltage within each group:
+```sql
+select location, avg(voltage) from meters partition by location
+```
 
 A PARTITION BY clause is processed as follows:
 
@@ -25,10 +28,13 @@ A PARTITION BY clause is processed as follows:
 
 - The PARTITION BY clause can be used together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. 
For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value: ```sql -select max(current) from meters partition by location interval(10m) +select _wstart, location, max(current) from meters partition by location interval(10m) ``` -The most common usage of PARTITION BY is partitioning the data in subtables by tags then perform computation when querying data in a supertable. More specifically, `PARTITION BY TBNAME` partitions the data of each subtable into a single timeline, and this method facilitates the statistical analysis in many use cases of processing timeseries data. +The most common usage of PARTITION BY is partitioning the data in subtables by tags then perform computation when querying data in a supertable. More specifically, `PARTITION BY TBNAME` partitions the data of each subtable into a single timeline, and this method facilitates the statistical analysis in many use cases of processing timeseries data. For example, calculate the average voltage of each meter every 10 minutes +```sql +select _wstart, tbname, avg(voltage) from meters partition by tbname interval(10m) +``` ## Windowed Queries diff --git a/docs/en/12-taos-sql/13-tmq.md b/docs/en/12-taos-sql/13-tmq.md index 1a805c76fb..d14b6da2d3 100644 --- a/docs/en/12-taos-sql/13-tmq.md +++ b/docs/en/12-taos-sql/13-tmq.md @@ -41,7 +41,7 @@ The preceding command displays all topics in the current database. ## Create Consumer Group -You can create consumer groups only through the TDengine Client driver or the API provided by a connector. +You can create consumer groups only through the TDengine Client driver or the API provided by a client library. ## Delete Consumer Group diff --git a/docs/en/14-reference/02-rest-api/02-rest-api.mdx b/docs/en/14-reference/02-rest-api/02-rest-api.mdx index 5cb680e34b..76dc3b6b58 100644 --- a/docs/en/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs/en/14-reference/02-rest-api/02-rest-api.mdx @@ -6,7 +6,7 @@ description: This document describes the TDengine REST API. To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles; namely REST API. To minimize the learning cost, unlike REST APIs for other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request, to operate the database. :::note -One difference from the native connector is that the REST interface is stateless and so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name in the prefix. TDengine supports specification of the db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. +One difference from the native connection is that the REST interface is stateless and so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name in the prefix. TDengine supports specification of the db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. 
::: ## Installation diff --git a/docs/en/14-reference/03-connector/_category_.yml b/docs/en/14-reference/03-connector/_category_.yml deleted file mode 100644 index 6a766e9657..0000000000 --- a/docs/en/14-reference/03-connector/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: "Connector" diff --git a/docs/en/14-reference/08-taos-shell.md b/docs/en/14-reference/08-taos-shell.md index 8512f5b59d..d69f3876d4 100644 --- a/docs/en/14-reference/08-taos-shell.md +++ b/docs/en/14-reference/08-taos-shell.md @@ -8,7 +8,7 @@ The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is ## Installation -If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI in an environment where no TDengine server is running, the TDengine client installation package needs to be installed first. For details, please refer to [Connector](../connector/). +If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI in an environment where no TDengine server is running, the TDengine client installation package needs to be installed first. For details, please refer to [Install Client Driver](../../client-libraries/#install-client-driver). ## Execution diff --git a/docs/en/14-reference/09-support-platform/index.md b/docs/en/14-reference/09-support-platform/index.md index 779882f582..bc1133b9a7 100644 --- a/docs/en/14-reference/09-support-platform/index.md +++ b/docs/en/14-reference/09-support-platform/index.md @@ -1,6 +1,6 @@ --- title: List of supported platforms -description: This document describes the supported platforms for the TDengine server, client, and connectors. +description: This document describes the supported platforms for the TDengine server, client, and client libraries. --- ## List of supported platforms for TDengine server @@ -12,9 +12,9 @@ description: This document describes the supported platforms for the TDengine se Note: 1) ● means officially tested and verified, ○ means unofficially tested and verified, E means only supported by the enterprise edition. 2) The community edition only supports newer versions of mainstream operating systems, including Ubuntu 18+/CentOS 7+/RetHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS, etc. If you have requirements for other operating systems and editions, please contact support of the enterprise edition. -## List of supported platforms for TDengine clients and connectors +## List of supported platforms for TDengine clients and client libraries -TDengine's connector can support a wide range of platforms, including X64/X86/ARM64/ARM32/MIPS/Alpha/LoongArch64 hardware platforms and Linux/Win64/Win32/macOS development environments. +TDengine's client libraries can support a wide range of platforms, including X64/X86/ARM64/ARM32/MIPS/Alpha/LoongArch64 hardware platforms and Linux/Win64/Win32/macOS development environments. The comparison matrix is as follows. diff --git a/docs/en/14-reference/index.md b/docs/en/14-reference/index.md index bc8ec69965..d57eee512a 100644 --- a/docs/en/14-reference/index.md +++ b/docs/en/14-reference/index.md @@ -1,9 +1,9 @@ --- title: Reference -description: This document describes TDengine connectors and utilities. +description: This document describes TDengine utilities. --- -This section describes the TDengine connectors and utilities. 
+This section describes the TDengine utilities. ```mdx-code-block import DocCardList from '@theme/DocCardList'; diff --git a/docs/en/20-third-party/11-kafka.md b/docs/en/20-third-party/11-kafka.md index cac4f5f604..42266d232c 100644 --- a/docs/en/20-third-party/11-kafka.md +++ b/docs/en/20-third-party/11-kafka.md @@ -94,7 +94,7 @@ The output as bellow: The role of the TDengine Sink Connector is to synchronize the data of the specified topic to TDengine. Users do not need to create databases and super tables in advance. The name of the target database can be specified manually (see the configuration parameter connection.database), or it can be generated according to specific rules (see the configuration parameter connection.database.prefix). -TDengine Sink Connector internally uses TDengine [modeless write interface](../../reference/connector/cpp#modeless write-api) to write data to TDengine, currently supports data in three formats: [InfluxDB line protocol format](../../develop/insert-data/influxdb-line), [OpenTSDB Telnet protocol format](../../develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON protocol format](../../develop/insert-data/opentsdb-json). +TDengine Sink Connector internally uses TDengine [modeless write interface](../../client-libraries/cpp#modeless write-api) to write data to TDengine, currently supports data in three formats: [InfluxDB line protocol format](../../develop/insert-data/influxdb-line), [OpenTSDB Telnet protocol format](../../develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON protocol format](../../develop/insert-data/opentsdb-json). The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format. diff --git a/docs/en/20-third-party/13-Jupyter.md b/docs/en/20-third-party/13-Jupyter.md index 1ac9df1da4..79636cd504 100644 --- a/docs/en/20-third-party/13-Jupyter.md +++ b/docs/en/20-third-party/13-Jupyter.md @@ -4,7 +4,7 @@ sidebar_label: JupyterLab description: This document describes how to integrate TDengine with JupyterLab. --- -JupyterLab is the next generation of the ubiquitous Jupyter Notebook. In this note we show you how to install the TDengine Python connector to connect to TDengine in JupyterLab. You can then insert data and perform queries against the TDengine instance within JupyterLab. +JupyterLab is the next generation of the ubiquitous Jupyter Notebook. In this note we show you how to install the TDengine Python client library to connect to TDengine in JupyterLab. You can then insert data and perform queries against the TDengine instance within JupyterLab. ## Install JupyterLab Installing JupyterLab is very easy. Installation instructions can be found at: @@ -36,8 +36,8 @@ jupyter lab ```` This will automatically launch your default browser and connect to your JupyterLab instance, usually on port 8888. -## Install the TDengine Python connector -You can now install the TDengine Python connector as follows. +## Install the TDengine Python client library +You can now install the TDengine Python client library as follows. Start a new Python kernel in JupyterLab. @@ -55,8 +55,8 @@ import sys ```` ## Connect to TDengine -You can find detailed examples to use the Python connector, in the TDengine documentation here. -Once you have installed the TDengine Python connector in your JupyterLab kernel, the process of connecting to TDengine is the same as that you would use if you weren't using JupyterLab. 
+You can find detailed examples to use the Python client library, in the TDengine documentation here. +Once you have installed the TDengine Python client library in your JupyterLab kernel, the process of connecting to TDengine is the same as that you would use if you weren't using JupyterLab. Each TDengine instance, has a database called "log" which has monitoring information about the TDengine instance. In the "log" database there is a [supertable](https://docs.tdengine.com/taos-sql/stable/) called "disks_info". @@ -96,4 +96,4 @@ result = sqlQuery(conn) print(result) ```` -TDengine has connectors for various languages including Node.js, Go, PHP and there are kernels for these languages which can be found [here](https://github.com/jupyter/jupyter/wiki/Jupyter-kernels). +TDengine has client libraries for various languages including Node.js, Go, PHP and there are kernels for these languages which can be found [here](https://github.com/jupyter/jupyter/wiki/Jupyter-kernels). diff --git a/docs/en/20-third-party/14-dbeaver.md b/docs/en/20-third-party/14-dbeaver.md index fd0a0672f2..c949987705 100644 --- a/docs/en/20-third-party/14-dbeaver.md +++ b/docs/en/20-third-party/14-dbeaver.md @@ -19,7 +19,7 @@ To use DBeaver to manage TDengine, you need to prepare the following: ![Connect TDengine with DBeaver](./dbeaver/dbeaver-connect-tdengine-en.webp) -2. Configure the TDengine connection by filling in the host address, port number, username, and password. If TDengine is deployed on the local machine, you are only required to fill in the username and password. The default username is root and the default password is taosdata. Click **Test Connection** to check whether the connection is workable. If you do not have the TDengine Java connector installed on the local machine, DBeaver will prompt you to download and install it. +2. Configure the TDengine connection by filling in the host address, port number, username, and password. If TDengine is deployed on the local machine, you are only required to fill in the username and password. The default username is root and the default password is taosdata. Click **Test Connection** to check whether the connection is workable. If you do not have the TDengine Java client library installed on the local machine, DBeaver will prompt you to download and install it. ![Configure the TDengine connection](./dbeaver/dbeaver-config-tdengine-en.webp)) diff --git a/docs/en/20-third-party/50-qstudio.md b/docs/en/20-third-party/50-qstudio.md index 8339678a0e..4140f87775 100644 --- a/docs/en/20-third-party/50-qstudio.md +++ b/docs/en/20-third-party/50-qstudio.md @@ -19,7 +19,7 @@ To connect TDengine using qStudio, you need to complete the following preparatio ![qConnecting TDengine with qStudio](./qstudio/qstudio-connect-tdengine.webp) -2. Configure the TDengine connection by entering the host address, port number, username, and password. If TDengine is deployed on the local machine, you can fill in the username and password only. The default username is "root," and the default password is "taosdata." Click "Test" to test the connection's availability. If the TDengine Java connector is not installed on the local machine, qStudio will prompt you to download and install it. +2. Configure the TDengine connection by entering the host address, port number, username, and password. If TDengine is deployed on the local machine, you can fill in the username and password only. The default username is "root," and the default password is "taosdata." 
Click "Test" to test the connection's availability. If the TDengine Java client library is not installed on the local machine, qStudio will prompt you to download and install it. ![Download Java Connector](./qstudio/qstudio-jdbc-connector-download.webp) diff --git a/docs/en/20-third-party/70-seeq.md b/docs/en/20-third-party/70-seeq.md index e42204dd5d..43c3e79b72 100644 --- a/docs/en/20-third-party/70-seeq.md +++ b/docs/en/20-third-party/70-seeq.md @@ -10,20 +10,20 @@ description: How to use Seeq and TDengine to perform time series data analysis Seeq is an advanced analytics software for the manufacturing industry and the Industrial Internet of Things (IIoT). Seeq supports the use of machine learning innovations within process manufacturing organizations. These capabilities enable organizations to deploy their own or third-party machine learning algorithms into advanced analytics applications used by frontline process engineers and subject matter experts, thus extending the efforts of a single data scientist to many frontline workers. -TDengine can be added as a data source into Seeq via JDBC connector. Once data source is configured, Seeq can read data from TDengine and offers functionalities such as data visualization, analysis, and forecasting. +TDengine can be added as a data source into Seeq via the JDBC client library. Once the data source is configured, Seeq can read data from TDengine and offer functionalities such as data visualization, analysis, and forecasting. ## Prerequisite 1. Install Seeq Server and Seeq Data Lab software 2. Install TDengine or register TDengine Cloud service -## Install TDengine JDBC connector +## Install TDengine JDBC client library 1. Get Seeq data location configuration ``` sudo seeq config get Folders/Data ``` -2. Download the latest TDengine Java connector from maven.org (current is version is [3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar)), and copy the JAR file into the_directory_found_in_step_1/plugins/lib/ +2. Download the latest TDengine Java client library from maven.org (the current version is [3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar)), and copy the JAR file into the_directory_found_in_step_1/plugins/lib/ 3. Restart Seeq server ``` sudo seeq restart diff --git a/docs/en/20-third-party/75-powerbi.md b/docs/en/20-third-party/75-powerbi.md new file mode 100644 index 0000000000..4744467f30 --- /dev/null +++ b/docs/en/20-third-party/75-powerbi.md @@ -0,0 +1,65 @@ +--- +sidebar_label: Power BI +title: Power BI +description: Use Power BI and TDengine to analyze time series data +--- + +## Introduction + +With the TDengine ODBC driver, Power BI can access time series data stored in TDengine. You can import tag data, raw time series data, or aggregated data from TDengine into Power BI to create reports or dashboards without any coding effort. + +## Steps +![Power BI usage steps](./powerbi-step-en.webp) + +### Prerequisites + +1. TDengine server has been installed and is running well. +2. Power BI Desktop has been installed and is running. (If not, please download and install the latest Windows X64 version from [Power BI](https://www.microsoft.com/download/details.aspx?id=58494).)
+ + +## Install Driver + +Depending on your TDengine server version, download the appropriate version of the TDengine client package from the TDengine website ([Download Link](https://docs.taosdata.com/get-started/package/)), or from TDengine Explorer if you are using a local TDengine cluster. Install the TDengine client package on the same Windows machine where Power BI is running. + +### Configure Data Source + +Please refer to [ODBC](../../client-libraries/odbc) to configure the TDengine ODBC driver with a WebSocket connection. + +### Import Data from TDengine to Power BI + +1. Open Power BI and log on, then add the data source by following "Home Page" -> "Get Data" -> "Others" -> "ODBC" -> "Connect". + +2. Choose the data source name, connect to the configured data source, go to the navigator, browse the tables of the selected database, and load the data. + +3. If you want to run some specific SQL, click "Advanced Options", enter your SQL in the dialog box that opens, and load the data. + + +To make better use of Power BI for analyzing the data stored in TDengine, you need to understand the concepts of dimension, metric, time series, and correlation, and then use your own SQL to import data. + +1. Dimension: this is normally categorical (text) data that describes information such as device, collection point, and model. In the supertable template of TDengine, tag columns are used to store dimension information. You can use SQL like `select distinct tbname, tag1, tag2 from supertable` to get the dimensions. + +2. Metric: quantitative (numeric) fields that can be aggregated, such as with SUM, AVERAGE, or MINIMUM. If the collection frequency is one record per second, there are 31,536,000 records in one year, and importing that much raw data into Power BI would be very inefficient. In TDengine, you can use data partition queries and window partition queries, in combination with the window-related pseudo columns, to import downsampled data into Power BI; a combined SQL sketch follows this list. For more details, please refer to [TDengine Specialized Queries](https://docs.taosdata.com/taos-sql/distinguished/). + + - Window partition query: for example, thermal meters collect one data point per second, but you need to query the average temperature every 10 minutes. You can use a window subclause to get the downsampled data you need. The corresponding SQL is like `select tbname, _wstart date, avg(temperature) temp from table interval(10m)`, in which _wstart is a pseudo column indicating the start time of a window, 10m is the duration of the window, and `avg(temperature)` is the aggregate value inside the window. + + - Data partition query: if you want to get the aggregate values of many thermal meters, you can first partition the data and then perform a series of calculations within each partitioned data space. The SQL you need is `partition by part_list`. The most common use of data partitioning is that, when querying a supertable, you can partition the data by subtable according to tags so that the data of each subtable forms a single time series, which facilitates analytical processing of time series data. + +3. Time series: when plotting curves or aggregating data along a time line, a date column is normally required. Dates or times can be imported from Excel, or retrieved from TDengine using an SQL statement like `select _wstart date, count(*) cnt from test.meters where ts between A and B interval(1d) fill(0)`, in which the fill() subclause indicates the fill mode used when data is missing and the pseudo column _wstart provides the date to retrieve. + +4. Correlation: indicates how to correlate data. Dimensions and metrics can be correlated by tbname; dates and metrics can be correlated by date. All of these work together to form visual reports.
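+
+As a hedged illustration of the concepts above, the sketch below combines a dimension query, a downsampled metric query, and a gap-filled time-series query against the sample `test.meters` supertable that is also used in the example below (tags `groupid` and `location`; data columns `current`, `voltage`, `phase`). The column aliases and the date range are arbitrary placeholders; adapt them to your own schema, and drop the `--` comment lines if your SQL client does not accept comments.
+
+```sql
+-- Dimensions: one row per device, loaded into Power BI as the "tags" table.
+SELECT DISTINCT tbname, groupid, location FROM test.meters;
+
+-- Metrics: 10-minute averages per device, loaded as the "data" table.
+-- PARTITION BY splits the data per subtable; INTERVAL downsamples each partition.
+SELECT tbname, _wstart AS ws, AVG(current) AS avg_current, AVG(voltage) AS avg_voltage
+FROM test.meters
+PARTITION BY tbname
+INTERVAL(10m);
+
+-- Time series: one row per day, with missing days filled with 0, usable as a date axis.
+SELECT _wstart AS dt, COUNT(*) AS cnt
+FROM test.meters
+WHERE ts BETWEEN '2023-01-01 00:00:00' AND '2023-01-07 00:00:00'
+INTERVAL(1d) FILL(VALUE, 0);
+```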
+ +### Example - Meters + +TDengine has its own specific data model: it uses a supertable as a template and creates a separate table for each device. Each table can have a maximum of 4,096 data columns and 128 tags. In the meters example, assume each meter generates one record per second; that is 86,400 records per day and 31,536,000 records per year, so 1,000 meters alone occupy about 500 GB of disk space. The common usage of Power BI is therefore to map tags to dimension columns and to map aggregations of the data columns to metric columns, providing indicators for decision makers. + +1. Import Dimensions + +Import the tags of the tables into Power BI and name the result "tags"; the SQL is like `select distinct tbname, groupid, location from test.meters;`. + +2. Import Metrics + +In Power BI, import the average current, average voltage, and average phase with a 1-hour window, and name the result "data"; the SQL is like `select tbname, _wstart ws, avg(current), avg(voltage), avg(phase) from test.meters PARTITION by tbname interval(1h)`. + +3. Correlate Dimensions and Metrics + +In Power BI, open the model view, correlate "tags" and "data", and set "tbname" as the correlation column. You can then use the data in histograms, pie charts, and so on. For more information about building visual reports in Power BI, please refer to [Power BI](https://learn.microsoft.com/power-bi/). \ No newline at end of file diff --git a/docs/en/20-third-party/powerbi-step-en.webp b/docs/en/20-third-party/powerbi-step-en.webp new file mode 100644 index 0000000000..5d6eff1ac2 Binary files /dev/null and b/docs/en/20-third-party/powerbi-step-en.webp differ diff --git a/docs/en/25-application/_03-immigrate.md b/docs/en/25-application/_03-immigrate.md index cdb3d5591c..4dc7ee711c 100644 --- a/docs/en/25-application/_03-immigrate.md +++ b/docs/en/25-application/_03-immigrate.md @@ -176,7 +176,7 @@ After completing the query, if the data written does not differ from what is exp TDengine does not support querying, or data fetching using the OpenTSDB query syntax but does provide a counterpart for each of the OpenTSDB queries. The corresponding query processing can be adapted and applied in a manner obtained by examining Appendix 1. To fully understand the types of queries supported by TDengine, refer to the TDengine user manual. -TDengine supports the standard JDBC 3.0 interface for manipulating databases, but you can also use other types of high-level language connectors for querying and reading data to suit your application. Please read the user manual for specific operations and usage. +TDengine supports the standard JDBC 3.0 interface for manipulating databases, but you can also use other types of high-level language client libraries for querying and reading data to suit your application. Please read the user manual for specific operations and usage. ## Historical Data Migration diff --git a/docs/en/27-train-faq/01-faq.md b/docs/en/27-train-faq/01-faq.md index 715704a0c3..e8e1386197 100644 --- a/docs/en/27-train-faq/01-faq.md +++ b/docs/en/27-train-faq/01-faq.md @@ -54,7 +54,7 @@ This error indicates that the client could not connect to the server. Perform the following troubleshooting steps: 6. Verify that your firewall settings allow all hosts in the cluster to communicate on ports 6030 and 6041 (TCP and UDP). You can run `ufw status` (Ubuntu) or `firewall-cmd --list-port` (CentOS) to check the configuration. -7.
If you are using the Python, Java, Go, Rust, C#, or Node.js connector on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable. +7. If you are using the Python, Java, Go, Rust, C#, or Node.js client library on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable. 8. If you are using macOS, verify that `libtaos.dylib` is in the `/usr/local/lib` directory and `/usr/local/lib` is in the `DYLD_LIBRARY_PATH` environment variable.. @@ -142,7 +142,7 @@ Timestamps are processed as follows: 1. The client uses its system timezone unless it has been configured otherwise. 2. A timezone configured in `taos.cfg` takes precedence over the system timezone. -3. A timezone explicitly specified when establishing a connection to TDengine through a connector takes precedence over `taos.cfg` and the system timezone. For example, the Java connector allows you to specify a timezone in the JDBC URL. +3. A timezone explicitly specified when establishing a connection to TDengine through a client library takes precedence over `taos.cfg` and the system timezone. For example, the Java client library allows you to specify a timezone in the JDBC URL. 4. If you use an RFC 3339 timestamp (2013-04-12T15:52:01.123+08:00), or an ISO 8601 timestamp (2013-04-12T15:52:01.123+0800), the timezone specified in the timestamp is used instead of the timestamps configured using any other method. ### 11. Which network ports are required by TDengine? diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx index f945f53b5c..50913e87c8 100644 --- a/docs/zh/07-develop/07-tmq.mdx +++ b/docs/zh/07-develop/07-tmq.mdx @@ -23,7 +23,7 @@ import CDemo from "./_sub_c.mdx"; 为了实现上述功能,TDengine 会为 WAL (Write-Ahead-Log) 文件自动创建索引以支持快速随机访问,并提供了灵活可配置的文件切换与保留机制:用户可以按需指定 WAL 文件保留的时间以及大小(详见 create database 语句)。通过以上方式将 WAL 改造成了一个保留事件到达顺序的、可持久化的存储引擎(但由于 TSDB 具有远比 WAL 更高的压缩率,我们不推荐保留太长时间,一般来说,不超过几天)。 对于以 topic 形式创建的查询,TDengine 将对接 WAL 而不是 TSDB 作为其存储引擎。在消费时,TDengine 根据当前消费进度从 WAL 直接读取数据,并使用统一的查询引擎实现过滤、变换等操作,将数据推送给消费者。 -下面为关于数据订阅的一些说明,需要对TDengine的架构有一些了解,结合各个语言链接器的接口使用。 +下面为关于数据订阅的一些说明,需要对TDengine的架构有一些了解,结合各个语言链接器的接口使用。(可使用时再了解) - 一个消费组消费同一个topic下的所有数据,不同消费组之间相互独立; - 一个消费组消费同一个topic所有的vgroup,消费组可由多个消费者组成,但一个vgroup仅被一个消费者消费,如果消费者数量超过了vgroup数量,多余的消费者不消费数据; - 在服务端每个vgroup仅保存一个offset,每个vgroup的offset是单调递增的,但不一定连续。各个vgroup的offset之间没有关联; @@ -45,12 +45,13 @@ import CDemo from "./_sub_c.mdx"; 本文档不对消息队列本身的知识做更多的介绍,如果需要了解,请自行搜索。 +说明: 从3.2.0.0版本开始,数据订阅支持vnode迁移和分裂。 由于数据订阅依赖wal文件,而在vnode迁移和分裂的过程中,wal并不会同步过去,所以迁移或分裂后,之前没消费完的wal数据后消费不到。所以请保证之前把数据全部消费完后,再进行vnode迁移或分裂,否则,消费会丢失数据。 ## 主要数据结构和 API -不同语言下, TMQ 订阅相关的 API 及数据结构如下: +不同语言下, TMQ 订阅相关的 API 及数据结构如下(注意consumer结构不是线程安全的,在一个线程使用consumer时,不要在另一个线程close这个consumer): diff --git a/docs/zh/08-connector/50-odbc.mdx b/docs/zh/08-connector/50-odbc.mdx new file mode 100644 index 0000000000..0668aed7df --- /dev/null +++ b/docs/zh/08-connector/50-odbc.mdx @@ -0,0 +1,99 @@ +--- +sidebar_label: ODBC +title: TDengine ODBC +--- + + +## 简介 + +TDengine ODBC 是为 TDengine 实现的 ODBC 驱动程序,支持 Windows 系统的应用(如 [PowerBI](https://powerbi.microsoft.com/zh-cn/) 等)通过 ODBC 标准接口访问本地、远程和云服务的 TDengine 数据库。 + +TDengine ODBC 提供基于 WebSocket(推荐)和 原生连接两种方式连接 TDengine 数据库,使用时可以为 TDengine 数据源设置不同的连接方式。访问云服务时必须使用 WebSocket 连接方式。 + +注意:TDengine ODBC 只支持 64 位系统,调用 TDengine ODBC 必须通过 64 位的 ODBC 驱动管理器进行。因此调用 
ODBC 的程序不能使用 32 位版本。 + +想更多了解 TDengine 时序时序数据库的使用,可访问 [TDengine官方文档](https://docs.taosdata.com/intro/)。 + +## 安装 + +1. 仅支持 Windows 平台。Windows 上需要安装过 VC 运行时库,可在此下载安装 [VC运行时库](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170) 如果已经安装VS开发工具可忽略。 + +2. 安装 TDengine Windows 客户端,版本在 3.2.1.0 或以上,都会包含 TDengine 的 ODBC 驱动。 + +## 配置数据源 + +### 数据源连接类型与区别 + +TDengine ODBC 支持两种连接 TDengine 数据库方式:Websocket 连接与 Native 连接,其区别如下: + +1. 只有 Websocket 支持连接云服务 + +2. Websocket 连接的兼容性更好,一般不需要随着 TDengine 数据库服务端升级而升级客户端的库。 + +3. Native 连接通常性能更好一点,但是必须与 TDengine 数据库服务端的版本保持一致。 + +4. 对于一般用户,建议使用 **Websocket** 连接方式,性能与 Native 差别不大,兼容性更好。 + +### WebSocket 连接 + +1. 【开始】菜单搜索打开【ODBC 数据源(64 位)】管理工具(注意不要选择ODBC 数据源(32 位)) + +2. 选中【用户 DSN】标签页,通过【添加(D)】按钮进入"创建数据源"界面 + +3. 选择想要添加的数据源,这里我们选择【TDengine】 + +4. 点击完成,进入 TDengine ODBC 数据源配置页面,填写如下必要信息 + + ![ODBC websocket connection config](./assets/odbc-ws-config-zh.webp) + + 4.1 【DSN】:Data Source Name 必填,为新添加的 ODBC 数据源命名 + + 4.2【Connection Type】 : 必选,选择连接类型,这里选择 【Websocket】 + + 4.3【URL】必填,ODBC 数据源 URL,example: `http://localhost:6041`, 云服务的 url 示例: `https://gw.cloud.taosdata.com?token=your_token` + + 4.4【Database】选填,需要连接的默认数据库 + + 4.5【User】仅供第5步测试连接使用,选填,数据库用户名,如果不填,TDengine 默认 root + + 4.6【Password】仅供第5步测试连接使用,选填,数据库用户密码,如果不填,TDengine 默认 taosdata + +5. 点【Test Connecting...】测试连接情况,如果成功,提示"connecting success" + +6. 点【确定】,即可保存配置并退出 + +7. 也可以在第2步选择已经配置好的数据源名通过【配置】按钮进入配置页面,修改已有配置 + +### 原生连接(不支持云服务) + +1. 【开始】菜单搜索打开【ODBC 数据源(64 位)】管理工具(注意不要选择ODBC 数据源(32 位)) + +2. 选中【用户 DSN】标签页,通过【添加(D)】按钮进入"创建数据源"界面 + +3. 选择想要添加的数据源,这里我们选择【TDengine】 + +4. 点击完成,进入 TDengine ODBC 数据源配置页面,填写如下必要信息 + + ![ODBC native connection config](./assets/odbc-native-config-zh.webp) + + 4.1 【DSN】:Data Source Name 必填,为新添加的 ODBC 数据源命名 + + 4.2 【Connection Type】 : 必选,选择连接类型,这里选择 【Native】 原生连接; + + 4.3 【Server】必填,ODBC 数据源 Server 地址,example: `localhost:6030` + + 4.4 【Database】选填,需要连接的默认数据库 + + 4.5 【User】仅供第5步测试连接使用,选填,数据库用户名,如果不填,TDengine 默认 root + + 4.6 【Password】仅供第5步测试连接使用,选填,数据库用户密码,如果不填,TDengine 默认 taosdata + +5. 点【Test Connecting...】测试连接情况,如果成功,提示"connecting success" + +6. 点【确定】,即可保存配置并退出 + +7. 
也可以在第2步选择已经配置好的数据源名通过【配置】按钮进入配置页面,修改已有配置 + +## 与第三方集成 + +作为使用 TDengine ODBC driver 的一个示例,你可以使用 Power BI 与 TDengine 分析时序数据。更多细节请参考 [Power BI](../../third-party/powerbi) diff --git a/docs/zh/08-connector/assets/odbc-check-data.webp b/docs/zh/08-connector/assets/odbc-check-data.webp new file mode 100644 index 0000000000..bc7867c089 Binary files /dev/null and b/docs/zh/08-connector/assets/odbc-check-data.webp differ diff --git a/docs/zh/08-connector/assets/odbc-native-config-zh.webp b/docs/zh/08-connector/assets/odbc-native-config-zh.webp new file mode 100644 index 0000000000..ed9005f2a1 Binary files /dev/null and b/docs/zh/08-connector/assets/odbc-native-config-zh.webp differ diff --git a/docs/zh/08-connector/assets/odbc-ws-config-zh.webp b/docs/zh/08-connector/assets/odbc-ws-config-zh.webp new file mode 100644 index 0000000000..c8a6e11011 Binary files /dev/null and b/docs/zh/08-connector/assets/odbc-ws-config-zh.webp differ diff --git a/docs/zh/12-taos-sql/02-database.md b/docs/zh/12-taos-sql/02-database.md index c3e0e9a07a..58625d22e2 100644 --- a/docs/zh/12-taos-sql/02-database.md +++ b/docs/zh/12-taos-sql/02-database.md @@ -46,6 +46,7 @@ database_option: { - last_row:表示缓存子表最近一行数据。这将显著改善 LAST_ROW 函数的性能表现。 - last_value:表示缓存子表每一列的最近的非 NULL 值。这将显著改善无特殊影响(WHERE、ORDER BY、GROUP BY、INTERVAL)下的 LAST 函数的性能表现。 - both:表示同时打开缓存最近行和列功能。 + Note:CacheModel 值来回切换有可能导致 last/last_row 的查询结果不准确,请谨慎操作。推荐保持打开。 - CACHESIZE:表示每个 vnode 中用于缓存子表最近数据的内存大小。默认为 1 ,范围是[1, 65536],单位是 MB。 - COMP:表示数据库文件压缩标志位,缺省值为 2,取值范围为 [0, 2]。 - 0:表示不压缩。 @@ -137,6 +138,10 @@ alter_database_option: { 如果 cacheload 非常接近 cachesize,则 cachesize 可能过小。 如果 cacheload 明显小于 cachesize 则 cachesize 是够用的。可以根据这个原则判断是否需要修改 cachesize 。具体修改值可以根据系统可用内存情况来决定是加倍或者是提高几倍。 +4. stt_trigger + +在修改 stt_trigger 参数之前请先停止数据库写入。 + :::note 其它参数在 3.0.0.0 中暂不支持修改 diff --git a/docs/zh/12-taos-sql/05-insert.md b/docs/zh/12-taos-sql/05-insert.md index 583d047c43..1f31698d88 100644 --- a/docs/zh/12-taos-sql/05-insert.md +++ b/docs/zh/12-taos-sql/05-insert.md @@ -158,7 +158,7 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/c 自动建表, 表名通过tbname列指定 ```sql INSERT INTO meters(tbname, location, groupId, ts, current, phase) - values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 0.32) - ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 0.33) - ('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 0.33) + values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32) + ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33) + ('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33) ``` diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md index 04508ceede..23ae025610 100755 --- a/docs/zh/12-taos-sql/06-select.md +++ b/docs/zh/12-taos-sql/06-select.md @@ -259,7 +259,12 @@ GROUP BY 子句中的表达式可以包含表或视图中的任何列,这些 ## PARTITION BY -PARTITION BY 子句是 TDengine 特色语法,按 part_list 对数据进行切分,在每个切分的分片中进行计算。 +PARTITION BY 子句是 TDengine 3.0版本引入的特色语法,用于根据 part_list 对数据进行切分,在每个切分的分片中可以进行各种计算。 + +PARTITION BY 与 GROUP BY 基本含义相似,都是按照指定列表进行数据分组然后进行计算,不同点在于 PARTITION BY 没有 GROUP BY 子句的 SELECT 列表的各种限制,组内可以进行任意运算(常量、聚合、标量、表达式等),因此在使用上 PARTITION BY 完全兼容 GROUP BY,所有使用 GROUP BY 子句的地方都可以替换为 PARTITION BY。 + +因为 PARTITION BY 没有返回一行聚合数据的要求,因此还可以支持在分组切片后的各种窗口运算,所有需要分组进行的窗口运算都只能使用 PARTITION BY 子句。 + 详见 [TDengine 特色查询](../distinguished) diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index 26313390a6..66322d55f1 100644 
--- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -1,1499 +1,1500 @@ ---- -sidebar_label: 函数 -title: 函数 -description: TDengine 支持的函数列表 -toc_max_heading_level: 4 ---- - -## 单行函数 - -单行函数为查询结果中的每一行返回一个结果行。 - -### 数学函数 - -#### ABS - -```sql -ABS(expr) -``` - -**功能说明**:获得指定字段的绝对值。 - -**返回结果类型**:与指定字段的原始数据类型一致。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - -#### ACOS - -```sql -ACOS(expr) -``` - -**功能说明**:获得指定字段的反余弦结果。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - -#### ASIN - -```sql -ASIN(expr) -``` - -**功能说明**:获得指定字段的反正弦结果。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - - -#### ATAN - -```sql -ATAN(expr) -``` - -**功能说明**:获得指定字段的反正切结果。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - - -#### CEIL - -```sql -CEIL(expr) -``` - -**功能说明**:获得指定字段的向上取整数的结果。 - -**返回结果类型**:与指定字段的原始数据类型一致。 - -**适用数据类型**:数值类型。 - -**适用于**: 表和超级表。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**使用说明**: 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - -#### COS - -```sql -COS(expr) -``` - -**功能说明**:获得指定字段的余弦结果。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - -#### FLOOR - -```sql -FLOOR(expr) -``` - -**功能说明**:获得指定字段的向下取整数的结果。 - 其他使用说明参见 CEIL 函数描述。 - -#### LOG - -```sql -LOG(expr1[, expr2]) -``` - -**功能说明**:获得 expr1 对于底数 expr2 的对数。如果 expr2 参数省略,则返回指定字段的自然对数值。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - - -#### POW - -```sql -POW(expr1, expr2) -``` - -**功能说明**:获得 expr1 的指数为 expr2 的幂。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - - -#### ROUND - -```sql -ROUND(expr) -``` - -**功能说明**:获得指定字段的四舍五入的结果。 - 其他使用说明参见 CEIL 函数描述。 - - -#### SIN - -```sql -SIN(expr) -``` - -**功能说明**:获得指定字段的正弦结果。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - -#### SQRT - -```sql -SQRT(expr) -``` - -**功能说明**:获得指定字段的平方根。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - -#### TAN - -```sql -TAN(expr) -``` - -**功能说明**:获得指定字段的正切结果。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 - -### 字符串函数 - -字符串函数的输入参数为字符串类型,返回结果为数值类型或字符串类型。 - -#### CHAR_LENGTH - -```sql -CHAR_LENGTH(expr) -``` - -**功能说明**:以字符计数的字符串长度。 - -**返回结果类型**:BIGINT。 - -**适用数据类型**:VARCHAR, NCHAR。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -#### CONCAT - -```sql -CONCAT(expr1, expr2 [, expr] ... 
) -``` - -**功能说明**:字符串连接函数。 - -**返回结果类型**:如果所有参数均为 VARCHAR 类型,则结果类型为 VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果参数包含NULL值,则输出值为NULL。 - -**适用数据类型**:VARCHAR, NCHAR。 该函数最小参数个数为2个,最大参数个数为8个。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -#### CONCAT_WS - -```sql -CONCAT_WS(separator_expr, expr1, expr2 [, expr] ...) -``` - -**功能说明**:带分隔符的字符串连接函数。 - -**返回结果类型**:如果所有参数均为VARCHAR类型,则结果类型为VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果参数包含NULL值,则输出值为NULL。 - -**适用数据类型**:VARCHAR, NCHAR。 该函数最小参数个数为3个,最大参数个数为9个。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -#### LENGTH - -```sql -LENGTH(expr) -``` - -**功能说明**:以字节计数的长度。 - -**返回结果类型**:BIGINT。 - -**适用数据类型**:VARCHAR, NCHAR, VARBINARY。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -#### LOWER - -```sql -LOWER(expr) -``` - -**功能说明**:将字符串参数值转换为全小写字母。 - -**返回结果类型**:与输入字段的原始类型相同。 - -**适用数据类型**:VARCHAR, NCHAR。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -#### LTRIM - -```sql -LTRIM(expr) -``` - -**功能说明**:返回清除左边空格后的字符串。 - -**返回结果类型**:与输入字段的原始类型相同。 - -**适用数据类型**:VARCHAR, NCHAR。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -#### RTRIM - -```sql -RTRIM(expr) -``` - -**功能说明**:返回清除右边空格后的字符串。 - -**返回结果类型**:与输入字段的原始类型相同。 - -**适用数据类型**:VARCHAR, NCHAR。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -#### SUBSTR - -```sql -SUBSTR(expr, pos [,len]) -``` - -**功能说明**:从源字符串 str 中的指定位置 pos 开始取一个长度为 len 的子串并返回。如果输入参数 len 被忽略,返回的子串包含从 pos 开始的整个字串。 - -**返回结果类型**:与输入字段的原始类型相同。 - -**适用数据类型**:VARCHAR, NCHAR。输入参数 pos 可以为正数,也可以为负数。如果 pos 是正数,表示开始位置从字符串开头正数计算。如果 pos 为负数,表示开始位置从字符串结尾倒数计算。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -#### UPPER - -```sql -UPPER(expr) -``` - -**功能说明**:将字符串参数值转换为全大写字母。 - -**返回结果类型**:与输入字段的原始类型相同。 - -**适用数据类型**:VARCHAR, NCHAR。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -### 转换函数 - -转换函数将值从一种数据类型转换为另一种数据类型。 - -#### CAST - -```sql -CAST(expr AS type_name) -``` - -**功能说明**:数据类型转换函数,返回 expr 转换为 type_name 指定的类型后的结果。只适用于 select 子句中。 - -**返回结果类型**:CAST 中指定的类型(type_name)。 - -**适用数据类型**:输入参数 expr 的类型可以是除JSON和VARBINARY外的所有类型。如果 type_name 为 VARBINARY,则 expr 只能是 VARCHAR 类型。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**: - -- 对于不能支持的类型转换会直接报错。 -- 对于类型支持但某些值无法正确转换的情况,对应的转换后的值以转换函数输出为准。目前可能遇到的几种情况: - 1)字符串类型转换数值类型时可能出现的无效字符情况,例如"a"可能转为0,但不会报错。 - 2)转换到数值类型时,数值大于type_name可表示的范围时,则会溢出,但不会报错。 - 3)转换到字符串类型时,如果转换后长度超过type_name中指定的长度,则会截断,但不会报错。 - -#### TO_ISO8601 - -```sql -TO_ISO8601(expr [, timezone]) -``` - -**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果则附带当前客户端的系统时区信息。 - -**返回结果数据类型**:VARCHAR 类型。 - -**适用数据类型**:INTEGER, TIMESTAMP。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - -**使用说明**: - -- timezone 参数允许输入的时区格式为: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。例如,TO_ISO8601(1, "+00:00")。 -- 如果输入是表示 UNIX 时间戳的整形,返回格式精度由时间戳的位数决定; -- 如果输入是 TIMESTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。 - - -#### TO_JSON - -```sql -TO_JSON(str_literal) -``` - -**功能说明**: 将字符串常量转换为 JSON 类型。 - -**返回结果数据类型**: JSON。 - -**适用数据类型**: JSON 字符串,形如 '{ "literal" : literal }'。'{}'表示空值。键必须为字符串字面量,值可以为数值字面量、字符串字面量、布尔字面量或空值字面量。str_literal中不支持转义符。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**: 表和超级表。 - - -#### TO_UNIXTIMESTAMP - -```sql -TO_UNIXTIMESTAMP(expr [, return_timestamp]) - -return_timestamp: { - 0 - | 1 -} -``` - -**功能说明**:将日期时间格式的字符串转换成为 UNIX 时间戳。 - -**返回结果数据类型**:BIGINT, TIMESTAMP。 - -**应用字段**:VARCHAR, NCHAR。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 输入的日期时间字符串须符合 ISO8601/RFC3339 标准,无法转换的字符串格式将返回 NULL。 -- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 -- return_timestamp 指定函数返回值是否为时间戳类型,设置为1时返回 TIMESTAMP 
类型,设置为0时返回 BIGINT 类型。如不指定缺省返回 BIGINT 类型。 - -#### TO_CHAR - -```sql -TO_CHAR(ts, format_str_literal) -``` - -**功能说明**: 将timestamp类型按照指定格式转换为字符串 - -**返回结果数据类型**: VARCHAR - -**应用字段**: TIMESTAMP - -**嵌套子查询支持**: 适用于内层查询和外层查询 - -**适用于**: 表和超级表 - -**支持的格式** - -| **格式** | **说明**| **例子** | -| --- | --- | --- | -|AM,am,PM,pm| 无点分隔的上午下午 | 07:00:00am| -|A.M.,a.m.,P.M.,p.m.| 有点分隔的上午下午| 07:00:00a.m.| -|YYYY,yyyy|年, 4个及以上数字| 2023-10-10| -|YYY,yyy| 年, 最后3位数字| 023-10-10| -|YY,yy| 年, 最后2位数字| 23-10-10| -|Y,y|年, 最后一位数字| 3-10-10| -|MONTH|月, 全大写| 2023-JANUARY-01| -|Month|月, 首字母大写| 2023-January-01| -|month|月, 全小写| 2023-january-01| -|MON| 月, 缩写, 全大写(三个字符)| JAN, SEP| -|Mon| 月, 缩写, 首字母大写| Jan, Sep| -|mon|月, 缩写, 全小写| jan, sep| -|MM,mm|月, 数字 01-12|2023-01-01| -|DD,dd|月日, 01-31|| -|DAY|周日, 全大写|MONDAY| -|Day|周日, 首字符大写|Monday| -|day|周日, 全小写|monday| -|DY|周日, 缩写, 全大写|MON| -|Dy|周日, 缩写, 首字符大写|Mon| -|dy|周日, 缩写, 全小写|mon| -|DDD|年日, 001-366|| -|D,d|周日, 数字, 1-7, Sunday(1) to Saturday(7)|| -|HH24,hh24|小时, 00-23|2023-01-30 23:59:59| -|hh12,HH12, hh, HH| 小时, 01-12|2023-01-30 12:59:59PM| -|MI,mi|分钟, 00-59|| -|SS,ss|秒, 00-59|| -|MS,ms|毫秒, 000-999|| -|US,us|微秒, 000000-999999|| -|NS,ns|纳秒, 000000000-999999999|| -|TZH,tzh|时区小时|2023-01-30 11:59:59PM +08| - -**使用说明**: -- `Month`, `Day`等的输出格式是左对齐的, 右侧添加空格, 如`2023-OCTOBER -01`, `2023-SEPTEMBER-01`, 9月是月份中英文字母数最长的, 因此9月没有空格. 星期类似. -- 使用`ms`, `us`, `ns`时, 以上三种格式的输出只在精度上不同, 比如ts为 `1697182085123`, `ms` 的输出为 `123`, `us` 的输出为 `123000`, `ns` 的输出为 `123000000`. -- 时间格式中无法匹配规则的内容会直接输出. 如果想要在格式串中指定某些能够匹配规则的部分不做转换, 可以使用双引号, 如`to_char(ts, 'yyyy-mm-dd "is formated by yyyy-mm-dd"')`. 如果想要输出双引号, 那么在双引号之前加一个反斜杠, 如 `to_char(ts, '\"yyyy-mm-dd\"')` 将会输出 `"2023-10-10"`. -- 那些输出是数字的格式, 如`YYYY`, `DD`, 大写与小写意义相同, 即`yyyy` 和 `YYYY` 可以互换. -- 推荐在时间格式中带时区信息,如果不带则默认输出的时区为服务端或客户端所配置的时区. -- 输入时间戳的精度由所查询表的精度确定, 若未指定表, 则精度为毫秒. - -#### TO_TIMESTAMP - -```sql -TO_TIMESTAMP(ts_str_literal, format_str_literal) -``` - -**功能说明**: 将字符串按照指定格式转化为时间戳. - -**返回结果数据类型**: TIMESTAMP - -**应用字段**: VARCHAR - -**嵌套子查询支持**: 适用于内层查询和外层查询 - -**适用于**: 表和超级表 - -**支持的格式**: 与`to_char`相同 - -**使用说明**: -- 若`ms`, `us`, `ns`同时指定, 那么结果时间戳包含上述三个字段的和. 如 `to_timestamp('2023-10-10 10:10:10.123.000456.000000789', 'yyyy-mm-dd hh:mi:ss.ms.us.ns')` 输出为 `2023-10-10 10:10:10.123456789`对应的时间戳. -- `MONTH`, `MON`, `DAY`, `DY` 以及其他输出为数字的格式的大小写意义相同, 如 `to_timestamp('2023-JANUARY-01', 'YYYY-month-dd')`, `month`可以被替换为`MONTH` 或者`Month`. -- 如果同一字段被指定了多次, 那么前面的指定将会被覆盖. 如 `to_timestamp('2023-22-10-10', 'yyyy-yy-MM-dd')`, 输出年份是`2022`. -- 为避免转换时使用了非预期的时区,推荐在时间中携带时区信息,例如'2023-10-10 10:10:10+08',如果未指定时区则默认时区为服务端或客户端指定的时区。 -- 如果没有指定完整的时间,那么默认时间值为指定或默认时区的 `1970-01-01 00:00:00`, 未指定部分使用该默认值中的对应部分. 暂不支持只指定年日而不指定月日的格式, 如'yyyy-mm-DDD', 支持'yyyy-mm-DD'. -- 如果格式串中有`AM`, `PM`等, 那么小时必须是12小时制, 范围必须是01-12. -- `to_timestamp`转换具有一定的容错机制, 在格式串和时间戳串不完全对应时, 有时也可转换, 如: `to_timestamp('200101/2', 'yyyyMM1/dd')`, 格式串中多出来的1会被丢弃. 格式串与时间戳串中多余的空格字符(空格, tab等)也会被 自动忽略. 如`to_timestamp(' 23 年 - 1 月 - 01 日 ', 'yy 年-MM月-dd日')` 可以被成功转换. 虽然`MM`等字段需要两个数字对应(只有一位时前面补0), 在`to_timestamp`时, 一个数字也可以成功转换. -- 输出时间戳的精度与查询表的精度相同, 若查询未指定表, 则输出精度为毫秒. 如`select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns')`的输出将会把微妙和纳秒进行截断. 如果指定一张纳秒表, 那么就不会发生截断, 如`select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns') from db_ns.table_ns limit 1`. 
- - -### 时间和日期函数 - -时间和日期函数对时间戳类型进行操作。 - -所有返回当前时间的函数,如NOW、TODAY和TIMEZONE,在一条SQL语句中不论出现多少次都只会被计算一次。 - -#### NOW - -```sql -NOW() -``` - -**功能说明**:返回客户端当前系统时间。 - -**返回结果数据类型**:TIMESTAMP。 - -**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。 - -**适用于**:表和超级表。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**使用说明**: - -- 支持时间加减操作,如 NOW() + 1s, 支持的时间单位如下: - b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 -- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 - - -#### TIMEDIFF - -```sql -TIMEDIFF(expr1, expr2 [, time_unit]) -``` - -**功能说明**:计算两个时间戳之间的差值,并近似到时间单位 time_unit 指定的精度。 - -**返回结果数据类型**:BIGINT。 - -**应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 - -**适用于**:表和超级表。 - -**嵌套子查询支持**:适用于内层查询和外层查询。 - -**使用说明**: -- 支持的时间单位 time_unit 如下: - 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 -- 如果时间单位 time_unit 未指定, 返回的时间差值精度与当前 DATABASE 设置的时间精度一致。 -- 输入包含不符合时间日期格式的字符串则返回 NULL。 - - -#### TIMETRUNCATE - -```sql -TIMETRUNCATE(expr, time_unit [, ignore_timezone]) - -ignore_timezone: { - 0 - | 1 -} -``` - -**功能说明**:将时间戳按照指定时间单位 time_unit 进行截断。 - -**返回结果数据类型**:TIMESTAMP。 - -**应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 - -**适用于**:表和超级表。 - -**使用说明**: -- 支持的时间单位 time_unit 如下: - 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 -- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 -- 输入包含不符合时间日期格式的字符串则返回 NULL。 -- 当使用 1d 作为时间单位对时间戳进行截断时, 可通过设置 ignore_timezone 参数指定返回结果的显示是否忽略客户端时区的影响。 - 例如客户端所配置时区为 UTC+0800, 则 TIMETRUNCATE('2020-01-01 23:00:00', 1d, 0) 返回结果为 '2020-01-01 08:00:00'。 - 而使用 TIMETRUNCATE('2020-01-01 23:00:00', 1d, 1) 设置忽略时区时,返回结果为 '2020-01-01 00:00:00' - ignore_timezone 如果忽略的话,则默认值为 1 。 - - - -#### TIMEZONE - -```sql -TIMEZONE() -``` - -**功能说明**:返回客户端当前时区信息。 - -**返回结果数据类型**:VARCHAR。 - -**应用字段**:无 - -**适用于**:表和超级表。 - - -#### TODAY - -```sql -TODAY() -``` - -**功能说明**:返回客户端当日零时的系统时间。 - -**返回结果数据类型**:TIMESTAMP。 - -**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 支持时间加减操作,如 TODAY() + 1s, 支持的时间单位如下: - b(纳秒),u(微秒),a(毫秒),s(秒),m(分),h(小时),d(天),w(周)。 -- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 - - -## 聚合函数 - -聚合函数为查询结果集的每一个分组返回单个结果行。可以由 GROUP BY 或窗口切分子句指定分组,如果没有,则整个查询结果集视为一个分组。 - -TDengine 支持针对数据的聚合查询。提供如下聚合函数。 - -### APERCENTILE - -```sql -APERCENTILE(expr, p [, algo_type]) - -algo_type: { - "default" - | "t-digest" -} -``` - -**功能说明**:统计表/超级表中指定列的值的近似百分比分位数,与 PERCENTILE 函数相似,但是返回近似结果。 - -**返回数据类型**: DOUBLE。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - -**说明**: -- p值范围是[0,100],当为0时等同于MIN,为100时等同于MAX。 -- algo_type 取值为 "default" 或 "t-digest"。 输入为 "default" 时函数使用基于直方图算法进行计算。输入为 "t-digest" 时使用t-digest算法计算分位数的近似结果。如果不指定 algo_type 则使用 "default" 算法。 -- "t-digest"算法的近似结果对于输入数据顺序敏感,对超级表查询时不同的输入排序结果可能会有微小的误差。 - -### AVG - -```sql -AVG(expr) -``` - -**功能说明**:统计指定字段的平均值。 - -**返回数据类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - - -### COUNT - -```sql -COUNT({* | expr}) -``` - -**功能说明**:统计指定字段的记录行数。 - -**返回数据类型**:BIGINT。 - -**适用数据类型**:全部类型字段。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 可以使用星号(\*)来替代具体的字段,使用星号(\*)返回全部记录数量。 -- 如果统计字段是具体的列,则返回该列中非 NULL 值的记录数量。 - - -### ELAPSED - -```sql -ELAPSED(ts_primary_key [, time_unit]) -``` - -**功能说明**:elapsed函数表达了统计周期内连续的时间长度,和twa函数配合使用可以计算统计曲线下的面积。在通过INTERVAL子句指定窗口的情况下,统计在给定时间范围内的每个窗口内有数据覆盖的时间范围;如果没有INTERVAL子句,则返回整个给定时间范围内的有数据覆盖的时间范围。注意,ELAPSED返回的并不是时间范围的绝对值,而是绝对值除以time_unit所得到的单位个数。 - -**返回结果类型**:DOUBLE。 - -**适用数据类型**:TIMESTAMP。 - -**适用于**: 表,超级表,嵌套查询的外层查询 - -**说明**: -- ts_primary_key参数只能是表的第一列,即 TIMESTAMP 类型的主键列。 -- 按time_unit参数指定的时间单位返回,最小是数据库的时间分辨率。time_unit 参数未指定时,以数据库的时间分辨率为时间单位。支持的时间单位 time_unit 如下: - 1b(纳秒), 
1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 -- 可以和interval组合使用,返回每个时间窗口的时间戳差值。需要特别注意的是,除第一个时间窗口和最后一个时间窗口外,中间窗口的时间戳差值均为窗口长度。 -- order by asc/desc不影响差值的计算结果。 -- 对于超级表,需要和group by tbname子句组合使用,不可以直接使用。 -- 对于普通表,不支持和group by子句组合使用。 -- 对于嵌套查询,仅当内层查询会输出隐式时间戳列时有效。例如select elapsed(ts) from (select diff(value) from sub1)语句,diff函数会让内层查询输出隐式时间戳列,此为主键列,可以用于elapsed函数的第一个参数。相反,例如select elapsed(ts) from (select * from sub1) 语句,ts列输出到外层时已经没有了主键列的含义,无法使用elapsed函数。此外,elapsed函数作为一个与时间线强依赖的函数,形如select elapsed(ts) from (select diff(value) from st group by tbname)尽管会返回一条计算结果,但并无实际意义,这种用法后续也将被限制。 -- 不支持与leastsquares、diff、derivative、top、bottom、last_row、interp等函数混合使用。 - - -### LEASTSQUARES - -```sql -LEASTSQUARES(expr, start_val, step_val) -``` - -**功能说明**:统计表中某列的值的拟合直线方程。start_val 是自变量初始值,step_val 是自变量的步长值。 - -**返回数据类型**:字符串表达式(斜率, 截距)。 - -**适用数据类型**:expr 必须是数值类型。 - -**适用于**:表。 - - -### SPREAD - -```sql -SPREAD(expr) -``` - -**功能说明**:统计表中某列的最大值和最小值之差。 - -**返回数据类型**:DOUBLE。 - -**适用数据类型**:INTEGER, TIMESTAMP。 - -**适用于**:表和超级表。 - - -### STDDEV - -```sql -STDDEV(expr) -``` - -**功能说明**:统计表中某列的均方差。 - -**返回数据类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - - -### SUM - -```sql -SUM(expr) -``` - -**功能说明**:统计表/超级表中某列的和。 - -**返回数据类型**:DOUBLE, BIGINT。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - - -### HYPERLOGLOG - -```sql -HYPERLOGLOG(expr) -``` - -**功能说明**: - - 采用 hyperloglog 算法,返回某列的基数。该算法在数据量很大的情况下,可以明显降低内存的占用,求出来的基数是个估算值,标准误差(标准误差是多次实验,每次的平均数的标准差,不是与真实结果的误差)为 0.81%。 - - 在数据量较少的时候该算法不是很准确,可以使用 select count(data) from (select unique(col) as data from table) 的方法。 - -**返回结果类型**:INTEGER。 - -**适用数据类型**:任何类型。 - -**适用于**:表和超级表。 - - -### HISTOGRAM - -```sql -HISTOGRAM(expr,bin_type, bin_description, normalized) -``` - -**功能说明**:统计数据按照用户指定区间的分布。 - -**返回结果类型**:如归一化参数 normalized 设置为 1,返回结果为 DOUBLE 类型,否则为 BIGINT 类型。 - -**适用数据类型**:数值型字段。 - -**适用于**: 表和超级表。 - -**详细说明**: -- bin_type 用户指定的分桶类型, 有效输入类型为"user_input“, ”linear_bin", "log_bin"。 -- bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串): - - "user_input": "[1, 3, 5, 7]" - 用户指定 bin 的具体数值。 - - - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" - "start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, - 生成区间为[-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]。 - - - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" - "start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, - 生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。 -- normalized 是否将返回结果归一化到 0~1 之间 。有效输入为 0 和 1。 - - -### PERCENTILE - -```sql -PERCENTILE(expr, p [, p1] ... 
) -``` - -**功能说明**:统计表中某列的值百分比分位数。 - -**返回数据类型**: 该函数最小参数个数为 2 个,最大参数个数为 11 个。可以最多同时返回 10 个百分比分位数。当参数个数为 2 时, 返回一个分位数, 类型为DOUBLE,当参数个数大于 2 时,返回类型为VARCHAR, 格式为包含多个返回值的JSON数组。 - -**应用字段**:数值类型。 - -**适用于**:表。 - -**使用说明**: - -- *P*值取值范围 0≤*P*≤100,为 0 的时候等同于 MIN,为 100 的时候等同于 MAX; -- 同时计算针对同一列的多个分位数时,建议使用一个PERCENTILE函数和多个参数的方式,能很大程度上降低查询的响应时间。 - 比如,使用查询SELECT percentile(col, 90, 95, 99) FROM table, 性能会优于SELECT percentile(col, 90), percentile(col, 95), percentile(col, 99) from table。 - - -## 选择函数 - -选择函数根据语义在查询结果集中选择一行或多行结果返回。用户可以同时指定输出 ts 列或其他列(包括 tbname 和标签列),这样就可以方便地知道被选出的值是源于哪个数据行的。 - -### BOTTOM - -```sql -BOTTOM(expr, k) -``` - -**功能说明**:统计表/超级表中某列的值最小 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - -**使用说明**: - -- *k*值取值范围 1≤*k*≤100; -- 系统同时返回该记录关联的时间戳列; -- 限制:BOTTOM 函数不支持 FILL 子句。 - -### FIRST - -```sql -FIRST(expr) -``` - -**功能说明**:统计表/超级表中某列的值最先写入的非 NULL 值。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:所有字段。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 如果要返回各个列的首个(时间戳最小)非 NULL 值,可以使用 FIRST(\*); -- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL; -- 如果结果集中所有列全部为 NULL 值,则不返回结果。 - -### INTERP - -```sql -INTERP(expr [, ignore_null_values]) - -ignore_null_values: { - 0 - | 1 -} -``` - -**功能说明**:返回指定时间截面指定列的记录值或插值。ignore_null_values 参数的值可以是 0 或 1,为 1 时表示忽略 NULL 值, 缺省值为0。 - -**返回数据类型**:同字段类型。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - -**使用说明** - -- INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。 -- INTERP 的输入数据为指定列的数据,可以通过条件语句(where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。 -- INTERP 需要同时与 RANGE,EVERY 和 FILL 关键字一起使用。 -- INTERP 的输出时间范围根据 RANGE(timestamp1, timestamp2)字段来指定,需满足 timestamp1 <= timestamp2。其中 timestamp1 为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2 为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。 -- INTERP 根据 EVERY(time_unit) 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(time_unit 值)进行插值,time_unit 可取值时间单位:1a(毫秒),1s(秒),1m(分),1h(小时),1d(天),1w(周)。例如 EVERY(500a) 将对于指定数据每500毫秒间隔进行一次插值. -- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。关于 FILL 子句如何使用请参考 [FILL 子句](../distinguished/#fill-子句) -- INTERP 可以在 RANGE 字段中只指定唯一的时间戳对单个时间点进行插值,在这种情况下,EVERY 字段可以省略。例如:SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear). 
-- INTERP 作用于超级表时, 会将该超级表下的所有子表数据按照主键列排序后进行插值计算,也可以搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。 -- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0版本以后支持)。 -- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0版本以后支持)。 - -### LAST - -```sql -LAST(expr) -``` - -**功能说明**:统计表/超级表中某列的值最后写入的非 NULL 值。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:所有字段。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 如果要返回各个列的最后(时间戳最大)一个非 NULL 值,可以使用 LAST(\*); -- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL;如果结果集中所有列全部为 NULL 值,则不返回结果。 -- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。 - - -### LAST_ROW - -```sql -LAST_ROW(expr) -``` - -**功能说明**:返回表/超级表的最后一条记录。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:所有字段。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。 -- 不能与 INTERVAL 一起使用。 - -### MAX - -```sql -MAX(expr) -``` - -**功能说明**:统计表/超级表中某列的值最大值。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - - -### MIN - -```sql -MIN(expr) -``` - -**功能说明**:统计表/超级表中某列的值最小值。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - - -### MODE - -```sql -MODE(expr) -``` - -**功能说明**:返回出现频率最高的值,若存在多个频率相同的最高值,则随机输出其中某个值。 - -**返回数据类型**:与输入数据类型一致。 - -**适用数据类型**:全部类型字段。 - -**适用于**:表和超级表。 - - -### SAMPLE - -```sql -SAMPLE(expr, k) -``` - -**功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。 - -**返回结果类型**: 同原始数据类型。 - -**适用数据类型**: 全部类型字段。 - -**嵌套子查询支持**: 适用于内层查询和外层查询。 - -**适用于**:表和超级表。 - - -### TAIL - -```sql -TAIL(expr, k [, offset_rows]) -``` - -**功能说明**:返回跳过最后 offset_val 个,然后取连续 k 个记录,不忽略 NULL 值。offset_val 可以不输入。此时返回最后的 k 个记录。当有 offset_val 输入的情况下,该函数功能等效于 `order by ts desc LIMIT k OFFSET offset_val`。 - -**参数范围**:k: [1,100] offset_val: [0,100]。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:适合于除时间主键列外的任何类型。 - -**适用于**:表、超级表。 - - -### TOP - -```sql -TOP(expr, k) -``` - -**功能说明**: 统计表/超级表中某列的值最大 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - -**使用说明**: - -- *k*值取值范围 1≤*k*≤100; -- 系统同时返回该记录关联的时间戳列; -- 限制:TOP 函数不支持 FILL 子句。 - -### UNIQUE - -```sql -UNIQUE(expr) -``` - -**功能说明**:返回该列数据首次出现的值。该函数功能与 distinct 相似。 - -**返回数据类型**:同应用的字段。 - -**适用数据类型**:全部类型字段。 - -**适用于**: 表和超级表。 - - -## 时序数据特有函数 - -时序数据特有函数是 TDengine 为了满足时序数据的查询场景而量身定做出来的。在通用数据库中,实现类似功能通常需要复杂的查询语法,且效率很低。TDengine 以函数的方式内置了这些功能,最大程度的减轻了用户的使用成本。 - -### CSUM - -```sql -CSUM(expr) -``` - -**功能说明**:累加和(Cumulative sum),输出行与输入行数相同。 - -**返回结果类型**: 输入列如果是整数类型返回值为长整型 (int64_t),浮点数返回值为双精度浮点数(Double)。无符号整数类型返回值为无符号长整型(uint64_t)。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**: 适用于内层查询和外层查询。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。 -- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。 - - -### DERIVATIVE - -```sql -DERIVATIVE(expr, time_interval, ignore_negative) - -ignore_negative: { - 0 - | 1 -} -``` - -**功能说明**:统计表中某列数值的单位变化率。其中单位时间区间的长度可以通过 time_interval 参数指定,最小可以是 1 秒(1s);ignore_negative 参数的值可以是 0 或 1,为 1 时表示忽略负值。 - -**返回数据类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。 - -### DIFF - -```sql -DIFF(expr [, ignore_negative]) - -ignore_negative: { - 0 - | 1 -} -``` - -**功能说明**:统计表中某列的值与前一行对应值的差。 ignore_negative 取值为 0|1 , 可以不填,默认值为 0. 
不忽略负值。ignore_negative 为 1 时表示忽略负数。 - -**返回数据类型**:同应用字段。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 输出结果行数是范围内总行数减一,第一行没有结果输出。 -- 可以与选择相关联的列一起使用。 例如: select \_rowts, DIFF() from。 - - -### IRATE - -```sql -IRATE(expr) -``` - -**功能说明**:计算瞬时增长率。使用时间区间中最后两个样本数据来计算瞬时增长速率;如果这两个值呈递减关系,那么只取最后一个数用于计算,而不是使用二者差值。 - -**返回数据类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - - -### MAVG - -```sql -MAVG(expr, k) -``` - -**功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。 - -**返回结果类型**: DOUBLE。 - -**适用数据类型**: 数值类型。 - -**嵌套子查询支持**: 适用于内层查询和外层查询。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1); -- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用; - - -### STATECOUNT - -```sql -STATECOUNT(expr, oper, val) -``` - -**功能说明**:返回满足某个条件的连续记录的个数,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加 1,条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。 - -**参数范围**: - -- oper : "LT" (小于)、"GT"(大于)、"LE"(小于等于)、"GE"(大于等于)、"NE"(不等于)、"EQ"(等于),不区分大小写。 -- val : 数值型 - -**返回结果类型**:INTEGER。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:不支持应用在子查询上。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 不能和窗口操作一起使用,例如 interval/state_window/session_window。 - - -### STATEDURATION - -```sql -STATEDURATION(expr, oper, val, unit) -``` - -**功能说明**:返回满足某个条件的连续记录的时间长度,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加上两个记录之间的时间长度(第一个满足条件的记录时间长度记为 0),条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。 - -**参数范围**: - -- oper : `'LT'` (小于)、`'GT'`(大于)、`'LE'`(小于等于)、`'GE'`(大于等于)、`'NE'`(不等于)、`'EQ'`(等于),不区分大小写,但需要用`''`包括。 -- val : 数值型 -- unit : 时间长度的单位,可取值时间单位: 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。如果省略,默认为当前数据库精度。 - -**返回结果类型**:INTEGER。 - -**适用数据类型**:数值类型。 - -**嵌套子查询支持**:不支持应用在子查询上。 - -**适用于**:表和超级表。 - -**使用说明**: - -- 不能和窗口操作一起使用,例如 interval/state_window/session_window。 - - -### TWA - -```sql -TWA(expr) -``` - -**功能说明**:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。 - -**返回数据类型**:DOUBLE。 - -**适用数据类型**:数值类型。 - -**适用于**:表和超级表。 - - -## 系统信息函数 - -### DATABASE - -```sql -SELECT DATABASE(); -``` - -**说明**:返回当前登录的数据库。如果登录的时候没有指定默认数据库,且没有使用USE命令切换数据库,则返回NULL。 - - -### CLIENT_VERSION - -```sql -SELECT CLIENT_VERSION(); -``` - -**说明**:返回客户端版本。 - -### SERVER_VERSION - -```sql -SELECT SERVER_VERSION(); -``` - -**说明**:返回服务端版本。 - -### SERVER_STATUS - -```sql -SELECT SERVER_STATUS(); -``` - -**说明**:检测服务端是否所有 dnode 都在线,如果是则返回成功,否则返回无法建立连接的错误。 - -### CURRENT_USER - -```sql -SELECT CURRENT_USER(); -``` - -**说明**:获取当前用户。 - - -## Geometry 函数 - -### Geometry 输入函数: - -#### ST_GeomFromText - -```sql -ST_GeomFromText(VARCHAR WKT expr) -``` - -**功能说明**:根据 Well-Known Text (WKT) 表示从指定的几何值创建几何数据。 - -**返回值类型**:GEOMETRY - -**适用数据类型**:VARCHAR - -**适用表类型**:标准表和超表 - -**使用说明**:输入可以是 WKT 字符串之一,例如点(POINT)、线串(LINESTRING)、多边形(POLYGON)、多点集(MULTIPOINT)、多线串(MULTILINESTRING)、多多边形(MULTIPOLYGON)、几何集合(GEOMETRYCOLLECTION)。输出是以二进制字符串形式定义的 GEOMETRY 数据类型。 - -### Geometry 输出函数: - -#### ST_AsText - -```sql -ST_AsText(GEOMETRY geom) -``` - -**功能说明**:从几何数据中返回指定的 Well-Known Text (WKT) 表示。 - -**返回值类型**:VARCHAR - -**适用数据类型**:GEOMETRY - -**适用表类型**:标准表和超表 - -**使用说明**:输出可以是 WKT 字符串之一,例如点(POINT)、线串(LINESTRING)、多边形(POLYGON)、多点集(MULTIPOINT)、多线串(MULTILINESTRING)、多多边形(MULTIPOLYGON)、几何集合(GEOMETRYCOLLECTION)。 - -### Geometry 关系函数: - -#### ST_Intersects - -```sql -ST_Intersects(GEOMETRY geomA, GEOMETRY geomB) -``` - -##功能说明**:比较两个几何对象,并在它们相交时返回 true。 - -**返回值类型**:BOOL - -**适用数据类型**:GEOMETRY,GEOMETRY - -**适用表类型**:标准表和超表 - -**使用说明**:如果两个几何对象有任何一个共享点,则它们相交。 - -#### ST_Equals - -```sql -ST_Equals(GEOMETRY geomA, GEOMETRY geomB) -``` - -**功能说明**:如果给定的几何对象是"空间相等"的,则返回 TRUE。 - -**返回值类型**:BOOL - 
-**适用数据类型**:GEOMETRY,GEOMETRY - -**适用表类型**:标准表和超表 - -**使用说明**:"空间相等"意味着 ST_Contains(A,B) = true 和 ST_Contains(B,A) = true,并且点的顺序可能不同,但表示相同的几何结构。 - -#### ST_Touches - -```sql -ST_Touches(GEOMETRY geomA, GEOMETRY geomB) -``` - -**功能说明**:如果 A 和 B 相交,但它们的内部不相交,则返回 TRUE。 - -**返回值类型**:BOOL - -**适用数据类型**:GEOMETRY,GEOMETRY - -**适用表类型**:标准表和超表 - -**使用说明**:A 和 B 至少有一个公共点,并且这些公共点位于至少一个边界中。对于点/点输入,关系始终为 FALSE,因为点没有边界。 - -#### ST_Covers - -```sql -ST_Covers(GEOMETRY geomA, GEOMETRY geomB) -``` - -**功能说明**:如果 B 中的每个点都位于几何形状 A 内部(与内部或边界相交),则返回 TRUE。 - -**返回值类型**:BOOL - -**适用数据类型**:GEOMETRY,GEOMETRY - -**适用表类型**:标准表和超表 - -**使用说明**:A 包含 B 意味着 B 中的没有点位于 A 的外部(在外部)。 - -#### ST_Contains - -```sql -ST_Contains(GEOMETRY geomA, GEOMETRY geomB) -``` - -**功能说明**:如果 A 包含 B,描述:如果几何形状 A 包含几何形状 B,则返回 TRUE。 - -**返回值类型**:BOOL - -**适用数据类型**:GEOMETRY,GEOMETRY - -**适用表类型**:标准表和超表 - -**使用说明**:A 包含 B 当且仅当 B 的所有点位于 A 的内部(即位于内部或边界上)(或等效地,B 的没有点位于 A 的外部),并且 A 和 B 的内部至少有一个公共点。 - -#### ST_ContainsProperly - -```sql -ST_ContainsProperly(GEOMETRY geomA, GEOMETRY geomB) -``` - -**功能说明**:如果 B 的每个点都位于 A 内部,则返回 TRUE。 - -**返回值类型**:BOOL - -**适用数据类型**:GEOMETRY,GEOMETRY - -**适用表类型**:标准表和超表 - -**使用说明**:B 的没有点位于 A 的边界或外部。 +--- +sidebar_label: 函数 +title: 函数 +description: TDengine 支持的函数列表 +toc_max_heading_level: 4 +--- + +## 单行函数 + +单行函数为查询结果中的每一行返回一个结果行。 + +### 数学函数 + +#### ABS + +```sql +ABS(expr) +``` + +**功能说明**:获得指定字段的绝对值。 + +**返回结果类型**:与指定字段的原始数据类型一致。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + +#### ACOS + +```sql +ACOS(expr) +``` + +**功能说明**:获得指定字段的反余弦结果。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + +#### ASIN + +```sql +ASIN(expr) +``` + +**功能说明**:获得指定字段的反正弦结果。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + + +#### ATAN + +```sql +ATAN(expr) +``` + +**功能说明**:获得指定字段的反正切结果。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + + +#### CEIL + +```sql +CEIL(expr) +``` + +**功能说明**:获得指定字段的向上取整数的结果。 + +**返回结果类型**:与指定字段的原始数据类型一致。 + +**适用数据类型**:数值类型。 + +**适用于**: 表和超级表。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**使用说明**: 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + +#### COS + +```sql +COS(expr) +``` + +**功能说明**:获得指定字段的余弦结果。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + +#### FLOOR + +```sql +FLOOR(expr) +``` + +**功能说明**:获得指定字段的向下取整数的结果。 + 其他使用说明参见 CEIL 函数描述。 + +#### LOG + +```sql +LOG(expr1[, expr2]) +``` + +**功能说明**:获得 expr1 对于底数 expr2 的对数。如果 expr2 参数省略,则返回指定字段的自然对数值。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + + +#### POW + +```sql +POW(expr1, expr2) +``` + +**功能说明**:获得 expr1 的指数为 expr2 的幂。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + + +#### ROUND + +```sql +ROUND(expr) +``` + +**功能说明**:获得指定字段的四舍五入的结果。 + 其他使用说明参见 CEIL 函数描述。 + + +#### SIN + +```sql +SIN(expr) +``` + 
+**功能说明**:获得指定字段的正弦结果。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + +#### SQRT + +```sql +SQRT(expr) +``` + +**功能说明**:获得指定字段的平方根。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + +#### TAN + +```sql +TAN(expr) +``` + +**功能说明**:获得指定字段的正切结果。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。 + +### 字符串函数 + +字符串函数的输入参数为字符串类型,返回结果为数值类型或字符串类型。 + +#### CHAR_LENGTH + +```sql +CHAR_LENGTH(expr) +``` + +**功能说明**:以字符计数的字符串长度。 + +**返回结果类型**:BIGINT。 + +**适用数据类型**:VARCHAR, NCHAR。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +#### CONCAT + +```sql +CONCAT(expr1, expr2 [, expr] ... ) +``` + +**功能说明**:字符串连接函数。 + +**返回结果类型**:如果所有参数均为 VARCHAR 类型,则结果类型为 VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果参数包含NULL值,则输出值为NULL。 + +**适用数据类型**:VARCHAR, NCHAR。 该函数最小参数个数为2个,最大参数个数为8个。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +#### CONCAT_WS + +```sql +CONCAT_WS(separator_expr, expr1, expr2 [, expr] ...) +``` + +**功能说明**:带分隔符的字符串连接函数。 + +**返回结果类型**:如果所有参数均为VARCHAR类型,则结果类型为VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果参数包含NULL值,则输出值为NULL。 + +**适用数据类型**:VARCHAR, NCHAR。 该函数最小参数个数为3个,最大参数个数为9个。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +#### LENGTH + +```sql +LENGTH(expr) +``` + +**功能说明**:以字节计数的长度。 + +**返回结果类型**:BIGINT。 + +**适用数据类型**:VARCHAR, NCHAR, VARBINARY。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +#### LOWER + +```sql +LOWER(expr) +``` + +**功能说明**:将字符串参数值转换为全小写字母。 + +**返回结果类型**:与输入字段的原始类型相同。 + +**适用数据类型**:VARCHAR, NCHAR。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +#### LTRIM + +```sql +LTRIM(expr) +``` + +**功能说明**:返回清除左边空格后的字符串。 + +**返回结果类型**:与输入字段的原始类型相同。 + +**适用数据类型**:VARCHAR, NCHAR。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +#### RTRIM + +```sql +RTRIM(expr) +``` + +**功能说明**:返回清除右边空格后的字符串。 + +**返回结果类型**:与输入字段的原始类型相同。 + +**适用数据类型**:VARCHAR, NCHAR。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +#### SUBSTR + +```sql +SUBSTR(expr, pos [,len]) +``` + +**功能说明**:从源字符串 str 中的指定位置 pos 开始取一个长度为 len 的子串并返回。如果输入参数 len 被忽略,返回的子串包含从 pos 开始的整个字串。 + +**返回结果类型**:与输入字段的原始类型相同。 + +**适用数据类型**:VARCHAR, NCHAR。输入参数 pos 可以为正数,也可以为负数。如果 pos 是正数,表示开始位置从字符串开头正数计算。如果 pos 为负数,表示开始位置从字符串结尾倒数计算。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +#### UPPER + +```sql +UPPER(expr) +``` + +**功能说明**:将字符串参数值转换为全大写字母。 + +**返回结果类型**:与输入字段的原始类型相同。 + +**适用数据类型**:VARCHAR, NCHAR。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +### 转换函数 + +转换函数将值从一种数据类型转换为另一种数据类型。 + +#### CAST + +```sql +CAST(expr AS type_name) +``` + +**功能说明**:数据类型转换函数,返回 expr 转换为 type_name 指定的类型后的结果。只适用于 select 子句中。 + +**返回结果类型**:CAST 中指定的类型(type_name)。 + +**适用数据类型**:输入参数 expr 的类型可以是除JSON和VARBINARY外的所有类型。如果 type_name 为 VARBINARY,则 expr 只能是 VARCHAR 类型。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**: + +- 对于不能支持的类型转换会直接报错。 +- 对于类型支持但某些值无法正确转换的情况,对应的转换后的值以转换函数输出为准。目前可能遇到的几种情况: + 1)字符串类型转换数值类型时可能出现的无效字符情况,例如"a"可能转为0,但不会报错。 + 2)转换到数值类型时,数值大于type_name可表示的范围时,则会溢出,但不会报错。 + 3)转换到字符串类型时,如果转换后长度超过type_name中指定的长度,则会截断,但不会报错。 + +#### TO_ISO8601 + +```sql +TO_ISO8601(expr [, timezone]) +``` + +**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果则附带当前客户端的系统时区信息。 + +**返回结果数据类型**:VARCHAR 类型。 + +**适用数据类型**:INTEGER, TIMESTAMP。 + 
+**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + +**使用说明**: + +- timezone 参数允许输入的时区格式为: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。例如,TO_ISO8601(1, "+00:00")。 +- 如果输入是表示 UNIX 时间戳的整形,返回格式精度由时间戳的位数决定; +- 如果输入是 TIMESTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。 + + +#### TO_JSON + +```sql +TO_JSON(str_literal) +``` + +**功能说明**: 将字符串常量转换为 JSON 类型。 + +**返回结果数据类型**: JSON。 + +**适用数据类型**: JSON 字符串,形如 '{ "literal" : literal }'。'{}'表示空值。键必须为字符串字面量,值可以为数值字面量、字符串字面量、布尔字面量或空值字面量。str_literal中不支持转义符。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**: 表和超级表。 + + +#### TO_UNIXTIMESTAMP + +```sql +TO_UNIXTIMESTAMP(expr [, return_timestamp]) + +return_timestamp: { + 0 + | 1 +} +``` + +**功能说明**:将日期时间格式的字符串转换成为 UNIX 时间戳。 + +**返回结果数据类型**:BIGINT, TIMESTAMP。 + +**应用字段**:VARCHAR, NCHAR。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 输入的日期时间字符串须符合 ISO8601/RFC3339 标准,无法转换的字符串格式将返回 NULL。 +- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 +- return_timestamp 指定函数返回值是否为时间戳类型,设置为1时返回 TIMESTAMP 类型,设置为0时返回 BIGINT 类型。如不指定缺省返回 BIGINT 类型。 + +#### TO_CHAR + +```sql +TO_CHAR(ts, format_str_literal) +``` + +**功能说明**: 将timestamp类型按照指定格式转换为字符串 + +**返回结果数据类型**: VARCHAR + +**应用字段**: TIMESTAMP + +**嵌套子查询支持**: 适用于内层查询和外层查询 + +**适用于**: 表和超级表 + +**支持的格式** + +| **格式** | **说明**| **例子** | +| --- | --- | --- | +|AM,am,PM,pm| 无点分隔的上午下午 | 07:00:00am| +|A.M.,a.m.,P.M.,p.m.| 有点分隔的上午下午| 07:00:00a.m.| +|YYYY,yyyy|年, 4个及以上数字| 2023-10-10| +|YYY,yyy| 年, 最后3位数字| 023-10-10| +|YY,yy| 年, 最后2位数字| 23-10-10| +|Y,y|年, 最后一位数字| 3-10-10| +|MONTH|月, 全大写| 2023-JANUARY-01| +|Month|月, 首字母大写| 2023-January-01| +|month|月, 全小写| 2023-january-01| +|MON| 月, 缩写, 全大写(三个字符)| JAN, SEP| +|Mon| 月, 缩写, 首字母大写| Jan, Sep| +|mon|月, 缩写, 全小写| jan, sep| +|MM,mm|月, 数字 01-12|2023-01-01| +|DD,dd|月日, 01-31|| +|DAY|周日, 全大写|MONDAY| +|Day|周日, 首字符大写|Monday| +|day|周日, 全小写|monday| +|DY|周日, 缩写, 全大写|MON| +|Dy|周日, 缩写, 首字符大写|Mon| +|dy|周日, 缩写, 全小写|mon| +|DDD|年日, 001-366|| +|D,d|周日, 数字, 1-7, Sunday(1) to Saturday(7)|| +|HH24,hh24|小时, 00-23|2023-01-30 23:59:59| +|hh12,HH12, hh, HH| 小时, 01-12|2023-01-30 12:59:59PM| +|MI,mi|分钟, 00-59|| +|SS,ss|秒, 00-59|| +|MS,ms|毫秒, 000-999|| +|US,us|微秒, 000000-999999|| +|NS,ns|纳秒, 000000000-999999999|| +|TZH,tzh|时区小时|2023-01-30 11:59:59PM +08| + +**使用说明**: +- `Month`, `Day`等的输出格式是左对齐的, 右侧添加空格, 如`2023-OCTOBER -01`, `2023-SEPTEMBER-01`, 9月是月份中英文字母数最长的, 因此9月没有空格. 星期类似. +- 使用`ms`, `us`, `ns`时, 以上三种格式的输出只在精度上不同, 比如ts为 `1697182085123`, `ms` 的输出为 `123`, `us` 的输出为 `123000`, `ns` 的输出为 `123000000`. +- 时间格式中无法匹配规则的内容会直接输出. 如果想要在格式串中指定某些能够匹配规则的部分不做转换, 可以使用双引号, 如`to_char(ts, 'yyyy-mm-dd "is formated by yyyy-mm-dd"')`. 如果想要输出双引号, 那么在双引号之前加一个反斜杠, 如 `to_char(ts, '\"yyyy-mm-dd\"')` 将会输出 `"2023-10-10"`. +- 那些输出是数字的格式, 如`YYYY`, `DD`, 大写与小写意义相同, 即`yyyy` 和 `YYYY` 可以互换. +- 推荐在时间格式中带时区信息,如果不带则默认输出的时区为服务端或客户端所配置的时区. +- 输入时间戳的精度由所查询表的精度确定, 若未指定表, 则精度为毫秒. + +#### TO_TIMESTAMP + +```sql +TO_TIMESTAMP(ts_str_literal, format_str_literal) +``` + +**功能说明**: 将字符串按照指定格式转化为时间戳. + +**返回结果数据类型**: TIMESTAMP + +**应用字段**: VARCHAR + +**嵌套子查询支持**: 适用于内层查询和外层查询 + +**适用于**: 表和超级表 + +**支持的格式**: 与`to_char`相同 + +**使用说明**: +- 若`ms`, `us`, `ns`同时指定, 那么结果时间戳包含上述三个字段的和. 如 `to_timestamp('2023-10-10 10:10:10.123.000456.000000789', 'yyyy-mm-dd hh:mi:ss.ms.us.ns')` 输出为 `2023-10-10 10:10:10.123456789`对应的时间戳. +- `MONTH`, `MON`, `DAY`, `DY` 以及其他输出为数字的格式的大小写意义相同, 如 `to_timestamp('2023-JANUARY-01', 'YYYY-month-dd')`, `month`可以被替换为`MONTH` 或者`Month`. +- 如果同一字段被指定了多次, 那么前面的指定将会被覆盖. 如 `to_timestamp('2023-22-10-10', 'yyyy-yy-MM-dd')`, 输出年份是`2022`. 
+- 为避免转换时使用了非预期的时区,推荐在时间中携带时区信息,例如'2023-10-10 10:10:10+08',如果未指定时区则默认时区为服务端或客户端指定的时区。 +- 如果没有指定完整的时间,那么默认时间值为指定或默认时区的 `1970-01-01 00:00:00`, 未指定部分使用该默认值中的对应部分. 暂不支持只指定年日而不指定月日的格式, 如'yyyy-mm-DDD', 支持'yyyy-mm-DD'. +- 如果格式串中有`AM`, `PM`等, 那么小时必须是12小时制, 范围必须是01-12. +- `to_timestamp`转换具有一定的容错机制, 在格式串和时间戳串不完全对应时, 有时也可转换, 如: `to_timestamp('200101/2', 'yyyyMM1/dd')`, 格式串中多出来的1会被丢弃. 格式串与时间戳串中多余的空格字符(空格, tab等)也会被 自动忽略. 如`to_timestamp(' 23 年 - 1 月 - 01 日 ', 'yy 年-MM月-dd日')` 可以被成功转换. 虽然`MM`等字段需要两个数字对应(只有一位时前面补0), 在`to_timestamp`时, 一个数字也可以成功转换. +- 输出时间戳的精度与查询表的精度相同, 若查询未指定表, 则输出精度为毫秒. 如`select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns')`的输出将会把微妙和纳秒进行截断. 如果指定一张纳秒表, 那么就不会发生截断, 如`select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns') from db_ns.table_ns limit 1`. + + +### 时间和日期函数 + +时间和日期函数对时间戳类型进行操作。 + +所有返回当前时间的函数,如NOW、TODAY和TIMEZONE,在一条SQL语句中不论出现多少次都只会被计算一次。 + +#### NOW + +```sql +NOW() +``` + +**功能说明**:返回客户端当前系统时间。 + +**返回结果数据类型**:TIMESTAMP。 + +**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。 + +**适用于**:表和超级表。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**使用说明**: + +- 支持时间加减操作,如 NOW() + 1s, 支持的时间单位如下: + b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 +- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 + + +#### TIMEDIFF + +```sql +TIMEDIFF(expr1, expr2 [, time_unit]) +``` + +**功能说明**:计算两个时间戳之间的差值,并近似到时间单位 time_unit 指定的精度。 + +**返回结果数据类型**:BIGINT。 + +**应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 + +**适用于**:表和超级表。 + +**嵌套子查询支持**:适用于内层查询和外层查询。 + +**使用说明**: +- 支持的时间单位 time_unit 如下: + 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 +- 如果时间单位 time_unit 未指定, 返回的时间差值精度与当前 DATABASE 设置的时间精度一致。 +- 输入包含不符合时间日期格式的字符串则返回 NULL。 + + +#### TIMETRUNCATE + +```sql +TIMETRUNCATE(expr, time_unit [, use_current_timezone]) + +use_current_timezone: { + 0 + | 1 +} +``` + +**功能说明**:将时间戳按照指定时间单位 time_unit 进行截断。 + +**返回结果数据类型**:TIMESTAMP。 + +**应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 + +**适用于**:表和超级表。 + +**使用说明**: +- 支持的时间单位 time_unit 如下: + 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 +- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 +- 输入包含不符合时间日期格式的字符串则返回 NULL。 +- 当使用 1d/1w 作为时间单位对时间戳进行截断时, 可通过设置 use_current_timezone 参数指定是否根据当前时区进行截断处理。 + 值 0 表示使用 UTC 时区进行截断,值 1 表示使用当前时区进行截断。 + 例如客户端所配置时区为 UTC+0800, 则 TIMETRUNCATE('2020-01-01 23:00:00', 1d, 0) 返回结果为东八区时间 '2020-01-01 08:00:00'。 + 而使用 TIMETRUNCATE('2020-01-01 23:00:00', 1d, 1) 时,返回结果为东八区时间 '2020-01-01 00:00:00'。 + 当不指定 use_current_timezone 时,use_current_timezone 默认值为 1 。 + + + +#### TIMEZONE + +```sql +TIMEZONE() +``` + +**功能说明**:返回客户端当前时区信息。 + +**返回结果数据类型**:VARCHAR。 + +**应用字段**:无 + +**适用于**:表和超级表。 + + +#### TODAY + +```sql +TODAY() +``` + +**功能说明**:返回客户端当日零时的系统时间。 + +**返回结果数据类型**:TIMESTAMP。 + +**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 支持时间加减操作,如 TODAY() + 1s, 支持的时间单位如下: + b(纳秒),u(微秒),a(毫秒),s(秒),m(分),h(小时),d(天),w(周)。 +- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 + + +## 聚合函数 + +聚合函数为查询结果集的每一个分组返回单个结果行。可以由 GROUP BY 或窗口切分子句指定分组,如果没有,则整个查询结果集视为一个分组。 + +TDengine 支持针对数据的聚合查询。提供如下聚合函数。 + +### APERCENTILE + +```sql +APERCENTILE(expr, p [, algo_type]) + +algo_type: { + "default" + | "t-digest" +} +``` + +**功能说明**:统计表/超级表中指定列的值的近似百分比分位数,与 PERCENTILE 函数相似,但是返回近似结果。 + +**返回数据类型**: DOUBLE。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + +**说明**: +- p值范围是[0,100],当为0时等同于MIN,为100时等同于MAX。 +- algo_type 取值为 "default" 或 "t-digest"。 输入为 "default" 时函数使用基于直方图算法进行计算。输入为 "t-digest" 时使用t-digest算法计算分位数的近似结果。如果不指定 algo_type 则使用 "default" 算法。 +- 
"t-digest"算法的近似结果对于输入数据顺序敏感,对超级表查询时不同的输入排序结果可能会有微小的误差。 + +### AVG + +```sql +AVG(expr) +``` + +**功能说明**:统计指定字段的平均值。 + +**返回数据类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + + +### COUNT + +```sql +COUNT({* | expr}) +``` + +**功能说明**:统计指定字段的记录行数。 + +**返回数据类型**:BIGINT。 + +**适用数据类型**:全部类型字段。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 可以使用星号(\*)来替代具体的字段,使用星号(\*)返回全部记录数量。 +- 如果统计字段是具体的列,则返回该列中非 NULL 值的记录数量。 + + +### ELAPSED + +```sql +ELAPSED(ts_primary_key [, time_unit]) +``` + +**功能说明**:elapsed函数表达了统计周期内连续的时间长度,和twa函数配合使用可以计算统计曲线下的面积。在通过INTERVAL子句指定窗口的情况下,统计在给定时间范围内的每个窗口内有数据覆盖的时间范围;如果没有INTERVAL子句,则返回整个给定时间范围内的有数据覆盖的时间范围。注意,ELAPSED返回的并不是时间范围的绝对值,而是绝对值除以time_unit所得到的单位个数。 + +**返回结果类型**:DOUBLE。 + +**适用数据类型**:TIMESTAMP。 + +**适用于**: 表,超级表,嵌套查询的外层查询 + +**说明**: +- ts_primary_key参数只能是表的第一列,即 TIMESTAMP 类型的主键列。 +- 按time_unit参数指定的时间单位返回,最小是数据库的时间分辨率。time_unit 参数未指定时,以数据库的时间分辨率为时间单位。支持的时间单位 time_unit 如下: + 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 +- 可以和interval组合使用,返回每个时间窗口的时间戳差值。需要特别注意的是,除第一个时间窗口和最后一个时间窗口外,中间窗口的时间戳差值均为窗口长度。 +- order by asc/desc不影响差值的计算结果。 +- 对于超级表,需要和group by tbname子句组合使用,不可以直接使用。 +- 对于普通表,不支持和group by子句组合使用。 +- 对于嵌套查询,仅当内层查询会输出隐式时间戳列时有效。例如select elapsed(ts) from (select diff(value) from sub1)语句,diff函数会让内层查询输出隐式时间戳列,此为主键列,可以用于elapsed函数的第一个参数。相反,例如select elapsed(ts) from (select * from sub1) 语句,ts列输出到外层时已经没有了主键列的含义,无法使用elapsed函数。此外,elapsed函数作为一个与时间线强依赖的函数,形如select elapsed(ts) from (select diff(value) from st group by tbname)尽管会返回一条计算结果,但并无实际意义,这种用法后续也将被限制。 +- 不支持与leastsquares、diff、derivative、top、bottom、last_row、interp等函数混合使用。 + + +### LEASTSQUARES + +```sql +LEASTSQUARES(expr, start_val, step_val) +``` + +**功能说明**:统计表中某列的值的拟合直线方程。start_val 是自变量初始值,step_val 是自变量的步长值。 + +**返回数据类型**:字符串表达式(斜率, 截距)。 + +**适用数据类型**:expr 必须是数值类型。 + +**适用于**:表。 + + +### SPREAD + +```sql +SPREAD(expr) +``` + +**功能说明**:统计表中某列的最大值和最小值之差。 + +**返回数据类型**:DOUBLE。 + +**适用数据类型**:INTEGER, TIMESTAMP。 + +**适用于**:表和超级表。 + + +### STDDEV + +```sql +STDDEV(expr) +``` + +**功能说明**:统计表中某列的均方差。 + +**返回数据类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + + +### SUM + +```sql +SUM(expr) +``` + +**功能说明**:统计表/超级表中某列的和。 + +**返回数据类型**:DOUBLE, BIGINT。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + + +### HYPERLOGLOG + +```sql +HYPERLOGLOG(expr) +``` + +**功能说明**: + - 采用 hyperloglog 算法,返回某列的基数。该算法在数据量很大的情况下,可以明显降低内存的占用,求出来的基数是个估算值,标准误差(标准误差是多次实验,每次的平均数的标准差,不是与真实结果的误差)为 0.81%。 + - 在数据量较少的时候该算法不是很准确,可以使用 select count(data) from (select unique(col) as data from table) 的方法。 + +**返回结果类型**:INTEGER。 + +**适用数据类型**:任何类型。 + +**适用于**:表和超级表。 + + +### HISTOGRAM + +```sql +HISTOGRAM(expr,bin_type, bin_description, normalized) +``` + +**功能说明**:统计数据按照用户指定区间的分布。 + +**返回结果类型**:如归一化参数 normalized 设置为 1,返回结果为 DOUBLE 类型,否则为 BIGINT 类型。 + +**适用数据类型**:数值型字段。 + +**适用于**: 表和超级表。 + +**详细说明**: +- bin_type 用户指定的分桶类型, 有效输入类型为"user_input“, ”linear_bin", "log_bin"。 +- bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串): + - "user_input": "[1, 3, 5, 7]" + 用户指定 bin 的具体数值。 + + - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" + "start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, + 生成区间为[-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]。 + + - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" + "start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, + 生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。 +- normalized 是否将返回结果归一化到 0~1 之间 。有效输入为 0 和 1。 + + +### PERCENTILE + +```sql +PERCENTILE(expr, p [, p1] ... 
) +``` + +**功能说明**:统计表中某列的值百分比分位数。 + +**返回数据类型**: 该函数最小参数个数为 2 个,最大参数个数为 11 个。可以最多同时返回 10 个百分比分位数。当参数个数为 2 时, 返回一个分位数, 类型为DOUBLE,当参数个数大于 2 时,返回类型为VARCHAR, 格式为包含多个返回值的JSON数组。 + +**应用字段**:数值类型。 + +**适用于**:表。 + +**使用说明**: + +- *P*值取值范围 0≤*P*≤100,为 0 的时候等同于 MIN,为 100 的时候等同于 MAX; +- 同时计算针对同一列的多个分位数时,建议使用一个PERCENTILE函数和多个参数的方式,能很大程度上降低查询的响应时间。 + 比如,使用查询SELECT percentile(col, 90, 95, 99) FROM table, 性能会优于SELECT percentile(col, 90), percentile(col, 95), percentile(col, 99) from table。 + + +## 选择函数 + +选择函数根据语义在查询结果集中选择一行或多行结果返回。用户可以同时指定输出 ts 列或其他列(包括 tbname 和标签列),这样就可以方便地知道被选出的值是源于哪个数据行的。 + +### BOTTOM + +```sql +BOTTOM(expr, k) +``` + +**功能说明**:统计表/超级表中某列的值最小 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + +**使用说明**: + +- *k*值取值范围 1≤*k*≤100; +- 系统同时返回该记录关联的时间戳列; +- 限制:BOTTOM 函数不支持 FILL 子句。 + +### FIRST + +```sql +FIRST(expr) +``` + +**功能说明**:统计表/超级表中某列的值最先写入的非 NULL 值。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:所有字段。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 如果要返回各个列的首个(时间戳最小)非 NULL 值,可以使用 FIRST(\*); +- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL; +- 如果结果集中所有列全部为 NULL 值,则不返回结果。 + +### INTERP + +```sql +INTERP(expr [, ignore_null_values]) + +ignore_null_values: { + 0 + | 1 +} +``` + +**功能说明**:返回指定时间截面指定列的记录值或插值。ignore_null_values 参数的值可以是 0 或 1,为 1 时表示忽略 NULL 值, 缺省值为0。 + +**返回数据类型**:同字段类型。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + +**使用说明** + +- INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。 +- INTERP 的输入数据为指定列的数据,可以通过条件语句(where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。 +- INTERP 需要同时与 RANGE,EVERY 和 FILL 关键字一起使用。 +- INTERP 的输出时间范围根据 RANGE(timestamp1, timestamp2)字段来指定,需满足 timestamp1 <= timestamp2。其中 timestamp1 为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2 为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。 +- INTERP 根据 EVERY(time_unit) 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(time_unit 值)进行插值,time_unit 可取值时间单位:1a(毫秒),1s(秒),1m(分),1h(小时),1d(天),1w(周)。例如 EVERY(500a) 将对于指定数据每500毫秒间隔进行一次插值. +- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。关于 FILL 子句如何使用请参考 [FILL 子句](../distinguished/#fill-子句) +- INTERP 可以在 RANGE 字段中只指定唯一的时间戳对单个时间点进行插值,在这种情况下,EVERY 字段可以省略。例如:SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear). 
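+- 作为补充,下面给出一个同时使用 RANGE、EVERY 和 FILL 的示意示例(表 tb 与列 col 沿用上文说明中的名称,时间范围与间隔均为假设值):
+
+```sql
+-- 在指定时间范围内每 10 分钟输出一个插值结果,缺失处按线性插值填充
+SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', '2023-01-01 01:00:00') EVERY(10m) FILL(linear);
+```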
+- INTERP 作用于超级表时, 会将该超级表下的所有子表数据按照主键列排序后进行插值计算,也可以搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。 +- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0版本以后支持)。 +- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0版本以后支持)。 + +### LAST + +```sql +LAST(expr) +``` + +**功能说明**:统计表/超级表中某列的值最后写入的非 NULL 值。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:所有字段。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 如果要返回各个列的最后(时间戳最大)一个非 NULL 值,可以使用 LAST(\*); +- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL;如果结果集中所有列全部为 NULL 值,则不返回结果。 +- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。 + + +### LAST_ROW + +```sql +LAST_ROW(expr) +``` + +**功能说明**:返回表/超级表的最后一条记录。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:所有字段。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。 +- 不能与 INTERVAL 一起使用。 + +### MAX + +```sql +MAX(expr) +``` + +**功能说明**:统计表/超级表中某列的值最大值。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + + +### MIN + +```sql +MIN(expr) +``` + +**功能说明**:统计表/超级表中某列的值最小值。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + + +### MODE + +```sql +MODE(expr) +``` + +**功能说明**:返回出现频率最高的值,若存在多个频率相同的最高值,则随机输出其中某个值。 + +**返回数据类型**:与输入数据类型一致。 + +**适用数据类型**:全部类型字段。 + +**适用于**:表和超级表。 + + +### SAMPLE + +```sql +SAMPLE(expr, k) +``` + +**功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。 + +**返回结果类型**: 同原始数据类型。 + +**适用数据类型**: 全部类型字段。 + +**嵌套子查询支持**: 适用于内层查询和外层查询。 + +**适用于**:表和超级表。 + + +### TAIL + +```sql +TAIL(expr, k [, offset_rows]) +``` + +**功能说明**:返回跳过最后 offset_val 个,然后取连续 k 个记录,不忽略 NULL 值。offset_val 可以不输入。此时返回最后的 k 个记录。当有 offset_val 输入的情况下,该函数功能等效于 `order by ts desc LIMIT k OFFSET offset_val`。 + +**参数范围**:k: [1,100] offset_val: [0,100]。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:适合于除时间主键列外的任何类型。 + +**适用于**:表、超级表。 + + +### TOP + +```sql +TOP(expr, k) +``` + +**功能说明**: 统计表/超级表中某列的值最大 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + +**使用说明**: + +- *k*值取值范围 1≤*k*≤100; +- 系统同时返回该记录关联的时间戳列; +- 限制:TOP 函数不支持 FILL 子句。 + +### UNIQUE + +```sql +UNIQUE(expr) +``` + +**功能说明**:返回该列数据首次出现的值。该函数功能与 distinct 相似。 + +**返回数据类型**:同应用的字段。 + +**适用数据类型**:全部类型字段。 + +**适用于**: 表和超级表。 + + +## 时序数据特有函数 + +时序数据特有函数是 TDengine 为了满足时序数据的查询场景而量身定做出来的。在通用数据库中,实现类似功能通常需要复杂的查询语法,且效率很低。TDengine 以函数的方式内置了这些功能,最大程度的减轻了用户的使用成本。 + +### CSUM + +```sql +CSUM(expr) +``` + +**功能说明**:累加和(Cumulative sum),输出行与输入行数相同。 + +**返回结果类型**: 输入列如果是整数类型返回值为长整型 (int64_t),浮点数返回值为双精度浮点数(Double)。无符号整数类型返回值为无符号长整型(uint64_t)。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**: 适用于内层查询和外层查询。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。 +- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。 + + +### DERIVATIVE + +```sql +DERIVATIVE(expr, time_interval, ignore_negative) + +ignore_negative: { + 0 + | 1 +} +``` + +**功能说明**:统计表中某列数值的单位变化率。其中单位时间区间的长度可以通过 time_interval 参数指定,最小可以是 1 秒(1s);ignore_negative 参数的值可以是 0 或 1,为 1 时表示忽略负值。 + +**返回数据类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。 + +### DIFF + +```sql +DIFF(expr [, ignore_negative]) + +ignore_negative: { + 0 + | 1 +} +``` + +**功能说明**:统计表中某列的值与前一行对应值的差。 ignore_negative 取值为 0|1 , 可以不填,默认值为 0. 
不忽略负值。ignore_negative 为 1 时表示忽略负数。 + +**返回数据类型**:同应用字段。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 输出结果行数是范围内总行数减一,第一行没有结果输出。 +- 可以与选择相关联的列一起使用。 例如: select \_rowts, DIFF() from。 + + +### IRATE + +```sql +IRATE(expr) +``` + +**功能说明**:计算瞬时增长率。使用时间区间中最后两个样本数据来计算瞬时增长速率;如果这两个值呈递减关系,那么只取最后一个数用于计算,而不是使用二者差值。 + +**返回数据类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + + +### MAVG + +```sql +MAVG(expr, k) +``` + +**功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。 + +**返回结果类型**: DOUBLE。 + +**适用数据类型**: 数值类型。 + +**嵌套子查询支持**: 适用于内层查询和外层查询。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1); +- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用; + + +### STATECOUNT + +```sql +STATECOUNT(expr, oper, val) +``` + +**功能说明**:返回满足某个条件的连续记录的个数,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加 1,条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。 + +**参数范围**: + +- oper : "LT" (小于)、"GT"(大于)、"LE"(小于等于)、"GE"(大于等于)、"NE"(不等于)、"EQ"(等于),不区分大小写。 +- val : 数值型 + +**返回结果类型**:INTEGER。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:不支持应用在子查询上。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 不能和窗口操作一起使用,例如 interval/state_window/session_window。 + + +### STATEDURATION + +```sql +STATEDURATION(expr, oper, val, unit) +``` + +**功能说明**:返回满足某个条件的连续记录的时间长度,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加上两个记录之间的时间长度(第一个满足条件的记录时间长度记为 0),条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。 + +**参数范围**: + +- oper : `'LT'` (小于)、`'GT'`(大于)、`'LE'`(小于等于)、`'GE'`(大于等于)、`'NE'`(不等于)、`'EQ'`(等于),不区分大小写,但需要用`''`包括。 +- val : 数值型 +- unit : 时间长度的单位,可取值时间单位: 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。如果省略,默认为当前数据库精度。 + +**返回结果类型**:INTEGER。 + +**适用数据类型**:数值类型。 + +**嵌套子查询支持**:不支持应用在子查询上。 + +**适用于**:表和超级表。 + +**使用说明**: + +- 不能和窗口操作一起使用,例如 interval/state_window/session_window。 + + +### TWA + +```sql +TWA(expr) +``` + +**功能说明**:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。 + +**返回数据类型**:DOUBLE。 + +**适用数据类型**:数值类型。 + +**适用于**:表和超级表。 + + +## 系统信息函数 + +### DATABASE + +```sql +SELECT DATABASE(); +``` + +**说明**:返回当前登录的数据库。如果登录的时候没有指定默认数据库,且没有使用USE命令切换数据库,则返回NULL。 + + +### CLIENT_VERSION + +```sql +SELECT CLIENT_VERSION(); +``` + +**说明**:返回客户端版本。 + +### SERVER_VERSION + +```sql +SELECT SERVER_VERSION(); +``` + +**说明**:返回服务端版本。 + +### SERVER_STATUS + +```sql +SELECT SERVER_STATUS(); +``` + +**说明**:检测服务端是否所有 dnode 都在线,如果是则返回成功,否则返回无法建立连接的错误。 + +### CURRENT_USER + +```sql +SELECT CURRENT_USER(); +``` + +**说明**:获取当前用户。 + + +## Geometry 函数 + +### Geometry 输入函数: + +#### ST_GeomFromText + +```sql +ST_GeomFromText(VARCHAR WKT expr) +``` + +**功能说明**:根据 Well-Known Text (WKT) 表示从指定的几何值创建几何数据。 + +**返回值类型**:GEOMETRY + +**适用数据类型**:VARCHAR + +**适用表类型**:标准表和超表 + +**使用说明**:输入可以是 WKT 字符串之一,例如点(POINT)、线串(LINESTRING)、多边形(POLYGON)、多点集(MULTIPOINT)、多线串(MULTILINESTRING)、多多边形(MULTIPOLYGON)、几何集合(GEOMETRYCOLLECTION)。输出是以二进制字符串形式定义的 GEOMETRY 数据类型。 + +### Geometry 输出函数: + +#### ST_AsText + +```sql +ST_AsText(GEOMETRY geom) +``` + +**功能说明**:从几何数据中返回指定的 Well-Known Text (WKT) 表示。 + +**返回值类型**:VARCHAR + +**适用数据类型**:GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:输出可以是 WKT 字符串之一,例如点(POINT)、线串(LINESTRING)、多边形(POLYGON)、多点集(MULTIPOINT)、多线串(MULTILINESTRING)、多多边形(MULTIPOLYGON)、几何集合(GEOMETRYCOLLECTION)。 + +### Geometry 关系函数: + +#### ST_Intersects + +```sql +ST_Intersects(GEOMETRY geomA, GEOMETRY geomB) +``` + +##功能说明**:比较两个几何对象,并在它们相交时返回 true。 + +**返回值类型**:BOOL + +**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:如果两个几何对象有任何一个共享点,则它们相交。 + +#### ST_Equals + +```sql +ST_Equals(GEOMETRY geomA, GEOMETRY geomB) +``` + +**功能说明**:如果给定的几何对象是"空间相等"的,则返回 TRUE。 + +**返回值类型**:BOOL + 
+**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:"空间相等"意味着 ST_Contains(A,B) = true 和 ST_Contains(B,A) = true,并且点的顺序可能不同,但表示相同的几何结构。 + +#### ST_Touches + +```sql +ST_Touches(GEOMETRY geomA, GEOMETRY geomB) +``` + +**功能说明**:如果 A 和 B 相交,但它们的内部不相交,则返回 TRUE。 + +**返回值类型**:BOOL + +**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:A 和 B 至少有一个公共点,并且这些公共点位于至少一个边界中。对于点/点输入,关系始终为 FALSE,因为点没有边界。 + +#### ST_Covers + +```sql +ST_Covers(GEOMETRY geomA, GEOMETRY geomB) +``` + +**功能说明**:如果 B 中的每个点都位于几何形状 A 内部(与内部或边界相交),则返回 TRUE。 + +**返回值类型**:BOOL + +**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:A 包含 B 意味着 B 中的没有点位于 A 的外部(在外部)。 + +#### ST_Contains + +```sql +ST_Contains(GEOMETRY geomA, GEOMETRY geomB) +``` + +**功能说明**:如果 A 包含 B,描述:如果几何形状 A 包含几何形状 B,则返回 TRUE。 + +**返回值类型**:BOOL + +**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:A 包含 B 当且仅当 B 的所有点位于 A 的内部(即位于内部或边界上)(或等效地,B 的没有点位于 A 的外部),并且 A 和 B 的内部至少有一个公共点。 + +#### ST_ContainsProperly + +```sql +ST_ContainsProperly(GEOMETRY geomA, GEOMETRY geomB) +``` + +**功能说明**:如果 B 的每个点都位于 A 内部,则返回 TRUE。 + +**返回值类型**:BOOL + +**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:B 的没有点位于 A 的边界或外部。 diff --git a/docs/zh/12-taos-sql/12-distinguished.md b/docs/zh/12-taos-sql/12-distinguished.md index 98bfd3567a..8ae3f900f4 100755 --- a/docs/zh/12-taos-sql/12-distinguished.md +++ b/docs/zh/12-taos-sql/12-distinguished.md @@ -16,7 +16,11 @@ TDengine 提供的特色查询包括数据切分查询和时间窗口切分查 PARTITION BY part_list ``` -part_list 可以是任意的标量表达式,包括列、常量、标量函数和它们的组合。 +part_list 可以是任意的标量表达式,包括列、常量、标量函数和它们的组合。例如,将数据按标签 location 进行分组,取每个分组内的电压平均值: +```sql +select location, avg(voltage) from meters partition by location +``` + TDengine 按如下方式处理数据切分子句: @@ -25,9 +29,13 @@ TDengine 按如下方式处理数据切分子句: - 数据切分子句可以和窗口切分子句(或 GROUP BY 子句)一起使用,此时后面的子句作用在每个切分的分片上。例如,将数据按标签 location 进行分组,并对每个组按 10 分钟进行降采样,取其最大值。 ```sql -select max(current) from meters partition by location interval(10m) +select _wstart, location, max(current) from meters partition by location interval(10m) ``` -数据切分子句最常见的用法就是在超级表查询中,按标签将子表数据进行切分,然后分别进行计算。特别是 PARTITION BY TBNAME 用法,它将每个子表的数据独立出来,形成一条条独立的时间序列,极大的方便了各种时序场景的统计分析。 +数据切分子句最常见的用法就是在超级表查询中,按标签将子表数据进行切分,然后分别进行计算。特别是 PARTITION BY TBNAME 用法,它将每个子表的数据独立出来,形成一条条独立的时间序列,极大的方便了各种时序场景的统计分析。例如,统计每个电表每 10 分钟内的电压平均值: +```sql +select _wstart, tbname, avg(voltage) from meters partition by tbname interval(10m) +``` + ## 窗口切分查询 diff --git a/docs/zh/14-reference/05-taosbenchmark.md b/docs/zh/14-reference/05-taosbenchmark.md index 597c188c11..bb88c2dede 100644 --- a/docs/zh/14-reference/05-taosbenchmark.md +++ b/docs/zh/14-reference/05-taosbenchmark.md @@ -81,13 +81,13 @@ taosBenchmark -f -#### 订阅场景 JSON 配置文件示例 +#### 订阅场景 JSON 配置文件示例
-subscribe.json +tmq.json ```json -{{#include /taos-tools/example/subscribe.json}} +{{#include /taos-tools/example/tmq.json}} ```
diff --git a/docs/zh/20-third-party/75-powerbi.md b/docs/zh/20-third-party/75-powerbi.md new file mode 100644 index 0000000000..255b23402b --- /dev/null +++ b/docs/zh/20-third-party/75-powerbi.md @@ -0,0 +1,50 @@ +--- +sidebar_label: Power BI +title: Power BI +description: 如何使用 Power BI 和 TDengine 进行时序数据分析 +--- + +# 如何使用 Power BI 和 TDengine 进行时序数据分析 + +## 方案介绍 + +使用 ODBC 连接器,Power BI 可以快速的访问 TDengine。您可以将标签数据、原始时序数据或按时间聚合后的时序数据从 TDengine 导入到 Power BI,制作报表或仪表盘,整个过程不需要任何的代码编写过程。 + +### 整体步骤 +![Power BI use step](./powerbi-step-zh.webp) + +### 前置要求 +1. TDengine 服务端软件已经安装并运行 +2. Power BI Desktop 软件已经安装并运行(如未安装,请从[官方地址](https://www.microsoft.com/zh-cn/download/details.aspx?id=58494)下载最新的 Windows X64 版本)。 + + +### 安装驱动 +从 TDengine 官网下载最新的 Windows X64 客户端驱动程序 [下载地址](https://docs.taosdata.com/get-started/package/),并安装在 Power BI 运行的机器上 + +### 配置数据源 +请参考 [ODBC](../../connector/odbc) 配置Websocket数据源。 + +### 导入 TDengine 数据到 Power BI +1. 打开 Power BI 并登录后,通过如下步骤添加数据源,“主页” -> “获取数据” -> “其他” -> “ODBC” -> “连接” +2. 选择数据源名称后,连接到配置好的数据源,进入导航器,浏览对应数据库的数据表并加载 +3. 如果需要输入 SQL 语句,可以点击“高级选项”,在展开的对话框中输入并加载数据 + + +为了更好的使用 Power BI 分析 TDengine 中的数据,您需要理解维度、度量、时序、相关性的概念,然后通过自定义的 SQL 语句导入数据。 +1. 维度:通常是分类(文本)数据,描述设备、测点、型号等类别信息。在 TDengine 的超级表中,使用标签列存储数据的维度信息,可以通过形如 `select distinct tbname, tag1, tag2 from supertable` 的 SQL 语法快速获得维度信息。 +2. 度量:可以用于进行计算的定量(数值)字段, 常见计算有求和、平均值和最小值等。如果测点的采集频率为秒,那么一年就有 31,536,000 条记录,把这些数据全部导入 Power BI 会严重影响其执行效率。在 TDengine 中,您可以使用数据切分查询、窗口切分查询等语法,结合与窗口相关的伪列,把降采样后的数据导入到 Power BI 中,具体语法参考 [TDengine 特色查询功能介绍](https://docs.taosdata.com/taos-sql/distinguished/)。 + - 窗口切分查询:比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值,这种场景下可以使用窗口子句来获得需要的降采样查询结果,对应的 SQL 语句形如 `select tbname, _wstart date,avg(temperature) temp from table interval(10m)` ,其中 _wstart 是伪列,表示时间窗口起始时间,10m 表示时间窗口的持续时间,`avg(temperature)` 表示时间窗口内的聚合值。 + - 数据切分查询:如果需要同时获取很多温度传感器的聚合数值,可对数据进行切分然后在切分出的数据空间内再进行一系列的计算,对应的 SQL 语法参考 `partition by part_list`。数据切分子句最常见的用法就是在超级表查询中,按标签将子表数据进行切分,将每个子表的数据独立出来,形成一条条独立的时间序列,方便各种时序场景的统计分析。 +3. 时序:在绘制曲线或者按照时间聚合数据时,通常需要引入日期表。日期表可以从 Excel 表格中导入,也可以在 TDengine 中执行 SQL 语句获取,例如 `select _wstart date, count(*) cnt from test.meters where ts between A and B interval(1d) fill(0)`,其中 fill 字句表示数据缺失情况下的填充模式,伪列_wstart 则为要获取的日期列。 +4. 相关性:告诉数据之间如何关联,度量和维度可以通过 tbname 列关联在一起,日期表和度量则可以通过 date 列关联,配合形成可视化报表。 + +### 智能电表样例 +TDengine 有自己独特的数据模型,它使用超级表作为模板,为每个设备创建一个表,每个表最多可创建 4096 个数据列和 128 个标签列。在智能电表样例中,假如一个电表每秒产生一条记录,一天就有 86,400 条记录,一年就有 31,536,000 条记录,1000 个电表将占用 600 GB 原始磁盘空间。因此,Power BI 更多的应用方式是将标签列映射为维度列,数据列的聚合结果导入为度量列,最终为关键决策制定者提供所需的指标。 +1. 导入维度数据 +在 Power BI 中导入表的标签列,取名为 tags,SQL 如下 +select distinct tbname, groupid, location from test.meters; +2. 导入度量数据 +在 Power BI 中,按照 1 小时的时间窗口,导入每个电表的电流均值、电压均值、相位均值,取名为 data,SQL 如下 +`select tbname, _wstart ws, avg(current), avg(voltage), avg(phase) from test.meters PARTITION by tbname interval(1h)` ; +3. 
建立维度和度量的关联关系 +在 Power BI 中,打开模型视图,建立表 tags 和 data 的关联关系,将 tbname 设置为关联数据列。之后,就可以在柱状图、饼图等控件中使用这些数据。更多有关 Power BI 构建视觉效果的信息,请查询 [Power BI 文档](https://learn.microsoft.com/zh-cn/power-bi/)。 \ No newline at end of file diff --git a/docs/zh/20-third-party/powerbi-step-zh.webp b/docs/zh/20-third-party/powerbi-step-zh.webp new file mode 100644 index 0000000000..16e48e8aaa Binary files /dev/null and b/docs/zh/20-third-party/powerbi-step-zh.webp differ diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 48e014975a..bafe7583b7 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -32,6 +32,7 @@ extern "C" { #endif /* ------------------------ MESSAGE DEFINITIONS ------------------------ */ + #define TD_MSG_NUMBER_ #undef TD_MSG_DICT_ #undef TD_MSG_INFO_ @@ -357,7 +358,7 @@ typedef enum ENodeType { QUERY_NODE_SHOW_USER_PRIVILEGES_STMT, QUERY_NODE_SHOW_VIEWS_STMT, QUERY_NODE_SHOW_COMPACTS_STMT, - QUERY_NODE_SHOW_COMPACT_DETAILS_STMT, + QUERY_NODE_SHOW_COMPACT_DETAILS_STMT, // logic plan node QUERY_NODE_LOGIC_PLAN_SCAN = 1000, @@ -1699,7 +1700,7 @@ typedef struct { int32_t vgId; int32_t dnodeId; int32_t numberFileset; - int32_t finished; + int32_t finished; } SQueryCompactProgressRsp; int32_t tSerializeSQueryCompactProgressRsp(void* buf, int32_t bufLen, SQueryCompactProgressRsp* pReq); diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 5ad66c64e7..24ad5abded 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -38,7 +38,7 @@ #undef TD_NEW_MSG_SEG #undef TD_DEF_MSG_TYPE #undef TD_CLOSE_MSG_SEG - #define TD_NEW_MSG_SEG(TYPE) + #define TD_NEW_MSG_SEG(TYPE) #define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP) #define TD_CLOSE_MSG_SEG(TYPE) TYPE, int32_t tMsgRangeDict[] = { @@ -76,9 +76,7 @@ #define TD_CLOSE_MSG_SEG(TYPE) enum { - - -#else +#else #undef TD_NEW_MSG_SEG #undef TD_DEF_MSG_TYPE @@ -109,7 +107,7 @@ TD_DEF_MSG_TYPE(TDMT_DND_ALTER_VNODE_TYPE, "dnode-alter-vnode-type", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP, "dnode-check-vnode-learner-catchup", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_DND_MAX_MSG, "dnd-max", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_DND_MSG) + TD_CLOSE_MSG_SEG(TDMT_END_DND_MSG) TD_NEW_MSG_SEG(TDMT_MND_MSG) // 1<<8 TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL) @@ -220,7 +218,7 @@ TD_DEF_MSG_TYPE(TDMT_MND_KILL_COMPACT, "kill-compact", SKillCompactReq, NULL) TD_DEF_MSG_TYPE(TDMT_MND_COMPACT_TIMER, "compact-tmr", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG) + TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG) TD_NEW_MSG_SEG(TDMT_VND_MSG) // 2<<8 TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp) @@ -272,7 +270,7 @@ TD_DEF_MSG_TYPE(TDMT_VND_QUERY_COMPACT_PROGRESS, "vnode-query-compact-progress", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_KILL_COMPACT, "kill-compact", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_MAX_MSG, "vnd-max", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_VND_MSG) + TD_CLOSE_MSG_SEG(TDMT_END_VND_MSG) TD_NEW_MSG_SEG(TDMT_SCH_MSG) // 3<<8 TD_DEF_MSG_TYPE(TDMT_SCH_QUERY, "query", NULL, NULL) @@ -287,7 +285,7 @@ TD_DEF_MSG_TYPE(TDMT_SCH_LINK_BROKEN, "link-broken", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_SCH_TASK_NOTIFY, "task-notify", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_SCH_MAX_MSG, "sch-max", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_SCH_MSG) + TD_CLOSE_MSG_SEG(TDMT_END_SCH_MSG) TD_NEW_MSG_SEG(TDMT_STREAM_MSG) //4 << 8 @@ -305,11 +303,11 @@ TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_STOP, "stream-task-stop", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_STREAM_HTASK_DROP, 
"stream-htask-drop", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_STREAM_MSG) + TD_CLOSE_MSG_SEG(TDMT_END_STREAM_MSG) TD_NEW_MSG_SEG(TDMT_MON_MSG) //5 << 8 TD_DEF_MSG_TYPE(TDMT_MON_MAX_MSG, "monitor-max", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_MON_MSG) + TD_CLOSE_MSG_SEG(TDMT_END_MON_MSG) TD_NEW_MSG_SEG(TDMT_SYNC_MSG) //6 << 8 TD_DEF_MSG_TYPE(TDMT_SYNC_TIMEOUT, "sync-timer", NULL, NULL) @@ -341,8 +339,7 @@ TD_DEF_MSG_TYPE(TDMT_SYNC_PREP_SNAPSHOT_REPLY, "sync-prep-snapshot-reply", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_SYNC_FORCE_FOLLOWER, "sync-force-become-follower", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_SYNC_MSG) - + TD_CLOSE_MSG_SEG(TDMT_END_SYNC_MSG) TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG) //7 << 8 TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY, "vnode-stream-scan-history", NULL, NULL) @@ -352,7 +349,7 @@ TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_RESET, "vnode-stream-reset", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_CHECK, "vnode-stream-task-check", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_STREAM_MAX_MSG, "vnd-stream-max", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_VND_STREAM_MSG) + TD_CLOSE_MSG_SEG(TDMT_END_VND_STREAM_MSG) TD_NEW_MSG_SEG(TDMT_VND_TMQ_MSG) //8 << 8 TD_DEF_MSG_TYPE(TDMT_VND_TMQ_SUBSCRIBE, "vnode-tmq-subscribe", SMqRebVgReq, SMqRebVgRsp) @@ -366,13 +363,10 @@ TD_DEF_MSG_TYPE(TDMT_VND_TMQ_VG_WALINFO, "vnode-tmq-vg-walinfo", SMqPollReq, SMqDataBlkRsp) TD_DEF_MSG_TYPE(TDMT_VND_TMQ_VG_COMMITTEDINFO, "vnode-tmq-committedinfo", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_TMQ_MAX_MSG, "vnd-tmq-max", NULL, NULL) - TD_CLOSE_MSG_SEG(TDMT_END_TMQ_MSG) + TD_CLOSE_MSG_SEG(TDMT_END_TMQ_MSG) TD_NEW_MSG_SEG(TDMT_MAX_MSG) // msg end mark - TD_CLOSE_MSG_SEG(TDMT_END_MAX_MSG) - - - + TD_CLOSE_MSG_SEG(TDMT_END_MAX_MSG) #if defined(TD_MSG_NUMBER_) TDMT_MAX diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 6730d211df..918e50a9d0 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -691,6 +691,7 @@ typedef struct SStreamHbMsg { int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pRsp); int32_t tDecodeStreamHbMsg(SDecoder* pDecoder, SStreamHbMsg* pRsp); +void streamMetaClearHbMsg(SStreamHbMsg* pMsg); typedef struct { int64_t streamId; diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index eca75ce71a..c029b1871a 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -87,6 +87,7 @@ cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_h cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/udfd ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin +cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin if [ -f "${compile_dir}/build/bin/taosadapter" ]; then cp ${compile_dir}/build/bin/taosadapter ${pkg_dir}${install_home_path}/bin ||: diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index e93af2470a..b846cd447b 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -82,6 +82,7 @@ cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/udfd %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin +cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin if [ -f 
%{_compiledir}/../build-taoskeeper/taoskeeper ]; then cp %{_compiledir}/../build-taoskeeper/taoskeeper %{buildroot}%{homepath}/bin diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 02ebb182fa..ae774a3289 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -621,8 +621,7 @@ function install_share_etc() { ${csudo} cp ${script_dir}/share/srv/* ${service_config_dir} ||: } -function install_log() { - ${csudo}rm -rf ${log_dir} || : +function install_log() { ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} ${csudo}ln -sf ${log_dir} ${install_main_dir}/log @@ -935,7 +934,9 @@ function updateProduct() { install_adapter_service install_adapter_config install_keeper_service - install_keeper_config + if [ "${verMode}" != "cloud" ]; then + install_keeper_config + fi openresty_work=false @@ -1036,8 +1037,9 @@ function installProduct() { install_adapter_service install_adapter_config install_keeper_service - install_keeper_config - + if [ "${verMode}" != "cloud" ]; then + install_keeper_config + fi openresty_work=false diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 1ec83b7b0d..36298a291e 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -401,7 +401,14 @@ echo echo "Do you want to remove all the data, log and configuration files? [y/n]" read answer if [ X$answer == X"y" ] || [ X$answer == X"Y" ]; then + confirmMsg="I confirm that I would like to delete all data, log and configuration files" + echo "Please enter '${confirmMsg}' to continue" + read answer + if [ X"$answer" == X"${confirmMsg}" ]; then remove_data_and_config + else + echo "answer doesn't match, skip this step" + fi fi echo diff --git a/packaging/tools/tdengine.iss b/packaging/tools/tdengine.iss index 8676fa2c51..ca6b5a3e5f 100644 --- a/packaging/tools/tdengine.iss +++ b/packaging/tools/tdengine.iss @@ -39,6 +39,8 @@ Compression=lzma SolidCompression=yes DisableDirPage=yes Uninstallable=yes +ArchitecturesAllowed=x64 +ArchitecturesInstallIn64BitMode=x64 [Languages] Name: "chinesesimp"; MessagesFile: "compiler:Default.isl" @@ -53,6 +55,7 @@ Source: favicon.ico; DestDir: "{app}\include"; Flags: igNoreversion; Source: {#MyAppSourceDir}{#MyAppDLLName}; DestDir: "{win}\System32"; Flags: igNoreversion recursesubdirs createallsubdirs 64bit;Check:IsWin64; Source: {#MyAppSourceDir}{#MyAppCfgName}; DestDir: "{app}\cfg"; Flags: igNoreversion recursesubdirs createallsubdirs onlyifdoesntexist uninsneveruninstall Source: {#MyAppSourceDir}{#MyAppDriverName}; DestDir: "{app}\driver"; Flags: igNoreversion recursesubdirs createallsubdirs +Source: {#MyAppSourceDir}\taos_odbc\*; DestDir: "{app}\taos_odbc\"; Flags: igNoreversion recursesubdirs createallsubdirs ;Source: {#MyAppSourceDir}{#MyAppConnectorName}; DestDir: "{app}\connector"; Flags: igNoreversion recursesubdirs createallsubdirs ;Source: {#MyAppSourceDir}{#MyAppExamplesName}; DestDir: "{app}\examples"; Flags: igNoreversion recursesubdirs createallsubdirs Source: {#MyAppSourceDir}{#MyAppIncludeName}; DestDir: "{app}\include"; Flags: igNoreversion recursesubdirs createallsubdirs @@ -66,6 +69,7 @@ Source: {#MyAppSourceDir}\taosdump.exe; DestDir: "{app}"; DestName: "{#CusPrompt [run] Filename: {sys}\sc.exe; Parameters: "create taosd start= DEMAND binPath= ""C:\\TDengine\\taosd.exe --win_service""" ; Flags: runhidden Filename: {sys}\sc.exe; Parameters: "create taosadapter start= DEMAND binPath= ""C:\\TDengine\\taosadapter.exe""" ; Flags: runhidden +Filename: "{cmd}"; Parameters: "/c odbcconf /F 
""C:\TDengine\taos_odbc\win_odbcinst.in"""; WorkingDir: "{app}"; Flags: runhidden; StatusMsg: "Configuring ODBC" [UninstallRun] RunOnceId: "stoptaosd"; Filename: {sys}\sc.exe; Parameters: "stop taosd" ; Flags: runhidden @@ -95,6 +99,43 @@ begin Result := Pos(';' + Param + ';', ';' + OrigPath + ';') = 0; end; +function DeleteOdbcDsnRegistry: Boolean; +var + Names: TArrayOfString; + I: Integer; + Value: String; +begin + if RegGetValueNames(HKCU64, 'SOFTWARE\ODBC\ODBC.INI\ODBC Data Sources', Names) then + begin + for I := 0 to GetArrayLength(Names) - 1 do + begin + if RegQueryStringValue(HKCU64, 'SOFTWARE\ODBC\ODBC.INI\ODBC Data Sources', Names[I], Value) then + begin + if Value = 'TDengine' then + begin + RegDeleteKeyIncludingSubkeys(HKCU64, 'SOFTWARE\ODBC\ODBC.INI\' + Names[I]); + RegDeleteValue(HKCU64, 'SOFTWARE\ODBC\ODBC.INI\ODBC Data Sources\', Names[I]); + end; + end; + end; + end; + Result := True; +end; + +function DeleteOdbcDriverRegistry: Boolean; +begin + RegDeleteKeyIncludingSubkeys(HKLM64, 'SOFTWARE\ODBC\ODBCINST.INI\TDengine'); + RegDeleteValue(HKLM64, 'SOFTWARE\ODBC\ODBCINST.INI\ODBC Drivers', 'TDengine'); + Result := True; +end; + + +procedure DeinitializeUninstall(); +begin + DeleteOdbcDsnRegistry(); + DeleteOdbcDriverRegistry(); +end; + [UninstallDelete] Name: {app}\driver; Type: filesandordirs Name: {app}\connector; Type: filesandordirs diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 1ce7c02dcf..bb79146f7d 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -1309,6 +1309,10 @@ void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) { } SRequestObj *pRequest = res; + if (TSDB_SQL_RETRIEVE_EMPTY_RESULT == pRequest->type) { + fp(param, res, 0); + return; + } taosAsyncFetchImpl(pRequest, fp, param); } diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 918632b7e7..8cf48d41dc 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -1827,7 +1827,7 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp) char *tb = taosHashIterate(pRsp->readTbs, NULL); while (tb != NULL) { size_t keyLen = 0; - void *key = taosHashGetKey(tb, &keyLen); + void * key = taosHashGetKey(tb, &keyLen); if (tEncodeI32(pEncoder, keyLen) < 0) return -1; if (tEncodeCStr(pEncoder, key) < 0) return -1; @@ -1842,7 +1842,7 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp) tb = taosHashIterate(pRsp->writeTbs, NULL); while (tb != NULL) { size_t keyLen = 0; - void *key = taosHashGetKey(tb, &keyLen); + void * key = taosHashGetKey(tb, &keyLen); if (tEncodeI32(pEncoder, keyLen) < 0) return -1; if (tEncodeCStr(pEncoder, key) < 0) return -1; @@ -1857,7 +1857,7 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp) tb = taosHashIterate(pRsp->alterTbs, NULL); while (tb != NULL) { size_t keyLen = 0; - void *key = taosHashGetKey(tb, &keyLen); + void * key = taosHashGetKey(tb, &keyLen); if (tEncodeI32(pEncoder, keyLen) < 0) return -1; if (tEncodeCStr(pEncoder, key) < 0) return -1; @@ -1872,7 +1872,7 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp) tb = taosHashIterate(pRsp->readViews, NULL); while (tb != NULL) { size_t keyLen = 0; - void *key = taosHashGetKey(tb, &keyLen); + void * key = taosHashGetKey(tb, &keyLen); if (tEncodeI32(pEncoder, keyLen) < 0) return -1; if (tEncodeCStr(pEncoder, key) < 0) return -1; @@ -1887,7 +1887,7 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder 
*pEncoder, SGetUserAuthRsp *pRsp) tb = taosHashIterate(pRsp->writeViews, NULL); while (tb != NULL) { size_t keyLen = 0; - void *key = taosHashGetKey(tb, &keyLen); + void * key = taosHashGetKey(tb, &keyLen); if (tEncodeI32(pEncoder, keyLen) < 0) return -1; if (tEncodeCStr(pEncoder, key) < 0) return -1; @@ -1902,7 +1902,7 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp) tb = taosHashIterate(pRsp->alterViews, NULL); while (tb != NULL) { size_t keyLen = 0; - void *key = taosHashGetKey(tb, &keyLen); + void * key = taosHashGetKey(tb, &keyLen); if (tEncodeI32(pEncoder, keyLen) < 0) return -1; if (tEncodeCStr(pEncoder, key) < 0) return -1; @@ -1917,7 +1917,7 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp) int32_t *useDb = taosHashIterate(pRsp->useDbs, NULL); while (useDb != NULL) { size_t keyLen = 0; - void *key = taosHashGetKey(useDb, &keyLen); + void * key = taosHashGetKey(useDb, &keyLen); if (tEncodeI32(pEncoder, keyLen) < 0) return -1; if (tEncodeCStr(pEncoder, key) < 0) return -1; @@ -8342,7 +8342,7 @@ int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { for (int32_t i = 0; i < pRsp->blockNum; i++) { int32_t bLen = *(int32_t *)taosArrayGet(pRsp->blockDataLen, i); - void *data = taosArrayGetP(pRsp->blockData, i); + void * data = taosArrayGetP(pRsp->blockData, i); if (tEncodeBinary(pEncoder, (const uint8_t *)data, bLen) < 0) return -1; if (pRsp->withSchema) { SSchemaWrapper *pSW = (SSchemaWrapper *)taosArrayGetP(pRsp->blockSchema, i); @@ -8375,7 +8375,7 @@ int32_t tDecodeMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) { } for (int32_t i = 0; i < pRsp->blockNum; i++) { - void *data; + void * data; uint64_t bLen; if (tDecodeBinaryAlloc(pDecoder, &data, &bLen) < 0) return -1; taosArrayPush(pRsp->blockData, &data); @@ -8421,7 +8421,7 @@ int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const STaosxRsp *pRsp) { if (tEncodeI32(pEncoder, pRsp->createTableNum) < 0) return -1; if (pRsp->createTableNum) { for (int32_t i = 0; i < pRsp->createTableNum; i++) { - void *createTableReq = taosArrayGetP(pRsp->createTableReq, i); + void * createTableReq = taosArrayGetP(pRsp->createTableReq, i); int32_t createTableLen = *(int32_t *)taosArrayGet(pRsp->createTableLen, i); if (tEncodeBinary(pEncoder, createTableReq, createTableLen) < 0) return -1; } @@ -8437,7 +8437,7 @@ int32_t tDecodeSTaosxRsp(SDecoder *pDecoder, STaosxRsp *pRsp) { pRsp->createTableLen = taosArrayInit(pRsp->createTableNum, sizeof(int32_t)); pRsp->createTableReq = taosArrayInit(pRsp->createTableNum, sizeof(void *)); for (int32_t i = 0; i < pRsp->createTableNum; i++) { - void *pCreate = NULL; + void * pCreate = NULL; uint64_t len; if (tDecodeBinaryAlloc(pDecoder, &pCreate, &len) < 0) return -1; int32_t l = (int32_t)len; @@ -8739,7 +8739,7 @@ void tDestroySubmitTbData(SSubmitTbData *pTbData, int32_t flag) { taosArrayDestroy(pTbData->aCol); } else { int32_t nRow = TARRAY_SIZE(pTbData->aRowP); - SRow **rows = (SRow **)TARRAY_DATA(pTbData->aRowP); + SRow ** rows = (SRow **)TARRAY_DATA(pTbData->aRowP); for (int32_t i = 0; i < nRow; ++i) { tRowDestroy(rows[i]); diff --git a/source/dnode/mgmt/test/snode/CMakeLists.txt b/source/dnode/mgmt/test/snode/CMakeLists.txt index 70f3054381..5de3e55b0a 100644 --- a/source/dnode/mgmt/test/snode/CMakeLists.txt +++ b/source/dnode/mgmt/test/snode/CMakeLists.txt @@ -5,7 +5,7 @@ target_link_libraries( PUBLIC sut ) -add_test( - NAME dsnodeTest - COMMAND dsnodeTest -) +#add_test( +# NAME dsnodeTest +# COMMAND dsnodeTest +#) diff --git 
a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 58b8e04d3b..62840f7e1f 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -93,6 +93,9 @@ static int32_t mndStreamSeqActionInsert(SSdb *pSdb, SStreamSeq *pStream); static int32_t mndStreamSeqActionDelete(SSdb *pSdb, SStreamSeq *pStream); static int32_t mndStreamSeqActionUpdate(SSdb *pSdb, SStreamSeq *pOldStream, SStreamSeq *pNewStream); +static SSdbRaw *mndStreamActionEncode(SStreamObj *pStream); +static SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw); + int32_t mndInitStream(SMnode *pMnode) { SSdbTable table = { .sdbType = SDB_STREAM, @@ -2932,6 +2935,7 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { tDecoderInit(&decoder, pReq->pCont, pReq->contLen); if (tDecodeStreamHbMsg(&decoder, &req) < 0) { + streamMetaClearHbMsg(&req); tDecoderClear(&decoder); terrno = TSDB_CODE_INVALID_MSG; return -1; @@ -3039,9 +3043,8 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) { } taosThreadMutexUnlock(&execInfo.lock); + streamMetaClearHbMsg(&req); - taosArrayDestroy(req.pTaskStatus); - taosArrayDestroy(req.pUpdateNodes); return TSDB_CODE_SUCCESS; } diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 427d5dd5d7..fd179dba2e 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -4184,6 +4184,8 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) { SReaderStatus* pStatus = &pReader->status; STableBlockScanInfo* pBlockScanInfo = NULL; + pReader->status.suspendInvoked = true; // record the suspend status + if (pStatus->loadFromFile) { SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter); if (pBlockInfo != NULL) { @@ -4353,7 +4355,7 @@ static int32_t doTsdbNextDataBlockFilesetDelimited(STsdbReader* pReader) { return code; } - tsdbTrace("block from file rows: %"PRId64", will process pre-file set buffer: %d. %s", + tsdbTrace("block from file rows: %"PRId64", will process pre-file set buffer: %d. %s", pBlock->info.rows, pStatus->bProcMemFirstFileset, pReader->idStr); if (pStatus->bProcMemPreFileset) { if (pBlock->info.rows > 0) { @@ -4367,7 +4369,7 @@ static int32_t doTsdbNextDataBlockFilesetDelimited(STsdbReader* pReader) { pStatus->bProcMemPreFileset = false; } } - + if (pBlock->info.rows <= 0) { resetTableListIndex(&pReader->status); int64_t endKey = (ASCENDING_TRAVERSE(pReader->info.order)) ? 
INT64_MAX : INT64_MIN; diff --git a/source/dnode/vnode/src/tsdb/tsdbReadUtil.h b/source/dnode/vnode/src/tsdb/tsdbReadUtil.h index 3679015e9c..39e65f22b1 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReadUtil.h +++ b/source/dnode/vnode/src/tsdb/tsdbReadUtil.h @@ -197,6 +197,7 @@ typedef struct SFileBlockDumpInfo { } SFileBlockDumpInfo; typedef struct SReaderStatus { + bool suspendInvoked; bool loadFromFile; // check file stage bool composedDataBlock; // the returned data block is a composed block or not SSHashObj* pTableMap; // SHash @@ -210,7 +211,7 @@ typedef struct SReaderStatus { SArray* pLDataIterArray; SRowMerger merger; SColumnInfoData* pPrimaryTsCol; // primary time stamp output col info data - // the following for preceeds fileset memory processing + // the following for preceeds fileset memory processing // TODO: refactor into seperate struct bool bProcMemPreFileset; int64_t memTableMaxKey; diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 254b6de5b8..da851da5d9 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -1007,10 +1007,18 @@ static bool waitForEnoughDuration(SMetaHbInfo* pInfo) { return false; } -static void clearHbMsg(SStreamHbMsg* pMsg, SArray* pIdList) { - taosArrayDestroy(pMsg->pTaskStatus); - taosArrayDestroy(pMsg->pUpdateNodes); - taosArrayDestroy(pIdList); +void streamMetaClearHbMsg(SStreamHbMsg* pMsg) { + if (pMsg == NULL) { + return; + } + + if (pMsg->pUpdateNodes != NULL) { + taosArrayDestroy(pMsg->pUpdateNodes); + } + + if (pMsg->pTaskStatus != NULL) { + taosArrayDestroy(pMsg->pTaskStatus); + } } static bool existInHbMsg(SStreamHbMsg* pMsg, SDownstreamTaskEpset* pTaskEpset) { @@ -1189,7 +1197,8 @@ void metaHbToMnode(void* param, void* tmrId) { } _end: - clearHbMsg(&hbMsg, pIdList); + streamMetaClearHbMsg(&hbMsg); + taosArrayDestroy(pIdList); taosTmrReset(metaHbToMnode, META_HB_CHECK_INTERVAL, param, streamTimer, &pMeta->pHbInfo->hbTmr); taosReleaseRef(streamMetaId, rid); } diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index 017969b4e5..362d38b505 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -1498,6 +1498,7 @@ int transSendResponse(const STransMsg* msg) { return 0; } SExHandle* exh = msg->info.handle; + if (exh == NULL) { rpcFreeCont(msg->pCont); return 0; diff --git a/tests/script/tsim/stream/basic4.sim b/tests/script/tsim/stream/basic4.sim index b4e3d62545..d2bf321ad5 100644 --- a/tests/script/tsim/stream/basic4.sim +++ b/tests/script/tsim/stream/basic4.sim @@ -80,6 +80,7 @@ sql use test2; sql create stable st(ts timestamp, a int, b int , c int, d double) tags(ta int,tb int,tc int); sql create table t1 using st tags(1,1,1); sql create stream streams2 trigger at_once ignore expired 0 ignore update 0 waterMark 200s into streamt2 as select _wstart, count(*) c1 from t1 interval(1s); +sleep 1000 sql insert into t1 values(1648791211000,1,2,3,1.0); sql insert into t1 values(1648791212001,2,2,3,1.1);