merge 3.0
commit 9380b22394
@ -52,7 +52,7 @@ TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBench

### Ubuntu 18.04 及以上版本 & Debian:

```bash
sudo apt-get install -y gcc cmake build-essential git libssl-dev
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
```

#### 为 taos-tools 安装编译需要的软件

@ -60,7 +60,7 @@ To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in t

### Ubuntu 18.04 and above or Debian

```bash
sudo apt-get install -y gcc cmake build-essential git libssl-dev
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
```

#### Install build dependencies for taosTools
@ -1,6 +1,6 @@

cmake_minimum_required(VERSION 3.0)

set(CMAKE_VERBOSE_MAKEFILE ON)
set(CMAKE_VERBOSE_MAKEFILE OFF)
set(TD_BUILD_TAOSA_INTERNAL FALSE)

#set output directory

@ -119,6 +119,9 @@ ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
MESSAGE(STATUS "Compile with Address Sanitizer!")
ELSEIF (${BUILD_RELEASE})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
@ -171,3 +171,8 @@ option(
ON
)

option(
BUILD_RELEASE
"If build release version"
OFF
)

@ -1,8 +1,8 @@

# rocksdb
ExternalProject_Add(rocksdb
GIT_REPOSITORY https://github.com/taosdata-contrib/rocksdb.git
GIT_TAG v6.23.3
GIT_REPOSITORY https://github.com/facebook/rocksdb.git
GIT_TAG v8.1.1
SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG ae8d51c
GIT_TAG 565ca21
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

@ -1,6 +1,8 @@
message("contrib test/rocksdb:" ${BUILD_DEPENDENCY_TESTS})

add_executable(rocksdbTest "")
target_sources(rocksdbTest
    PRIVATE
    "${CMAKE_CURRENT_SOURCE_DIR}/main.c"
)
target_link_libraries(rocksdbTest rocksdb)
target_link_libraries(rocksdbTest rocksdb)
@ -116,9 +116,20 @@ int main(int argc, char const *argv[]) {
  rocksdb_options_set_create_if_missing(opt, 1);
  rocksdb_options_set_create_missing_column_families(opt, 1);

  const char *cfName[] = {"default", "cf1"};
  int len = sizeof(cfName) / sizeof(cfName[0]);
  // Read
  rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
  // rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
  int len = 1;
  char buf[256] = {0};
  size_t vallen = 0;
  char *val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
  snprintf(buf, vallen + 5, "val:%s", val);
  printf("%ld %ld %s\n", strlen(val), vallen, buf);

  char **cfName = calloc(len, sizeof(char *));
  for (int i = 0; i < len; i++) {
    cfName[i] = "test";
  }
  const rocksdb_options_t **cfOpt = malloc(len * sizeof(rocksdb_options_t *));
  for (int i = 0; i < len; i++) {
    cfOpt[i] = rocksdb_options_create_copy(opt);
@ -129,7 +140,7 @@ int main(int argc, char const *argv[]) {
  }

  rocksdb_column_family_handle_t **cfHandle = malloc(len * sizeof(rocksdb_column_family_handle_t *));
  db = rocksdb_open_column_families(opt, path, len, cfName, cfOpt, cfHandle, &err);
  db = rocksdb_open_column_families(opt, path, len, (const char *const *)cfName, cfOpt, cfHandle, &err);

  {
    rocksdb_readoptions_t *rOpt = rocksdb_readoptions_create();
@ -302,4 +313,4 @@ int main(int argc, char const *argv[]) {
  // rocksdb_close(db);

  return 0;
}
}
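
For readers skimming this change, the following is a minimal, self-contained sketch of the column-family flow that the updated `main.c` exercises against RocksDB 8.x: open a database with the `default` and `cf1` families, write and read through a family handle, then release the handles before closing. The path `/tmp/rocksdb_cf_demo` and the key/value strings are illustrative assumptions, not part of the commit.

```c
#include <stdio.h>
#include <stdlib.h>
#include <rocksdb/c.h>

int main(void) {
  char *err = NULL;
  const char *path = "/tmp/rocksdb_cf_demo";  /* illustrative path */
  const char *const cfNames[] = {"default", "cf1"};
  const int cfCount = sizeof(cfNames) / sizeof(cfNames[0]);

  /* Create the DB and any missing column families on open. */
  rocksdb_options_t *opt = rocksdb_options_create();
  rocksdb_options_set_create_if_missing(opt, 1);
  rocksdb_options_set_create_missing_column_families(opt, 1);

  /* One options object per column family; here they simply share the settings. */
  const rocksdb_options_t *const cfOpts[] = {opt, opt};
  rocksdb_column_family_handle_t *cfHandles[2] = {NULL, NULL};

  rocksdb_t *db = rocksdb_open_column_families(opt, path, cfCount, cfNames, cfOpts, cfHandles, &err);
  if (err != NULL) {
    fprintf(stderr, "open failed: %s\n", err);
    free(err);
    return 1;
  }

  /* Write one key into cf1 and read it back through the same handle. */
  rocksdb_writeoptions_t *wOpt = rocksdb_writeoptions_create();
  rocksdb_readoptions_t *rOpt = rocksdb_readoptions_create();
  rocksdb_put_cf(db, wOpt, cfHandles[1], "key", 3, "value", 5, &err);

  size_t vallen = 0;
  char *val = rocksdb_get_cf(db, rOpt, cfHandles[1], "key", 3, &vallen, &err);
  printf("cf1[key] = %.*s\n", (int)vallen, val ? val : "");
  free(val);

  /* Release the handles before closing the database. */
  for (int i = 0; i < cfCount; i++) {
    rocksdb_column_family_handle_destroy(cfHandles[i]);
  }
  rocksdb_writeoptions_destroy(wOpt);
  rocksdb_readoptions_destroy(rOpt);
  rocksdb_close(db);
  rocksdb_options_destroy(opt);
  return 0;
}
```

Unlike the test above, this sketch checks `err` only after the open call; a production caller would check it after every operation.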
@ -5,7 +5,7 @@ description: This website contains the user manuals for TDengine, an open-source
slug: /
---

TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It’s written mainly for architects, developers, and system administrators.
TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It's written mainly for architects, developers, and system administrators.

To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.

@ -44,7 +44,7 @@ For more details on features, please read through the entire documentation.

## Competitive Advantages

By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb), with the following advantages.
By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb/), with the following advantages.

- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.

@ -57,7 +57,7 @@ By making full use of [characteristics of time series data](https://tdengine.com

- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.

- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine's core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.

With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced.
@ -109,8 +109,8 @@ As a high-performance, scalable and SQL supported time-series database, TDengine

| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | --------------- |
| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
| Very large total processing capacity | | | √ | TDengine's cluster functions can easily improve processing capacity via multi-server coordination. |
| Extremely high-speed data processing | | | √ | TDengine's storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |

### System Maintenance Requirements

@ -123,13 +123,12 @@ As a high-performance, scalable and SQL supported time-series database, TDengine

## Comparison with other databases

- [Writing Performance Comparison of TDengine and InfluxDB ](https://tdengine.com/performance-comparison-of-tdengine-and-influxdb/)
- [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/query-performance-comparison-test-report-tdengine-vs-influxdb/)
- [TDengine vs OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
- [TDengine vs Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)
- [TDengine vs InfluxDB](https://tdengine.com/performance-tdengine-vs-influxdb/)
- [TDengine vs. InfluxDB](https://tdengine.com/tsdb-comparison-influxdb-vs-tdengine/)
- [TDengine vs. TimescaleDB](https://tdengine.com/tsdb-comparison-timescaledb-vs-tdengine/)
- [TDengine vs. OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
- [TDengine vs. Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)

## More readings
- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
- [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/)
@ -127,7 +127,7 @@ To make full use of time-series data characteristics, TDengine adopts a strategy

If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**

TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won’t build the index on any metrics stored. Column wise storage is used.
TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won't build the index on any metrics stored. Column wise storage is used.

Complex devices, such as connected cars, may have multiple DCPs. In this case, multiple tables are created for a single device, one table per DCP.
@ -6,7 +6,7 @@ description: This document describes how to install TDengine in a Docker contain

This document describes how to install TDengine in a Docker container and perform queries and inserts.

- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
- The easiest way to explore TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
- To get started with TDengine in a non-containerized environment, see [Quick Install from Package](../../get-started/package).
- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).

@ -10,7 +10,7 @@ import PkgListV3 from "/components/PkgListV3";

This document describes how to install TDengine on Linux/Windows/macOS and perform queries and inserts.

- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
- The easiest way to explore TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
- To get started with TDengine on Docker, see [Quick Install on Docker](../../get-started/docker).
- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
@ -102,7 +102,7 @@ sudo apt-get install tdengine

:::tip
This installation method is supported only for Debian and Ubuntu.
::::
:::
</TabItem>
<TabItem label="Windows" value="windows">

@ -208,6 +208,8 @@ The following `launchctl` commands can help you manage TDengine service:

- Check TDengine Server status: `sudo launchctl list | grep taosd`

- Check TDengine Server status details: `launchctl print system/com.tdengine.taosd`

:::info
- Please use `sudo` to run `launchctl` to manage _com.tdengine.taosd_ with administrator privileges.
- The administrator privilege is required for service management to enhance security.
@ -12,4 +12,4 @@ When using REST connection, the feature of bulk pulling can be enabled if the si
{{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}}
```

More configuration about connection,please refer to [Java Connector](/reference/connector/java)
More configuration about connection, please refer to [Java Connector](/reference/connector/java)

@ -1,3 +1,3 @@
```php title="原生连接"
```php title=""native"
{{#include docs/examples/php/connect.php}}
```
@ -33,7 +33,7 @@ There are two ways for a connector to establish connections to TDengine:

For REST and native connections, connectors provide similar APIs for performing operations and running SQL statements on your databases. The main difference is the method of establishing the connection, which is not visible to users.

Key differences:
Key differences:

3. The REST connection is more accessible with cross-platform support, however it results in a 30% performance downgrade.
1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc.

@ -198,7 +198,7 @@ The sample code below are based on dotnet6.0, they may need to be adjusted if yo
<TabItem label="R" value="r">

1. Download [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.0.0/).
2. Install the dependency package `RJDBC`:
2. Install the dependency package `RJDBC`:

```R
install.packages("RJDBC")
@ -213,7 +213,7 @@ If the client driver (taosc) is already installed, then the C connector is alrea
</TabItem>
<TabItem label="PHP" value="php">

**Download Source Code Package and Unzip:**
**Download Source Code Package and Unzip: **

```shell
curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \

@ -223,13 +223,13 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive

> Version number `v1.0.2` is only for example, it can be replaced to any newer version, please check available version from [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).

**Non-Swoole Environment:**
**Non-Swoole Environment: **

```shell
phpize && ./configure && make -j && make install
```

**Specify TDengine Location:**
**Specify TDengine Location: **

```shell
phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
@ -238,7 +238,7 @@ phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 &&
> `--with-tdengine-dir=` is followed by the TDengine installation location.
> This way is useful in case TDengine location can't be found automatically or macOS.

**Swoole Environment:**
**Swoole Environment: **

```shell
phpize && ./configure --enable-swoole && make -j && make install
@ -288,6 +288,6 @@ Prior to establishing connection, please make sure TDengine is already running a
</Tabs>

:::tip
If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.tdengine.com/train-faq/faq).
If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](../../train-faq/faq).

:::
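
Complementing the connector examples touched by this change, here is a hedged C sketch of the same connectivity sanity check through the native client driver (taosc). The host `localhost`, port 6030, and the `root`/`taosdata` credentials are the stock defaults used throughout these docs and are assumptions to replace with your deployment's values.

```c
#include <stdio.h>
#include <taos.h>

int main(void) {
  /* NULL database name: connect without selecting a default database. */
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect to TDengine (check FQDN/firewall as noted above)\n");
    return 1;
  }

  /* A trivial query to confirm the server is reachable. */
  TAOS_RES *res = taos_query(conn, "SELECT SERVER_VERSION()");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  } else {
    TAOS_ROW row = taos_fetch_row(res);
    if (row != NULL && row[0] != NULL) {
      printf("connected, server version: %s\n", (const char *)row[0]);
    }
  }

  taos_free_result(res);
  taos_close(conn);
  taos_cleanup();
  return 0;
}
```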
@ -69,7 +69,7 @@ For more details please refer to [InfluxDB Line Protocol](https://docs.influxdat

## Query Examples

If you want query the data of `location=California.LosAngeles,groupid=2`,here is the query SQL:
If you want query the data of `location=California.LosAngeles,groupid=2`, here is the query SQL:

```sql
SELECT * FROM meters WHERE location = "California.LosAngeles" AND groupid = 2;

@ -84,7 +84,7 @@ Query OK, 4 row(s) in set (0.005399s)

## Query Examples

If you want query the data of `location=California.LosAngeles groupid=3`,here is the query SQL:
If you want query the data of `location=California.LosAngeles groupid=3`, here is the query SQL:

```sql
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;

@ -97,7 +97,7 @@ Query OK, 2 row(s) in set (0.004076s)

## Query Examples

If you want query the data of "tags": {"location": "California.LosAngeles", "groupid": 1},here is the query SQL:
If you want query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}, here is the query SQL:

```sql
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
@ -49,7 +49,7 @@ If the data source is Kafka, then the application program is a consumer of Kafka

On the server side, database configuration parameter `vgroups` needs to be set carefully to maximize the system performance. If it's set too low, the system capability can't be utilized fully; if it's set too big, unnecessary resource competition may be produced. A normal recommendation for `vgroups` parameter is 2 times of the number of CPU cores. However, depending on the actual system resources, it may still need to tuned.

For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config)。
For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config).

## Sample Programs

@ -98,7 +98,7 @@ The main Program is responsible for:
3. Start reading threads
4. Output writing speed every 10 seconds

The main program provides 4 parameters for tuning:
The main program provides 4 parameters for tuning:

1. The number of reading threads, default value is 1
2. The number of writing threads, default value is 2

@ -192,7 +192,7 @@ TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"

If you want to launch the sample program on a remote server, please follow below steps:

1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java` :
1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java`:
```
mvn package
```

@ -385,7 +385,7 @@ SQLWriter class encapsulates the logic of composing SQL and writing data. Please
pip3 install faster-fifo
```

3. Click the "Copy" in the above sample programs to copy `fast_write_example.py` 、 `sql_writer.py` and `mockdatasource.py`.
3. Click the "Copy" in the above sample programs to copy `fast_write_example.py`, `sql_writer.py`, and `mockdatasource.py`.

4. Execute the program
@ -1,4 +1,4 @@
### python Kafka 客户端
### python Kafka client

For python kafka client, please refer to [kafka client](https://cwiki.apache.org/confluence/display/KAFKA/Clients#Clients-Python). In this document, we use [kafka-python](http://github.com/dpkp/kafka-python).

@ -88,7 +88,7 @@ In addition to python's built-in multithreading and multiprocessing library, we
<details>
<summary>kafka_example_consumer</summary>

`kafka_example_consumer` is `consumer`,which is responsible for consuming data from kafka and writing it to TDengine.
`kafka_example_consumer` is `consumer`, which is responsible for consuming data from kafka and writing it to TDengine.

```py
{{#include docs/examples/python/kafka_example_consumer.py}}

@ -0,0 +1,3 @@
```rust
{{#include docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs}}
```
@ -20,10 +20,10 @@ import CAsync from "./_c_async.mdx";

## Introduction

SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine:
SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine:

- Query on single column or multiple columns
- Filter on tags or data columns:>, <, =, <\>, like
- Filter on tags or data columns: >, <, =, <\>, like
- Grouping of results: `Group By` - Sorting of results: `Order By` - Limit the number of results: `Limit/Offset`
- Windowed aggregate queries for time windows (interval), session windows (session), and state windows (state_window)
- Arithmetic on columns of numeric types or aggregate results

@ -160,7 +160,7 @@ In the section describing [Insert](/develop/insert-data/sql-writing), a database
:::note

1. With either REST connection or native connection, the above sample code works well.
2. Please note that `use db` can't be used in case of REST connection because it's stateless.
2. Please note that `use db` can't be used in case of REST connection because it's stateless. You can specify the database name by either the REST endpoint's parameter or <db_name>.<table_name> in the SQL command.

:::
@ -23,7 +23,7 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i

To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.

Tips:The default data subscription is to consume data from the wal. If the wal is deleted, the consumed data will be incomplete. At this time, you can set the parameter experimental.snapshot.enable to true to obtain all data from the tsdb, but in this way, the consumption order of the data cannot be guaranteed. Therefore, it is recommended to set a reasonable retention policy for WAL based on your consumption situation to ensure that you can subscribe all data from WAL.
Tips: Data subscription is to consume data from the wal. If some wal files are deleted according to WAL retention policy, the deleted data can't be consumed any more. So you need to set a reasonable value for parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consume the data in a timely way to make sure there is no data loss. This behavior is similar to Kafka and other widely used message queue products.

## Data Schema and API

@ -222,7 +222,7 @@ A database including one supertable and two subtables is created as follows:

```sql
DROP DATABASE IF EXISTS tmqdb;
CREATE DATABASE tmqdb;
CREATE DATABASE tmqdb WAL_RETENTION_PERIOD 3600;
CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16)) TAGS(t1 INT, t3 VARCHAR(16));
CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0");
CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");

@ -294,7 +294,6 @@ You configure the following parameters when creating a consumer:
| `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
| `enable.auto.commit` | boolean | Commit automatically; true: user application doesn't need to explicitly commit; false: user application need to handle commit by itself | Default value is true |
| `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds |
| `experimental.snapshot.enable` | boolean | Specify whether to consume data in TSDB; true: both data in WAL and in TSDB can be consumed; false: only data in WAL can be consumed | default value: false |
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages | default value: false

The method of specifying these parameters depends on the language used:
@ -312,7 +311,6 @@ tmq_conf_set(conf, "group.id", "cgrpName");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "auto.offset.reset", "earliest");
tmq_conf_set(conf, "experimental.snapshot.enable", "true");
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
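
To put the configuration calls above in context, the fragment below is a hedged sketch of the rest of the consumer life cycle in C: build the consumer from the `tmq_conf_t`, subscribe, poll, and shut down. The topic name `tmqdb_topic`, the poll count, and the error handling are illustrative assumptions rather than part of this change; `tmq_commit_cb_print` refers to the callback registered above.

```c
/* Continues from the tmq_conf_t *conf configured above (illustrative sketch). */
tmq_t *tmq = tmq_consumer_new(conf, NULL, 0);
tmq_conf_destroy(conf);

/* Subscribe to a previously created topic; the name is an assumption. */
tmq_list_t *topics = tmq_list_new();
tmq_list_append(topics, "tmqdb_topic");
int32_t code = tmq_subscribe(tmq, topics);
tmq_list_destroy(topics);

if (code == 0) {
  for (int i = 0; i < 10; i++) {
    /* Poll with a 1000 ms timeout; NULL means no message arrived in time. */
    TAOS_RES *msg = tmq_consumer_poll(tmq, 1000);
    if (msg == NULL) continue;
    /* ... consume rows from msg, e.g. with taos_fetch_row() ... */
    taos_free_result(msg);
  }
}

tmq_unsubscribe(tmq);
tmq_consumer_close(tmq);
```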
@ -368,7 +366,6 @@ conf := &tmq.ConfigMap{
  "td.connect.port": "6030",
  "client.id": "test_tmq_c",
  "enable.auto.commit": "false",
  "experimental.snapshot.enable": "true",
  "msg.with.table.name": "true",
}
consumer, err := NewConsumer(conf)

@ -416,7 +413,6 @@ Python programs use the following parameters:
| `enable.auto.commit` | string | Commit automatically | pecify `true` or `false` |
| `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | |
| `auto.offset.reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
| `experimental.snapshot.enable` | string | Specify whether it's allowed to consume messages from the WAL or from TSDB | Specify `true` or `false` |
| `enable.heartbeat.background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false` |

</TabItem>
@ -6,10 +6,12 @@ description: This document describes how to create user-defined functions (UDF),

The built-in functions of TDengine may not be sufficient for the use cases of every application. In this case, you can define custom functions for use in TDengine queries. These are known as user-defined functions (UDF). A user-defined function takes one column of data or the result of a subquery as its input.

TDengine supports user-defined functions written in C or C++. This document describes the usage of user-defined functions.

User-defined functions can be scalar functions or aggregate functions. Scalar functions, such as `abs`, `sin`, and `concat`, output a value for every row of data. Aggregate functions, such as `avg` and `max` output one value for multiple rows of data.

TDengine supports user-defined functions written in C or Python. This document describes the usage of user-defined functions.

## Implement a UDF in C

When you create a user-defined function, you must implement standard interface functions:
- For scalar functions, implement the `scalarfn` interface function.
- For aggregate functions, implement the `aggfn_start`, `aggfn`, and `aggfn_finish` interface functions.

@ -17,7 +19,7 @@ When you create a user-defined function, you must implement standard interface f

There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name\>_start, <udf-name\>_finish, <udf-name\>_init, and <udf-name\>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.

## Implementing a Scalar Function
### Implementing a Scalar Function in C
The implementation of a scalar function is described as follows:
```c
#include "taos.h"

@ -49,7 +51,7 @@ int32_t scalarfn_destroy() {
```
Replace `scalarfn` with the name of your function.

## Implementing an Aggregate Function
### Implementing an Aggregate Function in C

The implementation of an aggregate function is described as follows:
```c

@ -100,7 +102,7 @@ int32_t aggfn_destroy() {
```
Replace `aggfn` with the name of your function.

## Interface Functions
### UDF Interface Definition in C

There are strict naming conventions for interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name\>_start, <udf-name\>_finish, <udf-name\>_init, and <udf-name\>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.

@ -108,8 +110,7 @@ Interface functions return a value that indicates whether the operation was succ

For information about the parameters for interface functions, see Data Model

### Interfaces for Scalar Functions

#### Scalar Interface
`int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`

Replace `scalarfn` with the name of your function. This function performs scalar calculations on data blocks. You can configure a value through the parameters in the `resultColumn` structure.
@ -118,7 +119,7 @@ The parameters in the function are defined as follows:
- inputDataBlock: The data block to input.
- resultColumn: The column to output. The column to output.
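
As a concrete illustration of this scalar interface, the sketch below implements a hypothetical `plus_one` UDF over a single INT column. The `udfColData*` helpers and the `DLL_EXPORT` macro follow the `taosudf.h` helpers referenced later in this section, but treat the exact names and error codes as assumptions to verify against the bit_and.c sample.

```c
#include "taos.h"
#include "taoserror.h"
#include "taosudf.h"

/* plus_one(col): returns col + 1 for a single INT column; NULL in, NULL out. */
DLL_EXPORT int32_t plus_one_init() { return 0; }
DLL_EXPORT int32_t plus_one_destroy() { return 0; }

DLL_EXPORT int32_t plus_one(SUdfDataBlock *inputDataBlock, SUdfColumn *resultColumn) {
  if (inputDataBlock->numCols != 1) {
    return TSDB_CODE_UDF_INVALID_INPUT;
  }
  SUdfColumn *col = inputDataBlock->udfCols[0];
  for (int32_t i = 0; i < inputDataBlock->numOfRows; ++i) {
    if (udfColDataIsNull(col, i)) {
      udfColDataSetNull(resultColumn, i);
      continue;
    }
    int32_t out = *(int32_t *)udfColDataGetData(col, i) + 1;
    udfColDataSet(resultColumn, i, (char *)&out, false);
  }
  resultColumn->colData.numOfRows = inputDataBlock->numOfRows;
  return TSDB_CODE_SUCCESS;
}
```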
### Interfaces for Aggregate Functions
#### Aggregate Interface

`int32_t aggfn_start(SUdfInterBuf *interBuf)`

@ -126,7 +127,7 @@ The parameters in the function are defined as follows:

`int32_t aggfn_finish(SUdfInterBuf* interBuf, SUdfInterBuf *result)`

Replace `aggfn` with the name of your function. In the function, aggfn_start is called to generate a result buffer. Data is then divided between multiple blocks, and aggfn is called on each block to update the result. Finally, aggfn_finish is called to generate final results from the intermediate results. The final result contains only one or zero data points.
Replace `aggfn` with the name of your function. In the function, aggfn_start is called to generate a result buffer. Data is then divided between multiple blocks, and the `aggfn` function is called on each block to update the result. Finally, aggfn_finish is called to generate the final results from the intermediate results. The final result contains only one or zero data points.

The parameters in the function are defined as follows:
- interBuf: The intermediate result buffer.

@ -135,15 +136,15 @@ The parameters in the function are defined as follows:
- result: The final result.

### Initializing and Terminating User-Defined Functions
#### Initialization and Cleanup Interface
`int32_t udf_init()`

`int32_t udf_destroy()`

Replace `udf`with the name of your function. udf_init initializes the function. udf_destroy terminates the function. If it is not necessary to initialize your function, udf_init is not required. If it is not necessary to terminate your function, udf_destroy is not required.
Replace `udf` with the name of your function. udf_init initializes the function. udf_destroy terminates the function. If it is not necessary to initialize your function, udf_init is not required. If it is not necessary to terminate your function, udf_destroy is not required.

## Data Structure of User-Defined Functions
### Data Structures for UDF in C
```c
typedef struct SUdfColumnMeta {
  int16_t type;
@ -193,7 +194,7 @@ typedef struct SUdfInterBuf {
```
The data structure is described as follows:

- The SUdfDataBlock block includes the number of rows (numOfRows) and number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn.
- The SUdfDataBlock block includes the number of rows (numOfRows) and the number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn.
- SUdfColumn includes the definition of the data type of the column (colMeta) and the data in the column (colData).
- The member definitions of SUdfColumnMeta are the same as the data type definitions in `taos.h`.
- The data in SUdfColumnData can become longer. varLenCol indicates variable-length data, and fixLenCol indicates fixed-length data.

@ -201,9 +202,9 @@ The data structure is described as follows:

Additional functions are defined in `taosudf.h` to make it easier to work with these structures.

## Compile UDF
### Compiling C UDF

To use your user-defined function in TDengine, first compile it to a dynamically linked library (DLL).
To use your user-defined function in TDengine, first, compile it to a shared library.

For example, the sample UDF `bit_and.c` can be compiled into a DLL as follows:

@ -213,12 +214,9 @@ gcc -g -O0 -fPIC -shared bit_and.c -o libbitand.so

The generated DLL file `libbitand.so` can now be used to implement your function. Note: GCC 7.5 or later is required.

## Manage and Use User-Defined Functions
After compiling your function into a DLL, you add it to TDengine. For more information, see [User-Defined Functions](../12-taos-sql/26-udf.md).
### UDF Sample Code in C

## Sample Code

### Sample scalar function: [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)
#### Scalar function: [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)

The bit_and function implements bitwise addition for multiple columns. If there is only one column, the column is returned. The bit_and function ignores null values.
@ -231,7 +229,7 @@ The bit_and function implements bitwise addition for multiple columns. If there

</details>

### Sample aggregate function: [l2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/l2norm.c)
#### Aggregate function 1: [l2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/l2norm.c)

The l2norm function finds the second-order norm for all data in the input column. This squares the values, takes a cumulative sum, and finds the square root.

@ -243,3 +241,151 @@ The l2norm function finds the second-order norm for all data in the input column
```

</details>

#### Aggregate function 2: [max_vol](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/max_vol.c)

The max_vol function returns a string concatenating the deviceId column, the row number and column number of the maximum voltage and the maximum voltage given several voltage columns as input.

Create Table:
```bash
create table battery(ts timestamp, vol1 float, vol2 float, vol3 float, deviceId varchar(16));
```
Create the UDF:
```bash
create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C';
```
Use the UDF in the query:
```bash
select max_vol(vol1,vol2,vol3,deviceid) from battery;
```

<details>
<summary>max_vol.c</summary>

```c
{{#include tests/script/sh/max_vol.c}}
```

</details>
## Implement a UDF in Python

Implement the specified interface functions when implementing a UDF in Python.
- implement `process` function for the scalar UDF.
- implement `start`, `reduce`, `finish` for the aggregate UDF.
- implement `init` for initialization and `destroy` for termination.

### Implement a Scalar UDF in Python

The implementation of a scalar UDF is described as follows:

```Python
def init():
    # initialization
def destroy():
    # destroy
def process(input: datablock) -> tuple[output_type]:
    # process input datablock,
    # datablock.data(row, col) is to access the python object in location(row,col)
    # return tuple object consisted of object of type outputtype
```

### Implement an Aggregate UDF in Python

The implementation of an aggregate function is described as follows:

```Python
def init():
    #initialization
def destroy():
    #destroy
def start() -> bytes:
    #return serialize(init_state)
def reduce(inputs: datablock, buf: bytes) -> bytes
    # deserialize buf to state
    # reduce the inputs and state into new_state.
    # use inputs.data(i,j) to access python ojbect of location(i,j)
    # serialize new_state into new_state_bytes
    return new_state_bytes
def finish(buf: bytes) -> output_type:
    #return obj of type outputtype
```
### Python UDF Interface Definition

#### Scalar interface
```Python
def process(input: datablock) -> tuple[output_type]:
```
- `input` is a data block two-dimension matrix-like object, of which method `data(row, col)` returns the Python object located at location (`row`, `col`)
- return a Python tuple object, of which each item is a Python object of type `output_type`

#### Aggregate Interface
```Python
def start() -> bytes:
def reduce(input: datablock, buf: bytes) -> bytes
def finish(buf: bytes) -> output_type:
```

- first `start()` is called to return the initial result in type `bytes`
- then the input data are divided into multiple data blocks and for each block `input`, `reduce` is called with the data block `input` and the current result `buf` bytes and generates a new intermediate result buffer.
- finally, the `finish` function is called on the intermediate result `buf` and outputs 0 or 1 data of type `output_type`

#### Initialization and Cleanup Interface
```Python
def init()
def destroy()
```
Implement `init` for initialization and `destroy` for termination.

### Data Mapping between TDengine SQL and Python UDF

The following table describes the mapping between TDengine SQL data type and Python UDF Data Type. The `NULL` value of all TDengine SQL types is mapped to the `None` value in Python.

| **TDengine SQL Data Type** | **Python Data Type** |
| :-----------------------: | ------------ |
|TINYINT / SMALLINT / INT / BIGINT | int |
|TINYINT UNSIGNED / SMALLINT UNSIGNED / INT UNSIGNED / BIGINT UNSIGNED | int |
|FLOAT / DOUBLE | float |
|BOOL | bool |
|BINARY / VARCHAR / NCHAR | bytes|
|TIMESTAMP | int |
|JSON and other types | Not Supported |
### Installing Python UDF
1. Install Python package `taospyudf` that executes Python UDF
```bash
sudo pip install taospyudf
ldconfig
```
2. If PYTHONPATH is needed to find Python packages when the Python UDF executes, include the PYTHONPATH contents into the udfdLdLibPath variable of the taos.cfg configuration file

### Python UDF Sample Code
#### Scalar Function [pybitand](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pybitand.py)

The `pybitand` function implements bitwise addition for multiple columns. If there is only one column, the column is returned. The `pybitand` function ignores null values.

<details>
<summary>pybitand.py</summary>

```Python
{{#include tests/script/sh/pybitand.py}}
```

</details>

#### Aggregate Function [pyl2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pyl2norm.py)

The `pyl2norm` function finds the second-order norm for all data in the input column. This squares the values, takes a cumulative sum, and finds the square root.
<details>
<summary>pyl2norm.py</summary>

```c
{{#include tests/script/sh/pyl2norm.py}}
```

</details>

## Manage and Use UDF
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../12-taos-sql/26-udf.md).
@ -11,7 +11,7 @@ When using TDengine to store and query data, the most important part of the data
- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`.
- Internal function `NOW` can be used to get the current timestamp on the client side.
- The current timestamp of the client side is applied when `NOW` is used to insert data.
- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00.
- Epoch Time: timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00.
- Add/subtract operations can be carried out on timestamps. For example `NOW-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `SELECT * FROM t1 WHERE ts > NOW-2w AND ts <= NOW-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.

Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanonseconds.
@ -24,24 +24,24 @@ CREATE DATABASE db_name PRECISION 'ns';

In TDengine, the data types below can be used when specifying a column or tag.

| # | **type** | **Bytes** | **Description** |
| --- | :--------------: | ------------ | --------------- |
| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported. |
| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1]. |
| 3 | INT UNSIGNED | 4 | Unsigned integer, the value range is [0, 2^32-1]. |
| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1]. |
| 5 | BIGINT UNSIGNED | 8 | unsigned long integer, the value range is [0, 2^64-1]. |
| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38]. |
| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308]. |
| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767]. |
| 10 | INT UNSIGNED | 2 | unsigned integer, the value range is [0, 65535]. |
| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127]. |
| 12 | TINYINT UNSIGNED | 1 | unsigned single-byte integer, the value range is [0, 255]. |
| 13 | BOOL | 1 | Bool, the value range is {true, false}. |
| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type. |
| 16 | VARCHAR | User-defined | Alias of BINARY |
| # | **type** | **Bytes** | **Description** |
| --- | :---------------: | ------------ | --------------- |
| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported. |
| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1]. |
| 3 | INT UNSIGNED | 4 | Unsigned integer, the value range is [0, 2^32-1]. |
| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1]. |
| 5 | BIGINT UNSIGNED | 8 | unsigned long integer, the value range is [0, 2^64-1]. |
| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38]. |
| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308]. |
| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767]. |
| 10 | SMALLINT UNSIGNED | 2 | unsigned integer, the value range is [0, 65535]. |
| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127]. |
| 12 | TINYINT UNSIGNED | 1 | unsigned single-byte integer, the value range is [0, 255]. |
| 13 | BOOL | 1 | Bool, the value range is {true, false}. |
| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type. |
| 16 | VARCHAR | User-defined | Alias of BINARY |

:::note
@ -72,8 +72,8 @@ database_option: {
- 0: The database can contain multiple supertables.
- 1: The database can contain only one supertable.
- STT_TRIGGER: specifies the number of file merges triggered by flushed files. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value for this parameter; For multi-table low-frequency scenarios, it is recommended to configure this parameter with a larger value.
- TABLE_PREFIX:The prefix length in the table name that is ignored when distributing table to vnode based on table name.
- TABLE_SUFFIX:The suffix length in the table name that is ignored when distributing table to vnode based on table name.
- TABLE_PREFIX: The prefix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the prefix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name v30001, then "0001" is used if TSDB_PREFIX is set to 2 but "v3" is used if TSDB_PREFIX is set to -2; It can help you to control the distribution of tables.
- TABLE_SUFFIX: The suffix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the suffix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name v30001, then "v300" is used if TSDB_SUFFIX is set to 2 but "01" is used if TSDB_SUFFIX is set to -2; It can help you to control the distribution of tables.
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: specifies the maximum time of which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value 0. A value of 0 indicates that WAL files are not required to keep for consumption. Alter it with a proper value at first to create topics.
- WAL_RETENTION_SIZE: specifies the maximum total size of which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files to keep for consumption has no upper limit.
@ -33,7 +33,7 @@ column_definition:
SHOW STABLES [LIKE tb_name_wildcard];
```

The preceding SQL statement shows all supertables in the current TDengine database, including the name, creation time, number of columns, number of tags, and number of subtables for each supertable.
The preceding SQL statement shows all supertables in the current TDengine database.

### View the CREATE Statement for a Supertable

@ -82,7 +82,7 @@ One or multiple rows can be inserted into multiple tables in a single SQL statem

```sql
INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
```

## Automatically Create Table When Inserting
@ -55,7 +55,7 @@ window_clause: {
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]

interp_clause:
RANGE(ts_val, ts_val), EVERY(every_val), FILL(fill_mod_and_val)
RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)

partition_by_clause:
PARTITION BY expr [, expr] ...

@ -373,7 +373,7 @@ FROM temp_stable t1, temp_stable t2
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
```

For sub-table and super table:
For sub-table and super table:

```sql
SELECT *
@ -6,14 +6,14 @@ description: Use Tag Index to Improve Query Performance
|
|||
|
||||
## Introduction
|
||||
|
||||
Prior to TDengine 3.0.3.0 (excluded),only one index is created by default on the first tag of each super talbe, but it's not allowed to dynamically create index on any other tags. From version 3.0.30, you can dynamically create index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use properly.
|
||||
Prior to TDengine 3.0.3.0 (excluded), only one index is created by default on the first tag of each super table, but it's not allowed to dynamically create index on any other tags. From version 3.0.3.0, you can dynamically create an index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use them properly.
|
||||
|
||||
## Syntax
|
||||
|
||||
1. The syntax of creating an index
|
||||
|
||||
```sql
|
||||
CREATE INDEX index_name ON tbl_name (tagColName)
|
||||
CREATE INDEX index_name ON tbl_name (tagColName)
|
||||
```
|
||||
|
||||
In the above statement, `index_name` is the name of the index, `tbl_name` is the name of the super table, and `tagColName` is the name of the tag on which the index is being created. `tagColName` can be any type supported by TDengine.
|
||||
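For instance, assuming the super table `meters` used elsewhere in this document, with a tag `groupId`, an index on that tag could be created roughly as follows (the index name is illustrative):

```sql
-- Create an index on the groupId tag of the super table meters,
-- then drop it when it is no longer needed.
CREATE INDEX idx_groupid ON meters (groupId);
DROP INDEX idx_groupid;
```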
|
@ -48,4 +48,4 @@ You can also add filter conditions to limit the results.
|
|||
|
||||
6. You can't create an index on a normal table or a child table.
|
||||
|
||||
7. If a tag column has very few unique values, it's better not to create an index on it, as the benefit would be very small.
|
||||
7. If a tag column has very few unique values, it's better not to create an index on it, as the benefit would be very small.
|
||||
|
|
|
@ -5,9 +5,9 @@ description: This document describes the standard SQL functions available in TDe
|
|||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
## Single Row Functions
|
||||
## Scalar Functions
|
||||
|
||||
Single row functions return a result for each row.
|
||||
Scalar functions return one result for each row.
|
||||
|
||||
### Mathematical Functions
|
||||
|
||||
|
@ -434,7 +434,7 @@ TO_ISO8601(expr [, timezone])
|
|||
|
||||
**More explanations**:
|
||||
|
||||
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。 For example, TO_ISO8601(1, "+00:00").
|
||||
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]. For example, TO_ISO8601(1, "+00:00").
|
||||
- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp
|
||||
- If the input is a column of TIMESTAMP type, the precision of the returned value is same as the precision set for the current data base in use
|
||||
|
||||
|
@ -626,7 +626,7 @@ algo_type: {
|
|||
|
||||
**Applicable table types**: standard tables and supertables
|
||||
|
||||
**Explanations**:
|
||||
**Explanations**:
|
||||
- _p_ is in range [0,100], when _p_ is 0, the result is same as using function MIN; when _p_ is 100, the result is same as function MAX.
|
||||
- `algo_type` can only be input as `default` or `t-digest`. Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
|
||||
- The approximation result of `t-digest` algorithm is sensitive to input data order. For example, when querying STable with different input data order there might be minor differences in calculated results.
|
||||
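As a brief sketch, assuming the `meters` schema used in this document:

```sql
-- Approximate 99th percentile of current, using the default histogram-based
-- algorithm and the t-digest algorithm respectively.
SELECT APERCENTILE(current, 99) FROM meters;
SELECT APERCENTILE(current, 99, "t-digest") FROM meters;
```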
|
@ -672,7 +672,7 @@ If you input a specific column, the number of non-null values in the column is r
|
|||
ELAPSED(ts_primary_key [, time_unit])
|
||||
```
|
||||
|
||||
**Description**:`elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
|
||||
**Description**: The `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with an `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without an `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Please note that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
|
||||
|
||||
**Return value type**: Double if the input value is not NULL;
|
||||
|
||||
|
@ -680,7 +680,7 @@ ELAPSED(ts_primary_key [, time_unit])
|
|||
|
||||
**Applicable tables**: table, STable, outer in nested query
|
||||
|
||||
**Explanations**:
|
||||
**Explanations**:
|
||||
- `ts_primary_key` parameter can only be the first column of a table, i.e. timestamp primary key.
|
||||
- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit. Time unit specified by `time_unit` can be:
|
||||
1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
|
||||
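A minimal sketch, assuming the subtable `d1001` used in earlier examples; the time range is a placeholder:

```sql
-- Continuous time length (in minutes) covered by valid data within each 1-day window.
SELECT ELAPSED(ts, 1m) FROM d1001
  WHERE ts >= '2021-07-13 00:00:00' AND ts < '2021-07-20 00:00:00'
  INTERVAL(1d);
```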
|
@ -758,7 +758,7 @@ SUM(expr)
|
|||
HYPERLOGLOG(expr)
|
||||
```
|
||||
|
||||
**Description**:
|
||||
**Description**:
|
||||
The cardinal number of a specific column is returned by using hyperloglog algorithm. The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge.
|
||||
However, when the data volume is very small, the result may be not accurate, it's recommended to use `select count(data) from (select unique(col) as data from table)` in this case.
|
||||
|
||||
|
@ -772,10 +772,10 @@ HYPERLOGLOG(expr)
|
|||
### HISTOGRAM
|
||||
|
||||
```sql
|
||||
HISTOGRAM(expr,bin_type, bin_description, normalized)
|
||||
HISTOGRAM(expr, bin_type, bin_description, normalized)
|
||||
```
|
||||
|
||||
**Description**:Returns count of data points in user-specified ranges.
|
||||
**Description**: Returns count of data points in user-specified ranges.
|
||||
|
||||
**Return value type** If normalized is set to 1, a DOUBLE is returned; otherwise a BIGINT is returned
|
||||
|
||||
|
@ -783,18 +783,18 @@ HISTOGRAM(expr,bin_type, bin_description, normalized)
|
|||
|
||||
**Applicable table types**: table, STable
|
||||
|
||||
**Explanations**:
|
||||
- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"。
|
||||
- bin_description: parameter to describe how to generate buckets,can be in the following JSON formats for each bin_type respectively:
|
||||
**Explanations**:
|
||||
- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin".
|
||||
- bin_description: parameter to describe how to generate buckets can be in the following JSON formats for each bin_type respectively:
|
||||
- "user_input": "[1, 3, 5, 7]":
|
||||
User specified bin values.
|
||||
|
||||
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
|
||||
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated set of bins.
|
||||
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated set of bins.
|
||||
The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
|
||||
|
||||
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
|
||||
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins.
|
||||
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated range of bins.
|
||||
The above "linear_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
|
||||
- normalized: setting to 1/0 to turn on/off result normalization. Valid values are 0 or 1.
|
||||
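A sketch putting the parameters together, assuming the `meters` schema from this document:

```sql
-- Count voltage readings in 5 linear bins of width 5 starting at 0,
-- with (-inf, +inf) added and without normalization.
SELECT HISTOGRAM(voltage, 'linear_bin',
                 '{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}', 0)
FROM meters;
```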
|
||||
|
@ -886,7 +886,7 @@ INTERP(expr)
|
|||
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
|
||||
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a (millisecond), s (second), m (minute), h (hour), d (day), or w (week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
|
||||
- Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
|
||||
- `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable.
|
||||
- `INTERP` can be applied to supertable by interpolating primary key sorted data of all its childtables. It can also be used with `partition by tbname` when applied to supertable to generate interpolation on each single timeline.
|
||||
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported since version 3.0.2.0).
|
||||
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported since version 3.0.3.0).
|
||||
|
||||
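A sketch of these clauses working together, assuming the `meters` super table; the time range is a placeholder:

```sql
-- Interpolate voltage every 10 minutes over a 1-hour range for each subtable,
-- returning interpolation timestamps via the _irowts pseudocolumn.
SELECT _irowts, INTERP(voltage) FROM meters
  PARTITION BY tbname
  RANGE('2021-07-13 00:00:00', '2021-07-13 01:00:00')
  EVERY(10m)
  FILL(LINEAR);
```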
|
@ -1107,7 +1107,7 @@ ignore_negative: {
|
|||
**More explanation**:
|
||||
|
||||
- It can be used together with `PARTITION BY tbname` against a STable.
|
||||
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from。
|
||||
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from.
|
||||
|
||||
### DIFF
|
||||
|
||||
|
@ -1131,7 +1131,7 @@ ignore_negative: {
|
|||
**More explanation**:
|
||||
|
||||
- The number of result rows is the number of rows subtracted by one, no output for the first row
|
||||
- It can be used together with a selected column. For example: select \_rowts, DIFF() from。
|
||||
- It can be used together with a selected column. For example: select \_rowts, DIFF() from.
|
||||
|
||||
|
||||
### IRATE
|
||||
|
@ -1183,7 +1183,7 @@ STATECOUNT(expr, oper, val)
|
|||
**Applicable parameter values**:
|
||||
|
||||
- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes.
|
||||
- val : Numeric types
|
||||
- val: Numeric types
|
||||
|
||||
**Return value type**: Integer
|
||||
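A brief sketch, assuming the subtable `d1001` with a `voltage` column:

```sql
-- For each row on d1001, count how many consecutive rows so far
-- have satisfied voltage >= 205.
SELECT ts, voltage, STATECOUNT(voltage, 'GE', 205) FROM d1001;
```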
|
||||
|
@ -1210,7 +1210,7 @@ STATEDURATION(expr, oper, val, unit)
|
|||
**Applicable parameter values**:
|
||||
|
||||
- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes.
|
||||
- val : Numeric types
|
||||
- val: Numeric types
|
||||
- unit: The unit of time interval. Enter one of the following options: 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) If you do not enter a unit of time, the precision of the current database is used by default.
|
||||
|
||||
**Return value type**: Integer
|
||||
|
|
|
@ -69,19 +69,20 @@ These pseudocolumns occur after the aggregation clause.
|
|||
`FILL` clause is used to specify how to fill when there is data missing in any window, including:
|
||||
|
||||
1. NONE: No fill (the default fill mode)
|
||||
2. VALUE:Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled.
|
||||
3. PREV:Fill with the previous non-NULL value, `FILL(PREV)`
|
||||
4. NULL:Fill with NULL, `FILL(NULL)`
|
||||
5. LINEAR:Fill with the closest non-NULL value, `FILL(LINEAR)`
|
||||
6. NEXT:Fill with the next non-NULL value, `FILL(NEXT)`
|
||||
2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)`. Note: The value filled depends on the data type. For example, if you run FILL(VALUE, 1.23) on an integer column, the value 1 is filled.
|
||||
3. PREV: Fill with the previous non-NULL value, `FILL(PREV)`
|
||||
4. NULL: Fill with NULL, `FILL(NULL)`
|
||||
5. LINEAR: Fill with the closest non-NULL value, `FILL(LINEAR)`
|
||||
6. NEXT: Fill with the next non-NULL value, `FILL(NEXT)`
|
||||
|
||||
In the above filling modes, except for `NONE` mode, the `fill` clause will be ignored if there is no data in the defined time range, i.e. no data would be filled and the query result would be empty. This behavior is reasonable when the filling mode is `PREV`, `NEXT`, `LINEAR`, because filling can't be performed if there is not any data. For filling modes `NULL` and `VALUE`, however, filling can be performed even though there is not any data, filling or not depends on the choice of user's application. To accomplish the need of this force filling behavior and not break the behavior of existing filling modes, TDengine added two new filling modes since version 3.0.3.0.
|
||||
|
||||
1. NULL_F: Fill `NULL` by force
|
||||
2. VALUE_F: Fill `VALUE` by force
|
||||
|
||||
The detailed behaviors of `NULL`, `NULL_F`, `VALUE`, and `VALUE_F` are described below:
|
||||
- When used with `INTERVAL`: `NULL_F` and `VALUE_F` are filling by force;`NULL` and `VALUE` don't fill by force. The behavior of each filling mode is exactly same as what the name suggests.
|
||||
The detailed behaviors of `NULL`, `NULL_F`, `VALUE`, and `VALUE_F` are described below:
|
||||
|
||||
- When used with `INTERVAL`: `NULL_F` and `VALUE_F` are filling by force; `NULL` and `VALUE` don't fill by force. The behavior of each filling mode is exactly same as what the name suggests.
|
||||
- When used with `INTERVAL` in stream processing: `NULL_F` and `NULL` are the same, i.e. don't fill by force; `VALUE_F` and `VALUE` are the same, i.e. don't fill by force. It's suggested that there is no filling by force in stream processing.
|
||||
- When used with `INTERP`: `NULL` and `NULL_F` are the same, i.e. filling by force; `VALUE` and `VALUE_F` are the same, i.e. filling by force. It's suggested that there is always filling by force when used with `INTERP`.
|
||||
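To illustrate, a sketch using the `meters` schema; the time range is a placeholder:

```sql
-- 10-minute average current; windows without data are filled from the previous
-- window's value. FILL(VALUE_F, 0) would instead fill empty windows by force.
SELECT _wstart, AVG(current) FROM meters
  WHERE ts >= '2021-07-13 00:00:00' AND ts < '2021-07-14 00:00:00'
  INTERVAL(10m) FILL(PREV);
```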
|
||||
|
@ -97,7 +98,7 @@ The detailed beaviors of `NULL`, `NULL_F`, `VALUE`, and VALUE_F are described be
|
|||
|
||||
There are two kinds of time windows: sliding window and flip time/tumbling window.
|
||||
|
||||
The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.
|
||||
The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure below, [t0s, t0e], [t1s, t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which the time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is the same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.
|
||||
|
||||

|
||||
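A minimal sketch of a sliding window query on the `meters` super table:

```sql
-- 10-minute windows that advance every 5 minutes (overlapping sliding windows).
SELECT _wstart, _wend, MAX(voltage) FROM meters INTERVAL(10m) SLIDING(5m);
```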
|
||||
|
@ -121,7 +122,7 @@ Please note that the `timezone` parameter should be configured to be the same va
|
|||
|
||||
### State Window
|
||||
|
||||
In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two state windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12].
|
||||
In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two state windows according to status, [2019-04-28 14:22:07, 2019-04-28 14:22:10] and [2019-04-28 14:22:11, 2019-04-28 14:22:12].
|
||||
|
||||

|
||||
|
||||
|
@ -145,7 +146,7 @@ SELECT tbname, _wstart, CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE
|
|||
|
||||
### Session Window
|
||||
|
||||
The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30] because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
|
||||
The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30] because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
|
||||
|
||||

|
||||
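A sketch of a session window query on the subtable `d1001`:

```sql
-- Rows are grouped into one session as long as the gap between adjacent
-- timestamps stays within 12 seconds.
SELECT _wstart, _wend, COUNT(*) FROM d1001 SESSION(ts, 12s);
```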
|
||||
|
@ -178,7 +179,7 @@ select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c
|
|||
|
||||
### Examples
|
||||
|
||||
A table of intelligent meters can be created by the SQL statement below:
|
||||
A table of intelligent meters can be created by the SQL statement below:
|
||||
|
||||
```
|
||||
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
|
||||
|
|
|
@ -13,8 +13,11 @@ Because stream processing is built in to TDengine, you are no longer reliant on
|
|||
```sql
|
||||
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name SUBTABLE(expression) AS subquery
|
||||
stream_options: {
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
|
||||
WATERMARK time
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
|
||||
WATERMARK time
|
||||
IGNORE EXPIRED [0|1]
|
||||
DELETE_MARK time
|
||||
FILL_HISTORY [0|1]
|
||||
}
|
||||
|
||||
```
|
||||
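As a quick illustration of the syntax; the stream name, target super table, and interval below are hypothetical:

```sql
-- A stream that computes a 1-minute average voltage per subtable of meters,
-- writing results into a target super table avg_vol once each window closes.
CREATE STREAM IF NOT EXISTS avg_vol_s
  TRIGGER WINDOW_CLOSE
  INTO avg_vol AS
    SELECT _wstart, AVG(voltage) AS avg_voltage FROM meters
    PARTITION BY tbname INTERVAL(1m);
```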
|
@ -109,7 +112,7 @@ SHOW STREAMS;
|
|||
|
||||
When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it.
|
||||
|
||||
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering,the default value is AT_ONCE:
|
||||
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering, the default value is AT_ONCE:
|
||||
|
||||
1. AT_ONCE: triggers on write
|
||||
|
||||
|
@ -141,3 +144,27 @@ The data in expired windows is tagged as expired. TDengine stream processing pro
|
|||
2. Recalculate the data. In this method, all data in the window is reobtained from the database and recalculated. The latest results are then returned.
|
||||
|
||||
In both of these methods, configuring the watermark is essential for obtaining accurate results (if expired data is dropped) and avoiding repeated triggers that affect system performance (if expired data is recalculated).
|
||||
|
||||
## Supported functions
|
||||
|
||||
All [scalar functions](../function/#scalar-functions) are available in stream processing. All [Aggregate functions](../function/#aggregate-functions) and [Selection functions](../function/#selection-functions) are available in stream processing, except the following:
|
||||
- [leastsquares](../function/#leastsquares)
|
||||
- [percentile](../function/#percentile)
|
||||
- [top](../function/#top)
|
||||
- [bottom](../function/#bottom)
|
||||
- [elapsed](../function/#elapsed)
|
||||
- [interp](../function/#interp)
|
||||
- [derivative](../function/#derivative)
|
||||
- [irate](../function/#irate)
|
||||
- [twa](../function/#twa)
|
||||
- [histogram](../function/#histogram)
|
||||
- [diff](../function/#diff)
|
||||
- [statecount](../function/#statecount)
|
||||
- [stateduration](../function/#stateduration)
|
||||
- [csum](../function/#csum)
|
||||
- [mavg](../function/#mavg)
|
||||
- [sample](../function/#sample)
|
||||
- [tail](../function/#tail)
|
||||
- [unique](../function/#unique)
|
||||
- [mode](../function/#mode)
|
||||
|
||||
|
|
|
@ -67,7 +67,7 @@ description: This document describes the JSON data type in TDengine.
|
|||
|
||||
- The maximum length of keys in JSON is 256 bytes, and key must be printable ASCII characters. The maximum total length of a JSON is 4,096 bytes.
|
||||
|
||||
- JSON format:
|
||||
- JSON format:
|
||||
|
||||
- The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array.
|
||||
- object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so.
|
||||
|
|
|
@ -20,7 +20,7 @@ description: This document describes the usage of escape characters in TDengine.
|
|||
|
||||
1. If there are escape characters in identifiers (database name, table name, column name)
|
||||
- Identifier without ``: Error will be returned because identifier must be constituted of digits, ASCII characters or underscore and can't be started with digits
|
||||
- Identifier quoted with ``: Original content is kept, no escaping
|
||||
- Identifier quoted with ``: Original content is kept, no escaping
|
||||
2. If there are escape characters in values
|
||||
- The escape characters will be escaped as the above table. If the escape character doesn't match any supported one, the escape character "\" will be ignored.
|
||||
- "%" and "\_" are used as wildcards in `like`. `\%` and `\_` should be used to represent literal "%" and "\_" in `like`,. If `\%` and `\_` are used out of `like` context, the evaluation result is "`\%`"and "`\_`", instead of "%" and "\_".
|
||||
|
|
|
@ -120,6 +120,9 @@ Provides information about user-defined functions.
|
|||
| 5 | create_time | TIMESTAMP | Creation time |
|
||||
| 6 | code_len | INT | Length of the source code |
|
||||
| 7 | bufsize | INT | Buffer size |
|
||||
| 8 | func_language | BINARY(31) | UDF programming language |
|
||||
| 9 | func_body | BINARY(16384) | UDF function body |
|
||||
| 10 | func_version | INT | UDF function version, starting from 0; increased by 1 each time it is updated|
|
||||
|
||||
## INS_INDEXES
|
||||
|
||||
|
@ -181,7 +184,7 @@ Provides information about standard tables and subtables.
|
|||
|
||||
## INS_COLUMNS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| # | **Column** | **Data Type** | **Description** |
|
||||
| --- | :---------: | ------------- | ---------------------- |
|
||||
| 1 | table_name | BINARY(192) | Table name |
|
||||
| 2 | db_name | BINARY(64) | Database name |
|
||||
|
|
|
@ -4,7 +4,7 @@ sidebar_label: SHOW Statement
|
|||
description: This document describes how to use the SHOW statement in TDengine.
|
||||
---
|
||||
|
||||
`SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
|
||||
`SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
|
||||
|
||||
## SHOW APPS
|
||||
|
||||
|
@ -129,6 +129,14 @@ SHOW QNODES;
|
|||
|
||||
Shows information about qnodes in the system.
|
||||
|
||||
## SHOW QUERIES
|
||||
|
||||
```sql
|
||||
SHOW QUERIES;
|
||||
```
|
||||
|
||||
Shows the queries in progress in the system.
|
||||
|
||||
## SHOW SCORES
|
||||
|
||||
```sql
|
||||
|
@ -179,10 +187,10 @@ SHOW TABLE DISTRIBUTED table_name;
|
|||
|
||||
Shows how table data is distributed.
|
||||
|
||||
Examples: Below is an example of this command to display the block distribution of table `d0` in detailed format.
|
||||
Examples: Below is an example of this command to display the block distribution of table `d0` in detailed format.
|
||||
|
||||
```sql
|
||||
show table distributed d0\G;
|
||||
show table distributed d0\G;
|
||||
```
|
||||
|
||||
<details>
|
||||
|
@ -193,31 +201,31 @@ _block_dist: Total_Blocks=[5] Total_Size=[93.65 KB] Average_size=[18.73 KB] Comp
|
|||
|
||||
Total_Blocks: Table `d0` contains 5 blocks in total
|
||||
|
||||
Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB
|
||||
Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB
|
||||
|
||||
Average_size: The average size of each block is 18.73 KB
|
||||
|
||||
Compression_Ratio: The data compression rate is 23.98%
|
||||
|
||||
|
||||
*************************** 2.row ***************************
|
||||
_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]
|
||||
|
||||
Total_Rows: Table `d0` contains 20,000 rows
|
||||
|
||||
Inmem_Rows: The number of rows still in memory, i.e. not yet committed to disk, is 0, i.e. there are no such rows
|
||||
Inmem_Rows: The number of rows still in memory, i.e. not yet committed to disk, is 0, i.e. there are no such rows
|
||||
|
||||
MinRows: The minimum number of rows in a block is 3,616
|
||||
MinRows: The minimum number of rows in a block is 3,616
|
||||
|
||||
MaxRows: The maximum number of rows in a block is 4,096
|
||||
MaxRows: The maximum number of rows in a block is 4,096
|
||||
|
||||
Average_Rows: The average number of rows in a block is 4,000
|
||||
Average_Rows: The average number of rows in a block is 4,000
|
||||
|
||||
*************************** 3.row ***************************
|
||||
_block_dist: Total_Tables=[1] Total_Files=[2]
|
||||
|
||||
Total_Tables: The number of child tables, 1 in this example
|
||||
Total_Tables: The number of child tables, 1 in this example
|
||||
|
||||
Total_Files: The number of files storing the table's data, 2 in this example
|
||||
Total_Files: The number of files storing the table's data, 2 in this example
|
||||
|
||||
*************************** 4.row ***************************
|
||||
|
||||
|
@ -353,7 +361,7 @@ SHOW VARIABLES;
|
|||
SHOW DNODE dnode_id VARIABLES;
|
||||
```
|
||||
|
||||
Shows the working configuration of the parameters that must be the same on each node. You can also specify a dnode to show the working configuration for that node.
|
||||
Shows the working configuration of the parameters that must be the same on each node. You can also specify a dnode to show the working configuration for that node.
|
||||
|
||||
## SHOW VGROUPS
|
||||
|
||||
|
@ -361,7 +369,7 @@ Shows the working configuration of the parameters that must be the same on each
|
|||
SHOW [db_name.]VGROUPS;
|
||||
```
|
||||
|
||||
Shows information about all vgroups in the current database.
|
||||
Shows information about all vgroups in the current database.
|
||||
|
||||
## SHOW VNODES
|
||||
|
||||
|
|
|
@ -7,17 +7,18 @@ description: This document describes the SQL statements related to user-defined
|
|||
You can create user-defined functions and import them into TDengine.
|
||||
## Create UDF
|
||||
|
||||
SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDF are stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted.
|
||||
SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDF is stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted.
|
||||
|
||||
When creating UDF, the type of UDF, i.e. a scalar function or aggregate function must be specified. If the specified type is wrong, the SQL statements using the function would fail with errors. The input data type and output data type must be consistent with the UDF definition.
|
||||
|
||||
- Create Scalar Function
|
||||
```sql
|
||||
CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type;
|
||||
CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_type [LANGUAGE 'C|Python'];
|
||||
```
|
||||
|
||||
- function_name: The scalar function name to be used in SQL statement which must be consistent with the UDF name and is also the name of the compiled DLL (.so file).
|
||||
- library_path: The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes.
|
||||
- OR REPLACE: if the UDF exists, the UDF properties are modified
|
||||
- function_name: The scalar function name to be used in the SQL statement
|
||||
- LANGUAGE 'C|Python': the programming language of UDF. Now C or Python is supported. If this clause is omitted, C is assumed as the programming language.
|
||||
- library_path: For C programming language, The absolute path of the DLL file including the name of the shared object file (.so). For Python programming language, the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
|
||||
- output_type: The data type of the results of the UDF.
|
||||
|
||||
For example, the following SQL statement can be used to create a UDF from `libbitand.so`.
|
||||
|
@ -25,14 +26,20 @@ CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type;
|
|||
```sql
|
||||
CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT;
|
||||
```
|
||||
For example, the following SQL statement can be used to modify the existing function `bit_and`. The output type is changed to BIGINT and the programming language is changed to Python.
|
||||
|
||||
```sql
|
||||
CREATE OR REPLACE FUNCTION bit_and AS "/home/taos/udf_example/bit_and.py" OUTPUTTYPE BIGINT LANGUAGE 'Python';
|
||||
```
|
||||
|
||||
- Create Aggregate Function
|
||||
```sql
|
||||
CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ];
|
||||
```
|
||||
|
||||
- function_name: The aggregate function name to be used in SQL statement which must be consistent with the udfNormalFunc name and is also the name of the compiled DLL (.so file).
|
||||
- library_path: The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes.
|
||||
- OR REPLACE: if the UDF exists, the UDF properties are modified
|
||||
- function_name: The aggregate function name to be used in the SQL statement
|
||||
- LANGUAGE 'C|Python': the programming language of the UDF. Now C or Python is supported. If this clause is omitted, C is assumed as the programming language.
|
||||
- library_path: For C programming language, The absolute path of the DLL file including the name of the shared object file (.so). For Python programming language, the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
|
||||
- output_type: The output data type, the value is the literal string of the supported TDengine data type.
|
||||
- buffer_size: The size of the intermediate buffer in bytes. This parameter is optional.
|
||||
|
||||
|
@ -41,6 +48,11 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [
|
|||
```sql
|
||||
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8;
|
||||
```
|
||||
For example, the following SQL statement modifies the buffer size of the existing UDF `l2norm` to 64.
|
||||
```sql
|
||||
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 64;
|
||||
```
|
||||
|
||||
For more information about user-defined functions, see [User-Defined Functions](/develop/udf).
|
||||
|
||||
## Manage UDF
|
||||
|
@ -61,9 +73,9 @@ SHOW FUNCTIONS;
|
|||
|
||||
## Call UDF
|
||||
|
||||
The function name specified when creating UDF can be used directly in SQL statements, just like builtin functions. For example:
|
||||
The function name specified when creating UDF can be used directly in SQL statements, just like built-in functions. For example:
|
||||
```sql
|
||||
SELECT bit_and(c1,c2) FROM table;
|
||||
```
|
||||
|
||||
The above SQL statement invokes function X for column c1 and c2 on table. You can use query keywords like WHERE with user-defined functions.
|
||||
The above SQL statement invokes function X for columns c1 and c2 on the table. You can use query keywords like WHERE with user-defined functions.
|
||||
|
|
|
@ -27,7 +27,7 @@ The following data types can be used in the schema for standard tables.
|
|||
| - | :------- | :-------- | :------- |
|
||||
| 1 | ALTER ACCOUNT | Deprecated| This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
|
||||
| 2 | ALTER ALL DNODES | Added | Modifies the configuration of all dnodes.
|
||||
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. TDengine 3.0 provides strict consistency by default and doesn't allow to change to weak consitency. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
|
||||
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. TDengine 3.0 provides strict consistency by default and doesn't allow to change to weak consistency. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
|
||||
| 4 | ALTER STABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a supertable. </li></ul>
|
||||
| 5 | ALTER TABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a standard table. </li><li>TTL: Specifies the time-to-live for a standard table. </li></ul>
|
||||
| 6 | ALTER USER | Modified | Deprecated<ul><li>PRIVILEGE: Specified user permissions. Replaced by GRANT and REVOKE. <br/>Added</li><li>ENABLE: Enables or disables a user. </li><li>SYSINFO: Specifies whether a user can query system information. </li></ul>
|
||||
|
|
|
@ -13,7 +13,7 @@ Syntax Specifications used in this chapter:
|
|||
- Information that you input is given in lowercase.
|
||||
- \[ \] means optional input, excluding [] itself.
|
||||
- | means one of a few options, excluding | itself.
|
||||
- … means the item prior to it can be repeated multiple times.
|
||||
- ... means the item prior to it can be repeated multiple times.
|
||||
|
||||
To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
|
||||
|
||||
|
|
|
@ -22,11 +22,11 @@ wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.s
|
|||
chmod +x TDinsight.sh
|
||||
```
|
||||
|
||||
Prepare:
|
||||
Prepare:
|
||||
|
||||
1. TDengine Server
|
||||
|
||||
- The URL of REST service:for example `http://localhost:6041` if TDengine is deployed locally
|
||||
- The URL of REST service: for example `http://localhost:6041` if TDengine is deployed locally
|
||||
- User name and password
|
||||
|
||||
2. Grafana Alert Notification
|
||||
|
@ -42,3 +42,304 @@ An existing Grafana Notification Channel can be specified with parameter `-E`, t
|
|||
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
|
||||
|
||||
For more use cases and restrictions please refer to [TDinsight](/reference/tdinsight/).
|
||||
|
||||
## log database
|
||||
|
||||
The data of the TDinsight dashboard is stored in the `log` database by default (you can change it in taoskeeper's config file; for more information, please refer to the [taoskeeper document](/reference/taosKeeper)). taoskeeper will create the log database on startup.
|
||||
|
||||
### cluster\_info table
|
||||
|
||||
`cluster_info` table contains cluster information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|first\_ep|VARCHAR||first ep of cluster|
|
||||
|first\_ep\_dnode\_id|INT||dnode id of first\_ep|
|
||||
|version|VARCHAR||tdengine version. such as: 3.0.4.0|
|
||||
|master\_uptime|FLOAT||days of master's uptime|
|
||||
|monitor\_interval|INT||monitor interval in second|
|
||||
|dbs\_total|INT||total number of databases in cluster|
|
||||
|tbs\_total|BIGINT||total number of tables in cluster|
|
||||
|stbs\_total|INT||total number of stables in cluster|
|
||||
|dnodes\_total|INT||total number of dnodes in cluster|
|
||||
|dnodes\_alive|INT||total number of dnodes in ready state|
|
||||
|mnodes\_total|INT||total number of mnodes in cluster|
|
||||
|mnodes\_alive|INT||total number of mnodes in ready state|
|
||||
|vgroups\_total|INT||total number of vgroups in cluster|
|
||||
|vgroups\_alive|INT||total number of vgroups in ready state|
|
||||
|vnodes\_total|INT||total number of vnode in cluster|
|
||||
|vnodes\_alive|INT||total number of vnode in ready state|
|
||||
|connections\_total|INT||total number of connections to cluster|
|
||||
|topics\_total|INT||total number of topics in cluster|
|
||||
|streams\_total|INT||total number of streams in cluster|
|
||||
|protocol|INT||protocol version|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### d\_info table
|
||||
|
||||
`d_info` table contains dnodes information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|status|VARCHAR||dnode status|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### m\_info table
|
||||
|
||||
`m_info` table contains mnode information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|role|VARCHAR||the role of mnode. leader or follower|
|
||||
|mnode\_id|INT|TAG|master node id|
|
||||
|mnode\_ep|NCHAR|TAG|master node endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### dnodes\_info table
|
||||
|
||||
`dnodes_info` table contains dnodes information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|uptime|FLOAT||dnode uptime|
|
||||
|cpu\_engine|FLOAT||cpu usage of tdengine. read from `/proc/<taosd_pid>/stat`|
|
||||
|cpu\_system|FLOAT||cpu usage of server. read from `/proc/stat`|
|
||||
|cpu\_cores|FLOAT||cpu cores of server|
|
||||
|mem\_engine|INT||memory usage of tdengine. read from `/proc/<taosd_pid>/status`|
|
||||
|mem\_system|INT||available memory on the server|
|
||||
|mem\_total|INT||total memory of server in `KB`|
|
||||
|disk\_engine|INT|||
|
||||
|disk\_used|BIGINT||usage of data dir in `bytes`|
|
||||
|disk\_total|BIGINT||the capacity of data dir in `bytes`|
|
||||
|net\_in|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`|
|
||||
|net\_out|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`|
|
||||
|io\_read|FLOAT||io throughput rate in kb/s. read from `/proc/<taosd_pid>/io`|
|
||||
|io\_write|FLOAT||io throughput rate in kb/s. read from `/proc/<taosd_pid>/io`|
|
||||
|io\_read\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc/<taosd_pid>/io`|
|
||||
|io\_write\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc/<taosd_pid>/io`|
|
||||
|req\_select|INT||number of select queries received per dnode|
|
||||
|req\_select\_rate|FLOAT||number of select queries received per dnode divided by monitor interval.|
|
||||
|req\_insert|INT||number of insert queries received per dnode|
|
||||
|req\_insert\_success|INT||number of successfully insert queries received per dnode|
|
||||
|req\_insert\_rate|FLOAT||number of insert queries received per dnode divided by monitor interval|
|
||||
|req\_insert\_batch|INT||number of batch insertions|
|
||||
|req\_insert\_batch\_success|INT||number of successful batch insertions|
|
||||
|req\_insert\_batch\_rate|FLOAT||number of batch insertions divided by monitor interval|
|
||||
|errors|INT||dnode errors|
|
||||
|vnodes\_num|INT||number of vnodes per dnode|
|
||||
|masters|INT||number of master vnodes|
|
||||
|has\_mnode|INT||if the dnode has mnode|
|
||||
|has\_qnode|INT||if the dnode has qnode|
|
||||
|has\_snode|INT||if the dnode has snode|
|
||||
|has\_bnode|INT||if the dnode has bnode|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### data\_dir table
|
||||
|
||||
`data_dir` table contains data directory information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|name|NCHAR||data directory. default is `/var/lib/taos`|
|
||||
|level|INT||level for multi-level storage|
|
||||
|avail|BIGINT||available space for data directory|
|
||||
|used|BIGINT||used space for data directory|
|
||||
|total|BIGINT||total space for data directory|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### log\_dir table
|
||||
|
||||
`log_dir` table contains log directory information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|name|NCHAR||log directory. default is `/var/log/taos/`|
|
||||
|avail|BIGINT||available space for log directory|
|
||||
|used|BIGINT||used space for data directory|
|
||||
|total|BIGINT||total space for data directory|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### temp\_dir table
|
||||
|
||||
`temp_dir` table contains temp dir information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|name|NCHAR||temp directory. default is `/tmp/`|
|
||||
|avail|BIGINT||available space for temp directory|
|
||||
|used|BIGINT||used space for temp directory|
|
||||
|total|BIGINT||total space for temp directory|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### vgroups\_info table
|
||||
|
||||
`vgroups_info` table contains vgroups information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|vgroup\_id|INT||vgroup id|
|
||||
|database\_name|VARCHAR||database for the vgroup|
|
||||
|tables\_num|BIGINT||number of tables per vgroup|
|
||||
|status|VARCHAR||status|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### vnodes\_role table
|
||||
|
||||
`vnodes_role` table contains vnode role information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|vnode\_role|VARCHAR||role. leader or follower|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### logs table
|
||||
|
||||
`logs` table contains log information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|level|VARCHAR||log level|
|
||||
|content|NCHAR||log content|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### log\_summary table
|
||||
|
||||
`log_summary` table contains log summary information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|error|INT||error count|
|
||||
|info|INT||info count|
|
||||
|debug|INT||debug count|
|
||||
|trace|INT||trace count|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### grants\_info table
|
||||
|
||||
`grants_info` table contains grants information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|expire\_time|BIGINT||time until grants expire in seconds|
|
||||
|timeseries\_used|BIGINT||timeseries used|
|
||||
|timeseries\_total|BIGINT||total timeseries|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### keeper\_monitor table
|
||||
|
||||
`keeper_monitor` table contains keeper monitor information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|cpu|FLOAT||cpu usage|
|
||||
|mem|FLOAT||memory usage|
|
||||
|identify|NCHAR|TAG||
|
||||
|
||||
### taosadapter\_restful\_http\_request\_total table
|
||||
|
||||
`taosadapter_restful_http_request_total` table contains taosadapter rest request information record. The timestamp column of this table is `_ts`.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|\_ts|TIMESTAMP||timestamp|
|
||||
|guage|DOUBLE||metric value|
|
||||
|client\_ip|NCHAR|TAG|client ip|
|
||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||
|request\_method|NCHAR|TAG|request method|
|
||||
|request\_uri|NCHAR|TAG|request uri|
|
||||
|status\_code|NCHAR|TAG|status code|
|
||||
|
||||
### taosadapter\_restful\_http\_request\_fail table
|
||||
|
||||
`taosadapter_restful_http_request_fail` table contains taosadapter failed rest request information record. The timestamp column of this table is `_ts`.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|\_ts|TIMESTAMP||timestamp|
|
||||
|guage|DOUBLE||metric value|
|
||||
|client\_ip|NCHAR|TAG|client ip|
|
||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||
|request\_method|NCHAR|TAG|request method|
|
||||
|request\_uri|NCHAR|TAG|request uri|
|
||||
|status\_code|NCHAR|TAG|status code|
|
||||
|
||||
### taosadapter\_restful\_http\_request\_in\_flight table
|
||||
|
||||
`taosadapter_restful_http_request_in_flight` table contains taosadapter rest request information record in real time. The timestamp column of this table is `_ts`.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|\_ts|TIMESTAMP||timestamp|
|
||||
|guage|DOUBLE||metric value|
|
||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||
|
||||
### taosadapter\_restful\_http\_request\_summary\_milliseconds table
|
||||
|
||||
`taosadapter_restful_http_request_summary_milliseconds` table contains the summary of REST request information records. The timestamp column of this table is `_ts`.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|\_ts|TIMESTAMP||timestamp|
|
||||
|count|DOUBLE|||
|
||||
|sum|DOUBLE|||
|
||||
|0.5|DOUBLE|||
|
||||
|0.9|DOUBLE|||
|
||||
|0.99|DOUBLE|||
|
||||
|0.1|DOUBLE|||
|
||||
|0.2|DOUBLE|||
|
||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||
|request\_method|NCHAR|TAG|request method|
|
||||
|request\_uri|NCHAR|TAG|request uri|
|
||||
|
||||
### taosadapter\_system\_mem\_percent table
|
||||
|
||||
`taosadapter_system_mem_percent` table contains taosadapter memory usage information. The timestamp of this table is `_ts`.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|\_ts|TIMESTAMP||timestamp|
|
||||
|guage|DOUBLE||metric value|
|
||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||
|
||||
### taosadapter\_system\_cpu\_percent table
|
||||
|
||||
`taosadapter_system_cpu_percent` table contains taosadapter cpu usage information. The timestamp of this table is `_ts`.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|\_ts|TIMESTAMP||timestamp|
|
||||
|guage|DOUBLE||metric value|
|
||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||
|
||||
|
|
|
@ -9,13 +9,13 @@ When a TDengine client is unable to access a TDengine server, the network connec
|
|||
|
||||
Diagnostics for network connections can be executed between Linux/Windows/macOS.
|
||||
|
||||
Diagnostic steps:
|
||||
Diagnostic steps:
|
||||
|
||||
1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd`.
|
||||
2. On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server".
|
||||
3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send a testing package to the specified server and port.
|
||||
|
||||
-l <pktlen\>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
|
||||
-l <pktlen\>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
|
||||
Please note that the package length must be same in the above 2 commands executed on server side and client side respectively.
|
||||
|
||||
Output of the server side for the example is below:
|
||||
|
|
|
@ -83,13 +83,13 @@ For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:60
|
|||
|
||||
TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
|
||||
|
||||
- authentication information is shown below:
|
||||
- authentication information is shown below:
|
||||
|
||||
```text
|
||||
Authorization: Taosd <TOKEN>
|
||||
```
|
||||
|
||||
- Basic authentication information is shown below:
|
||||
- Basic authentication information is shown below:
|
||||
|
||||
```text
|
||||
Authorization: Basic <TOKEN>
|
||||
|
|
|
@ -12,9 +12,9 @@ C/C++ developers can use TDengine's client driver and the C/C++ connector, to de
|
|||
|
||||
After TDengine server or client installation, `taos.h` is located at
|
||||
|
||||
- Linux:`/usr/local/taos/include`
|
||||
- Windows:`C:\TDengine\include`
|
||||
- macOS:`/usr/local/include`
|
||||
- Linux: `/usr/local/taos/include`
|
||||
- Windows: `C:\TDengine\include`
|
||||
- macOS: `/usr/local/include`
|
||||
|
||||
The dynamic libraries for the TDengine client driver are located in:
|
||||
|
||||
|
@ -412,7 +412,8 @@ In addition to writing data using the SQL method or the parameter binding API, w
|
|||
Note that the timestamp resolution parameter only takes effect when the protocol type is `SML_LINE_PROTOCOL`.
|
||||
For OpenTSDB's text protocol, timestamp resolution follows its official resolution rules - time precision is confirmed by the number of characters contained in the timestamp.
|
||||
|
||||
schemaless 其他相关的接口
|
||||
schemaless interfaces:
|
||||
|
||||
- `TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int64_t reqid)`
|
||||
- `TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision)`
|
||||
- `TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int64_t reqid)`
|
||||
|
@ -423,6 +424,6 @@ In addition to writing data using the SQL method or the parameter binding API, w
|
|||
|
||||
**Description**
|
||||
- The above seven interfaces are extension interfaces, which are mainly used to pass ttl and reqid parameters, and can be used as needed.
|
||||
- Withing _raw interfaces represent data through the passed parameters lines and len. In order to solve the problem that the original interface data contains '\0' and is truncated. The totalRows pointer returns the number of parsed data rows.
|
||||
- Withing _ttl interfaces can pass the ttl parameter to control the ttl expiration time of the table.
|
||||
- Withing _reqid interfaces can track the entire call chain by passing the reqid parameter.
|
||||
- Interfaces ending with _raw pass data through the parameters lines and len, which solves the problem of data containing '\0' being truncated by the original interfaces. The totalRows pointer returns the number of parsed data rows.
|
||||
- Interfaces ending with _ttl can pass the ttl parameter to control the ttl expiration time of the table.
|
||||
- Interfaces ending with _reqid can track the entire call chain by passing the reqid parameter.
|
||||
|
|
|
@ -36,23 +36,110 @@ REST connection supports all platforms that can run Java.
|
|||
|
||||
Please refer to [version support list](/reference/connector#version-support)
|
||||
|
||||
## Recent update logs
|
||||
|
||||
| taos-jdbcdriver version | major changes |
|
||||
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: |
|
||||
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket |
|
||||
| 3.2.0 | This version has been deprecated |
|
||||
| 3.1.0 | JDBC REST connection supports subscription over WebSocket |
|
||||
| 3.0.1 - 3.0.4 | Fix an issue where resultSet data was sometimes parsed incorrectly. 3.0.1 is compiled on JDK 11; use another version in a JDK 8 environment |
|
||||
| 3.0.0 | Support for TDengine 3.0 |
|
||||
| 2.0.42 | fix wasNull interface return value in WebSocket connection |
|
||||
| 2.0.41 | fix decode method of username and password in REST connection |
|
||||
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
|
||||
| 2.0.38 | JDBC REST connections add bulk pull function |
|
||||
| 2.0.37 | Support json tags |
|
||||
| 2.0.36 | Support schemaless writing |
|
||||
|
||||
**Note**: adding the `batchfetch` parameter to the REST connection and setting it to true will enable the WebSocket connection.
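For example, the following sketch (assuming a local taosAdapter on port 6041 and the default credentials) opens a REST connection with `batchfetch=true`, which the driver then upgrades to a WebSocket connection:

```java
import java.sql.Connection;
import java.sql.DriverManager;

public class WebSocketConnectExample {
    public static void main(String[] args) throws Exception {
        // batchfetch=true switches the REST connection to the WebSocket transport
        String url = "jdbc:TAOS-RS://localhost:6041/?batchfetch=true";
        try (Connection conn = DriverManager.getConnection(url, "root", "taosdata")) {
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}
```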
|
||||
|
||||
### Handling exceptions
|
||||
|
||||
After an error is reported, the error message and error code can be obtained through SQLException.
|
||||
|
||||
```java
|
||||
try (Statement statement = connection.createStatement()) {
|
||||
// executeQuery
|
||||
ResultSet resultSet = statement.executeQuery(sql);
|
||||
// print result
|
||||
printResult(resultSet);
|
||||
} catch (SQLException e) {
|
||||
System.out.println("ERROR Message: " + e.getMessage());
|
||||
System.out.println("ERROR Code: " + e.getErrorCode());
|
||||
e.printStackTrace();
|
||||
}
|
||||
```
|
||||
|
||||
There are four types of error codes that the JDBC connector can report:
|
||||
|
||||
- Error code of the JDBC driver itself (error code between 0x2301 and 0x2350),
|
||||
- Error code of the native connection method (error code between 0x2351 and 0x2360)
|
||||
- Error code of the consumer method (error code between 0x2371 and 0x2380)
|
||||
- Error code of other TDengine function modules.
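As an illustration only, an application could branch on these ranges when deciding how to react to a failure; the helper below is a sketch and the printed messages are placeholders:

```java
import java.sql.SQLException;
import java.sql.Statement;

public class ErrorCodeRanges {
    // Classify a JDBC error by the ranges described above; sketch only.
    static void handle(SQLException e) {
        int code = e.getErrorCode();
        if (code >= 0x2301 && code <= 0x2350) {
            System.out.println("JDBC driver error: " + e.getMessage());
        } else if (code >= 0x2351 && code <= 0x2360) {
            System.out.println("native connection error: " + e.getMessage());
        } else if (code >= 0x2371 && code <= 0x2380) {
            System.out.println("consumer error: " + e.getMessage());
        } else {
            System.out.println("other TDengine module error: " + e.getMessage());
        }
    }

    static void execute(Statement stmt, String sql) {
        try {
            stmt.executeUpdate(sql);
        } catch (SQLException e) {
            handle(e);
        }
    }
}
```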
|
||||
|
||||
For specific error codes, please refer to:
|
||||
|
||||
| Error Code | Description | Suggested Actions |
|
||||
| ---------- | --------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 0x2301 | connection already closed | The connection has been closed, check the connection status, or recreate the connection to execute the relevant instructions. |
|
||||
| 0x2302 | this operation is NOT supported currently! | The current interface does not support the connection. You can use another connection mode. |
|
||||
| 0x2303 | invalid variables | The parameter is invalid. Check the interface specification and adjust the parameter type and size. |
|
||||
| 0x2304 | statement is closed | The statement is closed. Check whether the statement is closed and used again, or whether the connection is normal. |
|
||||
| 0x2305 | resultSet is closed | The result set has been released. Check whether the result set was released and then used again. |
|
||||
| 0x2306 | Batch is empty! | Add parameters to the prepared statement and then execute the batch. |
|
||||
| 0x2307 | Can not issue data manipulation statements with executeQuery() | The update operation should use executeUpdate(), not executeQuery(). |
|
||||
| 0x2308 | Can not issue SELECT via executeUpdate() | The query operation should use executeQuery(), not executeUpdate(). |
|
||||
| 0x230d | parameter index out of range | The parameter is out of bounds. Check the proper range of the parameter. |
|
||||
| 0x230e | connection already closed | The connection has been closed. Please check whether the connection is closed and used again, or whether the connection is normal. |
|
||||
| 0x230f | unknown sql type in tdengine | Check the data type supported by TDengine. |
|
||||
| 0x2310 | can't register JDBC-JNI driver | The native driver cannot be registered. Please check whether the url is correct. |
|
||||
| 0x2312 | url is not set | Check whether the REST connection url is correct. |
|
||||
| 0x2314 | numeric value out of range | Check that the correct interface is used for the numeric types in the obtained result set. |
|
||||
| 0x2315 | unknown taos type in tdengine | Whether the correct TDengine data type is specified when converting the TDengine data type to the JDBC data type. |
|
||||
| 0x2317 | | A wrong request type was used in the REST connection. |
|
||||
| 0x2318 | | A data transmission exception occurred during the REST connection. Please check the network status and try again. |
|
||||
| 0x2319 | user is required | The user name information is missing when creating the connection |
|
||||
| 0x231a | password is required | Password information is missing when creating a connection |
|
||||
| 0x231c | httpEntity is null, sql: | Execution exception occurred during the REST connection |
|
||||
| 0x2350 | unknown error | Unknown exception, please report it to the developer on GitHub. |
|
||||
| 0x2352 | Unsupported encoding | An unsupported character encoding set is specified under the native Connection. |
|
||||
| 0x2353 | internal error of database, please see taoslog for more details | An error occurs when the prepare statement is executed on the native connection. Check the taos log to locate the fault. |
|
||||
| 0x2354 | JNI connection is NULL | When the command is executed, the native Connection is closed. Check the connection to TDengine. |
|
||||
| 0x2355 | JNI result set is NULL | The result set is abnormal. Please check the connection status and try again. |
|
||||
| 0x2356 | invalid num of fields | The meta information of the result set obtained by the native connection does not match. |
|
||||
| 0x2357 | empty sql string | Fill in the correct SQL for execution. |
|
||||
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation for the native connection failed. Check the taos log to locate the problem. |
|
||||
| 0x2371 | consumer properties must not be null! | The parameter is empty when you create a subscription. Please fill in the correct parameter. |
|
||||
| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains a null value. Please enter the correct parameter. |
|
||||
| 0x2373 | failed to set consumer property, | The parameter value contains a null value. Please enter the correct parameter. |
|
||||
| 0x2375 | topic reference has been destroyed | The topic reference is released during the creation of the data subscription. Check the connection to TDengine. |
|
||||
| 0x2376 | failed to set consumer topic, topic name is empty | During data subscription creation, the subscription topic name is empty. Check that the specified topic name is correct. |
|
||||
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Please check the connection to TDengine. |
|
||||
| 0x2378 | consumer create error | Failed to create a data subscription. Check the taos log according to the error message to locate the fault. |
|
||||
| - | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
|
||||
| - | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
|
||||
|
||||
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||
|
||||
## TDengine DataType vs. Java DataType
|
||||
|
||||
TDengine currently supports timestamp, number, character, and Boolean types. The corresponding type conversions with Java are as follows:
|
||||
|
||||
| TDengine DataType | JDBCType |
|
||||
| ----------------- | ---------------------------------- |
|
||||
| TIMESTAMP | java.sql.Timestamp |
|
||||
| INT | java.lang.Integer |
|
||||
| BIGINT | java.lang.Long |
|
||||
| FLOAT | java.lang.Float |
|
||||
| DOUBLE | java.lang.Double |
|
||||
| SMALLINT | java.lang.Short |
|
||||
| TINYINT | java.lang.Byte |
|
||||
| BOOL | java.lang.Boolean |
|
||||
| BINARY | byte array |
|
||||
| NCHAR | java.lang.String |
|
||||
| JSON | java.lang.String |
|
||||
| TDengine DataType | JDBCType |
|
||||
| ----------------- | ------------------ |
|
||||
| TIMESTAMP | java.sql.Timestamp |
|
||||
| INT | java.lang.Integer |
|
||||
| BIGINT | java.lang.Long |
|
||||
| FLOAT | java.lang.Float |
|
||||
| DOUBLE | java.lang.Double |
|
||||
| SMALLINT | java.lang.Short |
|
||||
| TINYINT | java.lang.Byte |
|
||||
| BOOL | java.lang.Boolean |
|
||||
| BINARY | byte array |
|
||||
| NCHAR | java.lang.String |
|
||||
| JSON | java.lang.String |
|
||||
|
||||
**Note**: Only tags support the JSON type.
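As a quick illustration of this mapping, the sketch below reads one row with the matching JDBC getters; the table and column names (`meters`, `ts`, `current`, `voltage`) are examples only:

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;
import java.sql.Timestamp;

public class TypeMappingExample {
    // Read one row using the getters that correspond to the mapping table above.
    static void printRow(Connection conn) throws Exception {
        try (Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select ts, current, voltage from meters limit 1")) {
            if (rs.next()) {
                Timestamp ts = rs.getTimestamp("ts");  // TIMESTAMP -> java.sql.Timestamp
                float current = rs.getFloat("current"); // FLOAT     -> java.lang.Float
                int voltage = rs.getInt("voltage");     // INT       -> java.lang.Integer
                System.out.println(ts + " " + current + " " + voltage);
            }
        }
    }
}
```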
|
||||
|
||||
|
@ -82,7 +169,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.0.0</version>
|
||||
<version>3.2.1</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
@ -97,7 +184,7 @@ cd taos-connector-jdbc
|
|||
mvn clean install -Dmaven.test.skip=true
|
||||
```
|
||||
|
||||
After you have compiled taos-jdbcdriver, the `taos-jdbcdriver-3.0.*-dist.jar` file is created in the target directory. The compiled JAR file is automatically stored in your local Maven repository.
|
||||
After you have compiled taos-jdbcdriver, the `taos-jdbcdriver-3.2.*-dist.jar` file is created in the target directory. The compiled JAR file is automatically stored in your local Maven repository.
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
@ -227,7 +314,7 @@ In addition to getting the connection from the specified URL, you can use Proper
|
|||
Note:
|
||||
|
||||
- The client parameter set in the application is process-level. If you want to update the parameters of the client, you need to restart the application. This is because the client parameter is a global parameter that takes effect only the first time it is set by the application.
|
||||
- The following sample code is based on taos-jdbcdriver-3.0.0.
|
||||
- The following sample code is based on taos-jdbcdriver-3.1.0.
|
||||
|
||||
```java
|
||||
public Connection getConn() throws Exception{
|
||||
|
@ -333,30 +420,6 @@ while(resultSet.next()){
|
|||
|
||||
> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.
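For example (a sketch only, with `voltage` as a hypothetical column name):

```java
import java.sql.ResultSet;
import java.sql.SQLException;

public class ReadColumnExample {
    static void readRow(ResultSet rs) throws SQLException {
        // positional access: subscripts start from 1, not 0
        Object first = rs.getObject(1);
        // recommended: access by field name ("voltage" is only an example column)
        int voltage = rs.getInt("voltage");
        System.out.println(first + ", voltage=" + voltage);
    }
}
```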
|
||||
|
||||
### Handling exceptions
|
||||
|
||||
After an error is reported, the error message and error code can be obtained through SQLException.
|
||||
|
||||
```java
|
||||
try (Statement statement = connection.createStatement()) {
|
||||
// executeQuery
|
||||
ResultSet resultSet = statement.executeQuery(sql);
|
||||
// print result
|
||||
printResult(resultSet);
|
||||
} catch (SQLException e) {
|
||||
System.out.println("ERROR Message: " + e.getMessage());
|
||||
System.out.println("ERROR Code: " + e.getErrorCode());
|
||||
e.printStackTrace();
|
||||
}
|
||||
```
|
||||
|
||||
There are three types of error codes that the JDBC connector can report: - Error code of the JDBC driver itself (error code between 0x2301 and 0x2350), - Error code of the native connection method (error code between 0x2351 and 0x2400), and - Error code of other TDengine function modules.
|
||||
|
||||
For specific error codes, please refer to.
|
||||
|
||||
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||
|
||||
### Writing data via parameter binding
|
||||
|
||||
TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.
|
||||
|
@ -364,9 +427,12 @@ TDengine has significantly improved the bind APIs to support data writing (INSER
|
|||
**Note:**
|
||||
|
||||
- JDBC REST connections do not currently support the bind interface
|
||||
- The following sample code is based on taos-jdbcdriver-3.0.0
|
||||
- The following sample code is based on taos-jdbcdriver-3.2.1
|
||||
- The setString method should be called for binary type data, and the setNString method should be called for nchar type data
|
||||
- both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition
|
||||
- Do not use `db.?` in prepareStatement when specifying the database with the table name; use `?` directly, and then specify the database in setTableName, for example: `prepareStatement.setTableName("db.t1")`.
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
```java
|
||||
public class ParameterBindingDemo {
|
||||
|
@ -594,21 +660,7 @@ public class ParameterBindingDemo {
|
|||
}
|
||||
```
|
||||
|
||||
The methods to set TAGS values:
|
||||
|
||||
```java
|
||||
public void setTagNull(int index, int type)
|
||||
public void setTagBoolean(int index, boolean value)
|
||||
public void setTagInt(int index, int value)
|
||||
public void setTagByte(int index, byte value)
|
||||
public void setTagShort(int index, short value)
|
||||
public void setTagLong(int index, long value)
|
||||
public void setTagTimestamp(int index, long value)
|
||||
public void setTagFloat(int index, float value)
|
||||
public void setTagDouble(int index, double value)
|
||||
public void setTagString(int index, String value)
|
||||
public void setTagNString(int index, String value)
|
||||
```
|
||||
**Note**: both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition
|
||||
|
||||
The methods to set VALUES columns:
|
||||
|
||||
|
@ -625,17 +677,203 @@ public void setString(int columnIndex, ArrayList<String> list, int size) throws
|
|||
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="ws" label="WebSocket connection">
|
||||
|
||||
```java
|
||||
public class ParameterBindingDemo {
|
||||
private static final String host = "127.0.0.1";
|
||||
private static final Random random = new Random(System.currentTimeMillis());
|
||||
private static final int BINARY_COLUMN_SIZE = 30;
|
||||
private static final String[] schemaList = {
|
||||
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
|
||||
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
|
||||
"create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
|
||||
"create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
|
||||
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
|
||||
};
|
||||
private static final int numOfSubTable = 10, numOfRow = 10;
|
||||
|
||||
public static void main(String[] args) throws SQLException {
|
||||
|
||||
String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true";
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");
|
||||
|
||||
init(conn);
|
||||
|
||||
bindInteger(conn);
|
||||
|
||||
bindFloat(conn);
|
||||
|
||||
bindBoolean(conn);
|
||||
|
||||
bindBytes(conn);
|
||||
|
||||
bindString(conn);
|
||||
|
||||
conn.close();
|
||||
}
|
||||
|
||||
private static void init(Connection conn) throws SQLException {
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
stmt.execute("drop database if exists test_ws_parabind");
|
||||
stmt.execute("create database if not exists test_ws_parabind");
|
||||
stmt.execute("use test_ws_parabind");
|
||||
for (int i = 0; i < schemaList.length; i++) {
|
||||
stmt.execute(schemaList[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindInteger(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t1_" + i);
|
||||
// set tags
|
||||
pstmt.setTagByte(1, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
|
||||
pstmt.setTagShort(2, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
|
||||
pstmt.setTagInt(3, random.nextInt(Integer.MAX_VALUE));
|
||||
pstmt.setTagLong(4, random.nextLong());
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setByte(2, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
|
||||
pstmt.setShort(3, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
|
||||
pstmt.setInt(4, random.nextInt(Integer.MAX_VALUE));
|
||||
pstmt.setLong(5, random.nextLong());
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindFloat(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";
|
||||
|
||||
try(TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t2_" + i);
|
||||
// set tags
|
||||
pstmt.setTagFloat(1, random.nextFloat());
|
||||
pstmt.setTagDouble(2, random.nextDouble());
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setFloat(2, random.nextFloat());
|
||||
pstmt.setDouble(3, random.nextDouble());
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindBoolean(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable3 tags(?) values(?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t3_" + i);
|
||||
// set tags
|
||||
pstmt.setTagBoolean(1, random.nextBoolean());
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setBoolean(2, random.nextBoolean());
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindBytes(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable4 tags(?) values(?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t4_" + i);
|
||||
// set tags
|
||||
pstmt.setTagString(1, "abc");
|
||||
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setString(2, "abc");
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindString(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable5 tags(?) values(?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t5_" + i);
|
||||
// set tags
|
||||
pstmt.setTagNString(1, "California.SanFrancisco");
|
||||
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setNString(2, "California.SanFrancisco");
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
The methods to set TAGS values:
|
||||
|
||||
```java
|
||||
public void setTagNull(int index, int type)
|
||||
public void setTagBoolean(int index, boolean value)
|
||||
public void setTagInt(int index, int value)
|
||||
public void setTagByte(int index, byte value)
|
||||
public void setTagShort(int index, short value)
|
||||
public void setTagLong(int index, long value)
|
||||
public void setTagTimestamp(int index, long value)
|
||||
public void setTagFloat(int index, float value)
|
||||
public void setTagDouble(int index, double value)
|
||||
public void setTagString(int index, String value)
|
||||
public void setTagNString(int index, String value)
|
||||
```
|
||||
|
||||
### Schemaless Writing
|
||||
|
||||
TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless).
|
||||
|
||||
Note:
|
||||
|
||||
- JDBC REST connections do not currently support schemaless writes
|
||||
- The following sample code is based on taos-jdbcdriver-3.0.0
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
```java
|
||||
public class SchemalessInsertTest {
|
||||
public class SchemalessJniTest {
|
||||
private static final String host = "127.0.0.1";
|
||||
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
|
||||
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
|
||||
|
@ -663,6 +901,41 @@ public class SchemalessInsertTest {
|
|||
}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="ws" label="WebSocket connection">
|
||||
|
||||
```java
|
||||
public class SchemalessWsTest {
|
||||
private static final String host = "127.0.0.1";
|
||||
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
|
||||
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
|
||||
private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
|
||||
|
||||
public static void main(String[] args) throws SQLException {
|
||||
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
|
||||
Connection connection = DriverManager.getConnection(url);
|
||||
init(connection);
|
||||
|
||||
SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
|
||||
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
|
||||
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
|
||||
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
|
||||
System.exit(0);
|
||||
}
|
||||
|
||||
private static void init(Connection connection) throws SQLException {
|
||||
try (Statement stmt = connection.createStatement()) {
|
||||
stmt.executeUpdate("drop database if exists test_ws_schemaless");
|
||||
stmt.executeUpdate("create database if not exists test_ws_schemaless keep 36500");
|
||||
stmt.executeUpdate("use test_ws_schemaless");
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Data Subscription
|
||||
|
||||
The TDengine Java Connector supports subscription functionality with the following application API.
|
||||
|
@ -697,8 +970,8 @@ TaosConsumer consumer = new TaosConsumer<>(config);
|
|||
- group.id: Specifies the consumer group that the consumer belongs to.
|
||||
- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set.
|
||||
- td.connect.type: Specifies the type of connection to TDengine, `jni` or `WebSocket`. The default is `jni`.
|
||||
- httpConnectTimeout:WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
|
||||
- messageWaitTimeout:socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
|
||||
- httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
|
||||
- messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
|
||||
- For more information, see [Consumer Parameters](../../../develop/tmq).
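A minimal configuration sketch for a WebSocket consumer is shown below. It assumes a `ResultBean` and a matching deserializer are defined as described in the following examples; the `bootstrap.servers` address and the deserializer class name are illustrative assumptions:

```java
import java.sql.SQLException;
import java.util.Properties;
import com.taosdata.jdbc.tmq.TaosConsumer;

public class ConsumerConfigSketch {
    public static void main(String[] args) throws SQLException {
        Properties config = new Properties();
        config.setProperty("td.connect.type", "WebSocket");         // jni (default) or WebSocket
        config.setProperty("bootstrap.servers", "localhost:6041");  // assumed address of a local taosAdapter
        config.setProperty("group.id", "group1");                   // consumer group of this consumer
        // deserializer class name is hypothetical; inherit ReferenceDeserializer as described above
        config.setProperty("value.deserializer", "com.example.ResultDeserializer");
        config.setProperty("httpConnectTimeout", "5000");           // WebSocket connection timeout in ms
        config.setProperty("messageWaitTimeout", "10000");          // socket timeout in ms

        TaosConsumer<ResultBean> consumer = new TaosConsumer<>(config);
        // subscribe and poll as shown in the following sections, then close the consumer
        consumer.close();
    }
}
```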
|
||||
|
||||
#### Subscribe to consume data
|
||||
|
@ -706,8 +979,9 @@ TaosConsumer consumer = new TaosConsumer<>(config);
|
|||
```java
|
||||
while(true) {
|
||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
||||
for (ResultBean record : records) {
|
||||
process(record);
|
||||
for (ConsumerRecord<ResultBean> record : records) {
|
||||
ResultBean bean = record.value();
|
||||
process(bean);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
@ -760,8 +1034,9 @@ public abstract class ConsumerLoop {
|
|||
|
||||
while (!shutdown.get()) {
|
||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
||||
for (ResultBean record : records) {
|
||||
process(record);
|
||||
for (ConsumerRecord<ResultBean> record : records) {
|
||||
ResultBean bean = record.value();
|
||||
process(bean);
|
||||
}
|
||||
}
|
||||
consumer.unsubscribe();
|
||||
|
@ -836,8 +1111,9 @@ public abstract class ConsumerLoop {
|
|||
|
||||
while (!shutdown.get()) {
|
||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
||||
for (ResultBean record : records) {
|
||||
process(record);
|
||||
for (ConsumerRecord<ResultBean> record : records) {
|
||||
ResultBean bean = record.value();
|
||||
process(bean);
|
||||
}
|
||||
}
|
||||
consumer.unsubscribe();
|
||||
|
@ -963,20 +1239,6 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
|
|||
|
||||
[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
|
||||
|
||||
## Recent update logs
|
||||
|
||||
| taos-jdbcdriver version | major changes |
|
||||
| :---------------------: | :--------------------------------------------: |
|
||||
| 3.1.0 | JDBC REST connection supports subscription over WebSocket |
|
||||
| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use other version in the JDK 8 environment |
|
||||
| 3.0.0 | Support for TDengine 3.0 |
|
||||
| 2.0.42 | fix wasNull interface return value in WebSocket connection |
|
||||
| 2.0.41 | fix decode method of username and password in REST connection |
|
||||
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
|
||||
| 2.0.38 | JDBC REST connections add bulk pull function |
|
||||
| 2.0.37 | Support json tags |
|
||||
| 2.0.36 | Support schemaless writing |
|
||||
|
||||
## Frequently Asked Questions
|
||||
|
||||
1. Why is there no performance improvement when using Statement's `addBatch()` and `executeBatch()` to perform `batch data writing/update`?
|
||||
|
@ -999,15 +1261,15 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
|
|||
|
||||
4. java.lang.NoSuchMethodError: setByteArray
|
||||
|
||||
**Cause**: taos-jbdcdriver 3.* only supports TDengine 3.0 and later.
|
||||
**Cause**: taos-jbdcdriver 3.\* only supports TDengine 3.0 and later.
|
||||
|
||||
**Solution**: Use taos-jdbcdriver 2.* with your TDengine 2.* deployment.
|
||||
**Solution**: Use taos-jdbcdriver 2.\* with your TDengine 2.\* deployment.
|
||||
|
||||
5. java.lang.NoSuchMethodError: java.nio.ByteBuffer.position(I)Ljava/nio/ByteBuffer; ... taos-jdbcdriver-3.0.1.jar
|
||||
|
||||
**Cause**:taos-jdbcdriver 3.0.1 is compiled on JDK 11.
|
||||
**Cause**: taos-jdbcdriver 3.0.1 is compiled on JDK 11.
|
||||
|
||||
**Solution**: Use taos-jdbcdriver 3.0.2.
|
||||
**Solution**: Use taos-jdbcdriver 3.0.2.
|
||||
|
||||
For additional troubleshooting, see [FAQ](../../../train-faq/faq).
|
||||
|
||||
|
|
|
@ -11,6 +11,7 @@ import TabItem from '@theme/TabItem';
|
|||
import Preparition from "./_preparation.mdx"
|
||||
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
|
||||
import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
|
||||
import RustSml from "../../07-develop/03-insert-data/_rust_schemaless.mdx"
|
||||
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
|
||||
|
||||
[](https://crates.io/crates/taos)  [](https://docs.rs/taos)
|
||||
|
@ -120,7 +121,7 @@ The parameters are described as follows:
|
|||
- **username/password**: Username and password used to create connections.
|
||||
- **host/port**: Specifies the server and port to establish a connection. If you do not specify a hostname or port, native connections default to `localhost:6030` and Websocket connections default to `localhost:6041`.
|
||||
- **database**: Specify the default database to connect to. It's optional.
|
||||
- **params**:Optional parameters.
|
||||
- **params**: Optional parameters.
|
||||
|
||||
A sample DSN description string is as follows:
|
||||
|
||||
|
@ -232,6 +233,10 @@ There are two ways to query data: Using built-in types or the [serde](https://se
|
|||
|
||||
<RustBind />
|
||||
|
||||
#### Schemaless Write
|
||||
|
||||
<RustSml />
|
||||
|
||||
### Query data
|
||||
|
||||
<RustQuery />
|
||||
|
|
|
@ -255,7 +255,7 @@ The `connect()` function returns a `taos.TaosConnection` instance. In client-sid
|
|||
|
||||
All arguments to the `connect()` function are optional keyword arguments. The following are the connection parameters specified.
|
||||
|
||||
- `url`: The URL of taosAdapter REST service. The default is <http://localhost:6041>.
|
||||
- `url`: The URL of taosAdapter REST service. The default is <http://localhost:6041>.
|
||||
- `user`: TDengine user name. The default is `root`.
|
||||
- `password`: TDengine user password. The default is `taosdata`.
|
||||
- `timeout`: HTTP request timeout. Enter a value in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed.
|
||||
|
|
|
@ -321,18 +321,18 @@ let cursor = conn.cursor();
|
|||
| package name | version | TDengine version | Description |
|
||||
|------------------|---------|---------------------|------------------------------------------------------------------|
|
||||
| @tdengine/client | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
|
||||
| td2.0-connector | 2.0.12 | 2.4.x;2.5.x;2.6.x | Fixed cursor.close() bug. |
|
||||
| td2.0-connector | 2.0.11 | 2.4.x;2.5.x;2.6.x | Supports parameter binding, JSON tags and schemaless interface |
|
||||
| td2.0-connector | 2.0.10 | 2.4.x;2.5.x;2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription |
|
||||
| td2.0-connector | 2.0.12 | 2.4.x; 2.5.x; 2.6.x | Fixed cursor.close() bug. |
|
||||
| td2.0-connector | 2.0.11 | 2.4.x; 2.5.x; 2.6.x | Supports parameter binding, JSON tags and schemaless interface |
|
||||
| td2.0-connector | 2.0.10 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription |
|
||||
### REST Connector
|
||||
|
||||
| package name | version | TDengine version | Description |
|
||||
|----------------------|---------|---------------------|---------------------------------------------------------------------------|
|
||||
| @tdengine/rest | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
|
||||
| td2.0-rest-connector | 1.0.7 | 2.4.x;2.5.x;2.6.x | Removed default port 6041。 |
|
||||
| td2.0-rest-connector | 1.0.6 | 2.4.x;2.5.x;2.6.x | Fixed affectRows bug with create, insert, update, and alter. |
|
||||
| td2.0-rest-connector | 1.0.5 | 2.4.x;2.5.x;2.6.x | Support cloud token |
|
||||
| td2.0-rest-connector | 1.0.3 | 2.4.x;2.5.x;2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries |
|
||||
| td2.0-rest-connector | 1.0.7 | 2.4.x; 2.5.x; 2.6.x | Removed default port 6041 |
|
||||
| td2.0-rest-connector | 1.0.6 | 2.4.x; 2.5.x; 2.6.x | Fixed affectRows bug with create, insert, update, and alter. |
|
||||
| td2.0-rest-connector | 1.0.5 | 2.4.x; 2.5.x; 2.6.x | Support cloud token |
|
||||
| td2.0-rest-connector | 1.0.3 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries |
|
||||
|
||||
## API Reference
|
||||
|
||||
|
|
|
@ -165,7 +165,7 @@ The parameters are described as follows:
|
|||
* **username/password**: Username and password used to create connections.
|
||||
* **host/port**: Specifies the server and port to establish a connection. Websocket connections default to `localhost:6041`.
|
||||
* **database**: Specify the default database to connect to. It's optional.
|
||||
* **params**:Optional parameters.
|
||||
* **params**: Optional parameters.
|
||||
|
||||
A sample DSN description string is as follows:
|
||||
|
||||
|
@ -279,7 +279,7 @@ ws://localhost:6041/test
|
|||
| TDengine.Connector | Description |
|
||||
|--------------------|--------------------------------|
|
||||
| 3.0.2 | Support .NET Framework 4.5 and above. Support .Net standard 2.0. Nuget package includes dynamic library for WebSocket.|
|
||||
| 3.0.1 | Support WebSocket and Cloud,With function query, insert, and parameter binding|
|
||||
| 3.0.1 | Support WebSocket and Cloud, with query, insert, and parameter binding functions |
|
||||
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
|
||||
| 1.0.7 | Fixed TDengine.Query() memory leak. |
|
||||
| 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. |
|
||||
|
|
|
@ -8,23 +8,23 @@ description: This document describes the TDengine PHP connector.
|
|||
|
||||
The PHP connector relies on the TDengine client driver.
|
||||
|
||||
Project Repository:<https://github.com/Yurunsoft/php-tdengine>
|
||||
Project Repository: <https://github.com/Yurunsoft/php-tdengine>
|
||||
|
||||
After TDengine client or server is installed, `taos.h` is located at:
|
||||
|
||||
- Linux:`/usr/local/taos/include`
|
||||
- Windows:`C:\TDengine\include`
|
||||
- macOS:`/usr/local/include`
|
||||
- Linux: `/usr/local/taos/include`
|
||||
- Windows: `C:\TDengine\include`
|
||||
- macOS: `/usr/local/include`
|
||||
|
||||
TDengine client driver is located at:
|
||||
|
||||
- Linux: `/usr/local/taos/driver/libtaos.so`
|
||||
- Windows: `C:\TDengine\taos.dll`
|
||||
- macOS:`/usr/local/lib/libtaos.dylib`
|
||||
- macOS: `/usr/local/lib/libtaos.dylib`
|
||||
|
||||
## Supported Platforms
|
||||
|
||||
- Windows、Linux、MacOS
|
||||
- Windows, Linux, and macOS
|
||||
|
||||
- PHP >= 7.4
|
||||
|
||||
|
@ -44,7 +44,7 @@ Regarding how to install TDengine client driver please refer to [Install Client
|
|||
|
||||
### Install php-tdengine
|
||||
|
||||
**Download Source Code Package and Unzip:**
|
||||
**Download Source Code Package and Unzip:**
|
||||
|
||||
```shell
|
||||
curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
|
||||
|
@ -54,13 +54,13 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
|
|||
|
||||
> Version number `v1.0.2` is only for example, it can be replaced to any newer version, please find available versions in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
|
||||
|
||||
**Non-Swoole Environment:**
|
||||
**Non-Swoole Environment:**
|
||||
|
||||
```shell
|
||||
phpize && ./configure && make -j && make install
|
||||
```
|
||||
|
||||
**Specify TDengine location:**
|
||||
**Specify TDengine location:**
|
||||
|
||||
```shell
|
||||
phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
|
||||
|
@ -69,7 +69,7 @@ phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 &&
|
|||
> `--with-tdengine-dir=` is followed by TDengine location.
|
||||
> It's useful in case the TDengine installation location can't be found automatically, or on macOS.
|
||||
|
||||
**Swoole Environment:**
|
||||
**Swoole Environment:**
|
||||
|
||||
```shell
|
||||
phpize && ./configure --enable-swoole && make -j && make install
|
||||
|
|
|
@ -62,7 +62,7 @@ The different database framework specifications for various programming language
|
|||
| **Regular Query** | Support | Support | Support | Support | Support | Support |
|
||||
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
|
||||
| **Subscription (TMQ)** | Supported | Support | Support | Not Supported | Not Supported | Support |
|
||||
| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||
| **Schemaless** | Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||
| **Bulk Pulling (based on WebSocket)** | Support | Support | Support | Support | Support | Support |
|
||||
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||
|
||||
|
|
|
@ -245,7 +245,7 @@ The parameters listed in this section apply to all function modes.
|
|||
- **trying_interval**: Specifies the interval between retries of insertion. The valid value is a positive number. Only valid when keep trying is enabled. Available with v3.0.9+.
|
||||
|
||||
- **childtable_from and childtable_to**: Specify the range of child tables to create. The range is [childtable_from, childtable_to).
|
||||
|
||||
|
||||
- **continue_if_fail**: Allows the user to specify the behavior when insertion fails.
|
||||
|
||||
- "continue_if_fail" : "no" // means taosBenchmark will exit if it fails to insert as default reaction behavior.
|
||||
|
|
|
@ -76,6 +76,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
|
|||
-A, --all-databases Dump all databases.
|
||||
-D, --databases=DATABASES Dump listed databases. Use comma to separate
|
||||
database names.
|
||||
-e, --escape-character Use escaped character for database name
|
||||
-N, --without-property Dump database without its properties.
|
||||
-s, --schemaonly Only dump table schemas.
|
||||
-y, --answer-yes Input yes for prompt. It will skip data file
|
||||
|
|
|
@ -12,8 +12,8 @@ After TDengine starts, it automatically writes many metrics in specific interval
|
|||
|
||||
To deploy TDinsight, we need:
|
||||
- a single-node TDengine server or a multi-node TDengine cluster, and a [Grafana] server. This dashboard requires TDengine 3.0.1.0 or above, with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters).
|
||||
- taosAdapter has been instaleld and running, please refer to [taosAdapter](../taosadapter).
|
||||
- taosKeeper has been installed and running, please refer to [taosKeeper](../taoskeeper).
|
||||
- taosAdapter has been installed and running, please refer to [taosAdapter](../taosadapter).
|
||||
- taosKeeper has been installed and running, please refer to [taosKeeper](../taosKeeper).
|
||||
|
||||
Please record
|
||||
- The endpoint of taosAdapter REST service, for example `http://tdengine.local:6041`
|
||||
|
@ -233,7 +233,7 @@ After the importing is done, `TDinsight for 3.x` dashboard is available on the p
|
|||
|
||||
In the `TDinsight for 3.x` dashboard, choose the database used by taosKeeper to store monitoring data, you can see the monitoring result.
|
||||
|
||||

|
||||

|
||||
|
||||
## TDinsight dashboard details
|
||||
|
||||
|
|
|
@ -151,7 +151,7 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
|
|||
| -------- | -------------------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Switch for allowing TDengine to collect and report crash-related information |
|
||||
| Value Range | 0,1 0: Not allowed;1:allowed |
|
||||
| Value Range | 0,1 0: Not allowed; 1: allowed |
|
||||
| Default Value | 1 |
|
||||
|
||||
|
||||
|
@ -183,7 +183,7 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
|
|||
| -------- | -------------------------------- |
|
||||
| Applicable | Server only |
|
||||
| Meaning | Whether count()/hyperloglog() return a value when the input data is empty or NULL |
|
||||
| Vlue Range | 0:Return empty line,1:Return 0 |
|
||||
| Value Range | 0: Return empty line, 1: Return 0 |
|
||||
| Default | 1 |
|
||||
| Notes | When this parameter is set to 1, for queries containing GROUP BY, PARTITION BY and INTERVAL clauses, if the input data in certain groups or windows is empty or NULL, the corresponding groups or windows have no return values |
|
||||
|
||||
|
@ -358,6 +358,17 @@ The charset that takes effect is UTF-8.
|
|||
| Value Range | 0-4096 |
|
||||
| Default Value | 2x the CPU cores |
|
||||
|
||||
## Performance Tuning
|
||||
|
||||
### numOfCommitThreads
|
||||
|
||||
| Attribute | Description |
|
||||
| ------------- | ----------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Maximum number of threads to commit |
|
||||
| Value Range | 0-1024 |
|
||||
| Default Value | |
|
||||
|
||||
## Log Parameters
|
||||
|
||||
### logDir
|
||||
|
@ -650,7 +661,7 @@ The charset that takes effect is UTF-8.
|
|||
|
||||
## 3.0 Parameters
|
||||
|
||||
| # | **参数** | **Applicable to 2.x ** | **Applicable to 3.0 ** | Current behavior in 3.0 |
|
||||
| # | **Parameter** | **Applicable to 2.x** | **Applicable to 3.0** | Current behavior in 3.0 |
|
||||
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
|
||||
| 1 | firstEp | Yes | Yes | |
|
||||
| 2 | secondEp | Yes | Yes | |
|
||||
|
|
|
@ -108,7 +108,7 @@ The following `launchctl` commands can help you manage taoskeeper service:
|
|||
|
||||
#### Launch With Configuration File
|
||||
|
||||
You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/keeper.toml` is used by default. If this file does not specify configurations, the default values are used.
|
||||
You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/taoskeeper.toml` is used by default. If this file does not specify configurations, the default values are used.
|
||||
|
||||
```shell
|
||||
$ taoskeeper -c <keeper config file>
|
||||
|
@ -153,6 +153,10 @@ database = "log"
|
|||
|
||||
# standard tables to monitor
|
||||
tables = ["normal_table"]
|
||||
|
||||
# database options for db storing metrics data
|
||||
[metrics.databaseoptions]
|
||||
cachemodel = "none"
|
||||
```
|
||||
|
||||
### Obtain Monitoring Metrics
|
||||
|
@ -203,7 +207,7 @@ taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1
|
|||
taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1
|
||||
```
|
||||
|
||||
### check_health
|
||||
### check\_health
|
||||
|
||||
```
|
||||
$ curl -i http://127.0.0.1:6043/check_health
|
||||
|
@ -219,3 +223,29 @@ Content-Length: 19
|
|||
|
||||
{"version":"1.0.0"}
|
||||
```
|
||||
|
||||
### taoskeeper with Prometheus
|
||||
|
||||
taosKeeper provides a `/metrics` API that exports TDengine metric data for Prometheus.
|
||||
|
||||
#### scrape config
|
||||
|
||||
A scrape config in Prometheus specifies a set of targets and parameters describing how to scrape metric data from an endpoint. For more information, please refer to the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config).
|
||||
|
||||
```
|
||||
# A scrape configuration containing exactly one endpoint to scrape:
|
||||
# Here it's Prometheus itself.
|
||||
scrape_configs:
|
||||
- job_name: "taoskeeper"
|
||||
# metrics_path defaults to '/metrics'
|
||||
# scheme defaults to 'http'.
|
||||
static_configs:
|
||||
- targets: ["localhost:6043"]
|
||||
```
|
||||
|
||||
#### Dashboard
|
||||
|
||||
There is a dashboard named `TaosKeeper Prometheus Dashboard for 3.x`, which provides a monitoring dashboard similar to TDinsight.
|
||||
|
||||
In Grafana, click the Dashboard menu, click `import`, enter the dashboard ID `18587`, and click the `Load` button to finish importing the `TaosKeeper Prometheus Dashboard for 3.x` dashboard.
|
||||
|
||||
|
|
|
@ -200,11 +200,16 @@ As shown above, select the `TDengine` data source in the `Query` and enter the c
|
|||
- Group by column name(s): `group by` or `partition by` columns name split by comma. By setting `Group by column name(s)`, it can show multi-dimension data if Sql is `group by` or `partition by`. Such as, it can show data by `dnode_ep` if sql is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)` and `Group by column name(s)` is `dnode_ep`.
|
||||
- Format to: format legend for `group by` or `partition by`. Such as it can display series data by `dnode_ep` if sql is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)` and `Group by column name(s)` is `dnode_ep` and `Format to` is `mem_system_{{dnode_ep}}`.
|
||||
|
||||
:::note
|
||||
|
||||
Since the REST connection is stateless, the Grafana plugin can use `<db_name>.<table_name>` in the SQL command to specify the database name.
|
||||
|
||||
:::
|
||||
|
||||
Follow the default prompt to query the average system memory usage for the specified interval on the server where the current TDengine deployment is located as follows.
|
||||
|
||||

|
||||
|
||||
查询每台 TDengine 服务器指定间隔系统内存平均使用量如下.
|
||||
The example to query the average system memory usage for the specified interval on each server as follows.
|
||||
|
||||

|
||||
|
@ -217,7 +222,7 @@ You can install TDinsight dashboard in data source configuration page (like `htt
|
|||
|
||||

|
||||
|
||||
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) 。 Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
|
||||
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
|
||||
|
||||
For more dashboards using TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a sub list:
|
||||
|
||||
|
|
|
@ -47,7 +47,7 @@ Select "Rule" in the "Rule Engine" on the left and click the "Create" button: !
|
|||
|
||||
### Edit SQL fields
|
||||
|
||||
Copy SQL bellow and paste it to the SQL edit area:
|
||||
Copy the SQL below and paste it into the SQL edit area:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
|
@ -76,7 +76,8 @@ Select "WebHook" and fill in the request URL as the address and port of the serv
|
|||
|
||||
### Edit "action"
|
||||
|
||||
Edit the resource configuration to add the key/value pairing for Authorization. If you use the default TDengine username and password then the value of key Authorization is:
|
||||
Edit the resource configuration to add the key/value pair for Authorization. If you use the default TDengine username and password, then the value of the key Authorization is:
|
||||
|
||||
```
|
||||
Basic cm9vdDp0YW9zZGF0YQ==
|
||||
```
|
||||
|
|
|
@ -46,15 +46,14 @@ Execute in any directory:
|
|||
|
||||
````
|
||||
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
|
||||
tar xzf confluent-7.1.1.tar.gz -C /opt/test
|
||||
tar xzf confluent-7.1.1.tar.gz -C /opt/
|
||||
````
|
||||
|
||||
Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
|
||||
|
||||
```title=".profile"
|
||||
export CONFLUENT_HOME=/opt/confluent-7.1.1
|
||||
PATH=$CONFLUENT_HOME/bin
|
||||
export PATH
|
||||
export PATH=$CONFLUENT_HOME/bin:$PATH
|
||||
```
|
||||
|
||||
Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile).
|
||||
|
@ -329,7 +328,15 @@ DROP DATABASE IF EXISTS test;
|
|||
CREATE DATABASE test;
|
||||
USE test;
|
||||
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
|
||||
INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(California.LoSangeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(California.LoSangeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(California.LoSangeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(California.LoSangeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
|
||||
|
||||
INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) \
|
||||
d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) \
|
||||
d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) \
|
||||
d1002 USING meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) \
|
||||
d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) \
|
||||
d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) \
|
||||
d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) \
|
||||
d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
|
||||
```
|
||||
|
||||
Use TDengine CLI to execute SQL script
|
||||
|
@ -384,7 +391,7 @@ confluent local services connect connector status
|
|||
You should now have two active connectors if you followed the previous steps. Use the following command to unload:
|
||||
|
||||
````
|
||||
confluent local services connect connector unload TDengineSourceConnector
|
||||
confluent local services connect connector unload TDengineSinkConnector
|
||||
confluent local services connect connector unload TDengineSourceConnector
|
||||
````
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ TDengine is a high-performance, scalable time-series database that supports SQL.
|
|||
|
||||
The TDengine team immediately saw the benefits of using TDengine to process time-series data with Data Studio to analyze it, and they got to work to create a connector for Data Studio.
|
||||
|
||||
With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for “TDengine”.
|
||||
With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for "TDengine".
|
||||
|
||||

|
||||
|
||||
|
@ -30,8 +30,8 @@ After the connection is established, you can use Data Studio to process your dat
|
|||
|
||||

|
||||
|
||||
In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data – some examples are shown below.
|
||||
In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data - some examples are shown below.
|
||||
|
||||

|
||||
|
||||
With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we’re sure you’ll be able to gain new insights and obtain even more value from your data.
|
||||
With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we're sure you'll be able to gain new insights and obtain even more value from your data.
|
||||
|
|
|
@ -26,9 +26,9 @@ A complete TDengine system runs on one or more physical nodes. Logically, a comp
|
|||
|
||||
**Management node (mnode)**: A virtual logical unit (M in the figure) responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes. At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 3) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). mnode adopts RAFT protocol to guarantee high data availability and high data reliability. Any data operation can only be performed through the Leader in the RAFT group. The first mnode in the mnode RAFT group is created automatically when the first dnode of the cluster is deployed. Other two follower mnodes need to be created through SQL command in TDengine CLI. There can be at most one mnode in a single dnode, and the mnode is identified by the EP of the dnode where it's located. Each dnode can communicate with each other to automatically get the EP of all mnodes.
|
||||
|
||||
**Computation node (qnode)**: A virtual logical unit (Q in the figure) responsible for executing query and computing tasks including the `show` commands based on system built-in tables. There can be multiple qnodes configured in a TDengine cluster to share the query and computing tasks. A qnode is not coupled with a specific database, that means each qnode can execute the query tasks for multiple databases in parallel. There can be at most one qnode in a single dnode, and the qnode is identified by the EP of the dnode. TDengine client driver can get the list of qnodes through the communication with mnode. If there is no qnode available in the system, query and computing tasks are executed by vnodes. When a query task is executed, according to the execution plan, one or more qnodes may be scheduled by the scheduler to execute the task. qnode can get data from vnode, and send the execution result to other qnodes for further processing. With introducing qnodes, TDengine achieves the separation between storage and computing.
|
||||
**Computation node (qnode)**: A virtual logical unit (Q in the figure) responsible for executing query and computing tasks, including the `show` commands based on system built-in tables. Multiple qnodes can be configured in a TDengine cluster to share the query and computing workload. A qnode is not coupled with a specific database, which means each qnode can execute query tasks for multiple databases in parallel. There can be at most one qnode in a single dnode, and the qnode is identified by the EP of the dnode. The TDengine client driver obtains the list of qnodes by communicating with the mnode. If there is no qnode available in the system, query and computing tasks are executed by vnodes. When a query task is executed, the scheduler may, according to the execution plan, schedule one or more qnodes to execute it. A qnode can get data from vnodes and send execution results to other qnodes for further processing. By introducing qnodes, TDengine achieves the separation of storage and computing.
|
||||
|
||||
**Stream Processing node (snode)**: A virtual logical unit (S in the figure) responsible for stream processing tasks. Multiple snodes can be configured in a TDengine cluster to share the burden of stream processing. An snode is not coupled with a specific stream, which means a single snode can execute the tasks of multiple streams. There can be at most one snode in a single dnode, and it is identified by the EP of the dnode. The mnode schedules available snodes to perform stream processing tasks. If there is no snode available in the system, stream processing tasks are executed in vnodes.
|
||||
**Stream Processing node (snode)**: A virtual logical unit (S in the figure) responsible for stream processing tasks. Multiple snodes can be configured in a TDengine cluster to share the burden of stream processing. An snode is not coupled with a specific stream, which means a single snode can execute the tasks of multiple streams. There can be at most one snode in a single dnode, and it is identified by the EP of the dnode. The mnode schedules available snodes to perform stream processing tasks. If there is no snode available in the system, stream processing tasks are executed in vnodes.
|
||||
|
||||
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed using the RAFT protocol. Write operations can only be performed on the leader vnode and are then replicated to follower vnodes, thus ensuring that each piece of data has copies on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `replica` when creating a DB, and the default is 1. Using the multiple replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system-unique ID, aka the VGroup ID, to each vgroup. Virtual nodes with the same vgroup ID belong to the same vgroup. If `replica` is set to 1, it means no data replication. The number of replicas for a database can be dynamically changed to 3 for high data reliability. Even if a virtual node group is deleted, its ID will not be reused.
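As a concrete illustration, the sketch below shows how the replica count could be specified when a database is created and changed later; the database name `power` is a placeholder.

```sql
-- create a database whose vgroups each contain 3 vnodes (3 replicas)
CREATE DATABASE power REPLICA 3;

-- a database created with the default single replica can later be raised to 3
ALTER DATABASE power REPLICA 3;

-- inspect the vgroups and which dnodes host each replica
USE power;
SHOW VGROUPS;
```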
|
||||
|
||||
|
@ -59,7 +59,7 @@ After obtaining the mnode EP list, the data node initiates the connection. It wi
|
|||
- Step 1: Connect to an existing working data node using the TDengine CLI, and then add the End Point of the new data node with the command "create dnode" (see the SQL sketch after these steps)
|
||||
- Step 2: In the system configuration parameter file `taos.cfg` of the new data node, set the `firstEp` and `secondEp` parameters to the EP of any two data nodes in the existing cluster. If there is only one existing data node in the system, skip parameter `secondEp`. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
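A minimal sketch of these two steps, assuming the existing cluster contains `h1.taosdata.com:6030` and the new data node is `h2.taosdata.com:6030` (both host names are placeholders):

```sql
-- run on any existing dnode; the new dnode's taos.cfg is assumed to already have
-- firstEp (and optionally secondEp) pointing at members of the existing cluster
CREATE DNODE "h2.taosdata.com:6030";

-- confirm that the new dnode has joined and shows a "ready" status
SHOW DNODES;
```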
|
||||
|
||||
**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not an mnode itself, it will reply to the connection initiator with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again with mnode. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
|
||||
**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it's not an mnode itself, it will reply to the connection initiator with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again with mnode. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
|
||||
|
||||
### A Typical Data Writing Process
|
||||
|
||||
|
@ -107,7 +107,7 @@ For large-scale data management, to achieve scale-out, it is generally necessary
|
|||
|
||||
VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
|
||||
|
||||
For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing resource and storage resource to process (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
|
||||
For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing and storage resources to process it (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 GB). So TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data across two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit on the number of tables for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
|
||||
|
||||
When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check whether there is an allocated vnode with free table space. If so, the table is created in that vnode immediately. If not, the system creates a new vnode on a dnode chosen from the cluster according to the current workload, and then creates the table in it. If a DB has multiple replicas, the system creates a vgroup (virtual data node group) rather than a single vnode. The system has no limit on the number of vnodes; it is only limited by the computing and storage resources of the physical nodes.
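The allocation flow just described can be illustrated with a short SQL sketch; the database, super table, and subtable names are placeholders, and the lazy allocation behavior is as described above.

```sql
-- create the database; per the description above, vnodes are allocated on demand
CREATE DATABASE power REPLICA 1;
USE power;

-- creating the first (sub)table triggers allocation of a vnode or vgroup
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
  TAGS (location BINARY(64), groupid INT);
CREATE TABLE d1001 USING meters TAGS ('California.SanFrancisco', 2);

-- observe the vgroups that were created on demand
SHOW VGROUPS;
```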
|
||||
|
||||
|
@ -132,9 +132,9 @@ Leader Vnode uses a writing process as follows:
|
|||
<center> Figure 3: TDengine Leader writing process </center>
|
||||
|
||||
1. Leader vnode receives the application data insertion request, verifies, and moves to next step;
|
||||
2. Leader vnode will write the original request packet into the database log file (WAL). If the database configuration parameter `“wal_level”` is set to 1, the vnode doesn't invoke fsync. If `wal_level` is set to 2, fsync is invoked according to another database parameter, `wal_fsync_period`.
|
||||
2. Leader vnode will write the original request packet into the database log file (WAL). If the database configuration parameter `"wal_level"` is set to 1, the vnode doesn't invoke fsync. If `wal_level` is set to 2, fsync is invoked according to another database parameter, `wal_fsync_period` (a SQL sketch of these parameters follows after this list).
|
||||
3. If there are multiple replicas, the leader vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
|
||||
4. Leader vnode writes the data into memory and adds the record to the “skip list”;
|
||||
4. Leader vnode writes the data into memory and adds the record to the "skip list";
|
||||
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
|
||||
6. If any of Step 2, 3 or 4 fails, the error will directly return to the application.
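To relate step 2 to database configuration, here is a hedged sketch of how `wal_level` and `wal_fsync_period` might be set; the database name and the 3000 ms period are placeholders, and exact option spelling should be checked against the SQL reference.

```sql
-- WAL_LEVEL 1: write the WAL without fsync; WAL_LEVEL 2: fsync every WAL_FSYNC_PERIOD ms
CREATE DATABASE power WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000;

-- the fsync period of an existing database can be adjusted as well
ALTER DATABASE power WAL_FSYNC_PERIOD 1000;
```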
|
||||
|
||||
|
@ -148,7 +148,7 @@ For a follower vnode, the write process as follows:
|
|||
|
||||
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
|
||||
2. The behavior regarding `wal_level` and `wal_fsync_period` in a follower vnode is the same as in the leader vnode.
|
||||
3. Write into memory and add the record to “skip list”.
|
||||
3. Write the data into memory and add the record to the "skip list".
|
||||
|
||||
Compared with the leader vnode, a follower vnode has no forwarding step and no confirmation reply to the application, but writing into memory and the WAL is exactly the same.
|
||||
|
||||
|
@ -156,7 +156,7 @@ Compared with Leader vnode, follower vnode has no forwarding or reply confirmati
|
|||
|
||||
Vnode maintains a version number. When memory data is persisted, the version number is also persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one.
|
||||
|
||||
When a vnode starts, its role (leader, follower) is uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other vnodes in the virtual node group and exchange status, including version and its own role. Through the exchange, the system implements a leader-selection process according to standard RAFT protocol.
|
||||
When a vnode starts, its role (leader, follower) is uncertain, and the data is in an unsynchronized state. It's necessary to establish TCP connections with other vnodes in the virtual node group and exchange status, including version and its own role. Through the exchange, the system implements a leader-selection process according to standard RAFT protocol.
|
||||
|
||||
### Synchronous Replication
|
||||
|
||||
|
@ -192,7 +192,7 @@ When data is written to disk, the system decides whether to compress the data ba
|
|||
|
||||
### Tiered Storage
|
||||
|
||||
By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”.
|
||||
By default, TDengine saves all data in the /var/lib/taos directory, and the data files of each vnode are saved in a different subdirectory under it. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter "dataDir" to allow multiple mounted hard disks to be used by the system at the same time. In addition, TDengine also provides tiered data storage, i.e. storage on different storage media according to the timestamps of data files. For example, the latest data is stored on SSD, data older than a week is stored on local hard disk, and data older than four weeks is stored on a network storage device. This reduces storage costs and ensures efficient data access. The movement of data across storage media is done automatically by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter "dataDir".
|
||||
|
||||
dataDir format is as follows:
|
||||
|
||||
|
@ -202,7 +202,7 @@ dataDir data_path [tier_level]
|
|||
|
||||
Where data_path is the folder path of the mount point and tier_level is the media storage tier. The higher the storage tier, the older the data files stored on it. Multiple hard disks can be mounted at the same storage tier, and data files on the same tier are distributed across all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be exactly one mount path without a tier_level specified, which is called the special mount disk (path). The mount path defaults to level 0 storage media and contains special file links, which must not be removed, otherwise the written data will be damaged.
|
||||
|
||||
Suppose there is a physical node with six mountable hard disks/mnt/disk1,/mnt/disk2, …,/mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is a special mount disk, you can configure it in/etc/taos/taos.cfg as follows:
|
||||
Suppose there is a physical node with six mountable hard disks /mnt/disk1, /mnt/disk2, ..., /mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 as level 1 storage media, and disk5 and disk6 as level 2 storage media. Disk1 is the special mount disk; you can configure it in /etc/taos/taos.cfg as follows:
|
||||
|
||||
```
|
||||
dataDir /mnt/disk1/taos
|
||||
|
|
|
@ -35,7 +35,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa
|
|||
|
||||
### TDengine
|
||||
|
||||
Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it.
|
||||
Download and install the [latest version of TDengine](https://docs.tdengine.com/releases/tdengine/).
|
||||
|
||||
## Data Connection Setup
|
||||
|
||||
|
|
|
@ -38,7 +38,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa
|
|||
|
||||
### Install TDengine
|
||||
|
||||
Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it.
|
||||
Download and install the [latest version of TDengine](https://docs.tdengine.com/releases/tdengine/).
|
||||
|
||||
## Data Connection Setup
|
||||
|
||||
|
|
|
@ -200,7 +200,7 @@ After migrating via DataX, we found that we can significantly improve the effici
|
|||
|
||||
### 2. Manual data migration
|
||||
|
||||
Suppose you need to use the multi-value model for data writing. In that case, you need to develop a tool to export data from OpenTSDB, confirm which timelines can be merged and imported into the same timeline, and then pass the time to import simultaneously through the SQL statement—written to the database.
|
||||
Suppose you need to use the multi-value model for data writing. In that case, you need to develop a tool to export data from OpenTSDB, confirm which timelines can be merged and imported into the same timeline, and then write the merged timelines into the database through SQL INSERT statements, as sketched below.
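As a hedged illustration only, the following sketch shows what the final SQL writes might look like once timelines have been merged into one subtable per data collection point; the super table schema, table names, and values are assumptions, not part of any migration tool.

```sql
-- assumed target schema for the merged timelines
CREATE STABLE metrics (ts TIMESTAMP, val DOUBLE) TAGS (host BINARY(64), metric BINARY(64));

-- each exported OpenTSDB point becomes a row in the subtable of its merged timeline;
-- the subtable is auto-created on the first insert
INSERT INTO host1_cpu USING metrics TAGS ('web01', 'sys.cpu.user')
VALUES ('2022-03-28 10:10:11.249', 0.32) ('2022-03-28 10:10:12.249', 0.35);
```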
|
||||
|
||||
Manual migration of data requires attention to the following two issues:
|
||||
|
||||
|
@ -258,7 +258,7 @@ Equivalent function: apercentile
|
|||
Example:
|
||||
|
||||
```sql
|
||||
Select apercentile(col1, 50, “t-digest”) from table_name
|
||||
select apercentile(col1, 50, "t-digest") from table_name
|
||||
```
|
||||
|
||||
Remark:
|
||||
|
|
|
@ -32,7 +32,7 @@ TDengine 3.0 is not compatible with the configuration and data files from previo
|
|||
2. Run `sudo rm -rf /var/log/taos/` to delete your log files.
|
||||
3. Run `sudo rm -rf /var/lib/taos/` to delete your data files.
|
||||
4. Install TDengine 3.0.
|
||||
5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support).
|
||||
5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support/).
|
||||
|
||||
### 2. How can I resolve the "Unable to establish connection" error?
|
||||
|
||||
|
|
|
@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.0.4.1
|
||||
|
||||
<Release type="tdengine" version="3.0.4.1" />
|
||||
|
||||
## 3.0.4.0
|
||||
|
||||
<Release type="tdengine" version="3.0.4.0" />
|
||||
|
|
|
@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 2.5.0
|
||||
|
||||
<Release type="tools" version="2.5.0" />
|
||||
|
||||
## 2.4.12
|
||||
|
||||
<Release type="tools" version="2.4.12" />
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include <sys/time.h>
|
||||
#include <taos.h>
|
||||
|
||||
typedef int16_t VarDataLenT;
|
||||
typedef uint16_t VarDataLenT;
|
||||
|
||||
#define TSDB_NCHAR_SIZE sizeof(int32_t)
|
||||
#define VARSTR_HEADER_SIZE sizeof(VarDataLenT)
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
#include <string.h>
|
||||
#include <taos.h>
|
||||
|
||||
typedef int16_t VarDataLenT;
|
||||
typedef uint16_t VarDataLenT;
|
||||
|
||||
#define TSDB_NCHAR_SIZE sizeof(int32_t)
|
||||
#define VARSTR_HEADER_SIZE sizeof(VarDataLenT)
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.1.0</version>
|
||||
<version>3.2.1</version>
|
||||
</dependency>
|
||||
<!-- ANCHOR_END: dep-->
|
||||
<dependency>
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package com.taos.example;
|
||||
|
||||
import com.taosdata.jdbc.tmq.ConsumerRecord;
|
||||
import com.taosdata.jdbc.tmq.ConsumerRecords;
|
||||
import com.taosdata.jdbc.tmq.TMQConstants;
|
||||
import com.taosdata.jdbc.tmq.TaosConsumer;
|
||||
|
@ -64,7 +65,8 @@ public class SubscribeDemo {
|
|||
consumer.subscribe(Collections.singletonList(TOPIC));
|
||||
while (!shutdown.get()) {
|
||||
ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
|
||||
for (Meters meter : meters) {
|
||||
for (ConsumerRecord<Meters> recode : meters) {
|
||||
Meters meter = recode.value();
|
||||
System.out.println(meter);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,74 @@
|
|||
use taos_query::common::SchemalessPrecision;
|
||||
use taos_query::common::SchemalessProtocol;
|
||||
use taos_query::common::SmlDataBuilder;
|
||||
|
||||
use crate::AsyncQueryable;
|
||||
use crate::AsyncTBuilder;
|
||||
use crate::TaosBuilder;
|
||||
|
||||
async fn put_json() -> anyhow::Result<()> {
|
||||
// std::env::set_var("RUST_LOG", "taos=trace");
|
||||
std::env::set_var("RUST_LOG", "taos=debug");
|
||||
pretty_env_logger::init();
|
||||
let dsn =
|
||||
std::env::var("TDENGINE_ClOUD_DSN").unwrap_or("http://localhost:6041".to_string());
|
||||
log::debug!("dsn: {:?}", &dsn);
|
||||
|
||||
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
|
||||
|
||||
let db = "demo_schemaless_ws";
|
||||
|
||||
client.exec(format!("drop database if exists {db}")).await?;
|
||||
|
||||
client
|
||||
.exec(format!("create database if not exists {db}"))
|
||||
.await?;
|
||||
|
||||
// should specify database before insert
|
||||
client.exec(format!("use {db}")).await?;
|
||||
|
||||
// SchemalessProtocol::Json
|
||||
let data = [
|
||||
r#"[{"metric": "meters.current", "timestamp": 1681345954000, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}}, {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]"#
|
||||
]
|
||||
.map(String::from)
|
||||
.to_vec();
|
||||
|
||||
// demo with all fields
|
||||
let sml_data = SmlDataBuilder::default()
|
||||
.protocol(SchemalessProtocol::Json)
|
||||
.precision(SchemalessPrecision::Millisecond)
|
||||
.data(data.clone())
|
||||
.ttl(1000)
|
||||
.req_id(300u64)
|
||||
.build()?;
|
||||
assert_eq!(client.put(&sml_data).await?, ());
|
||||
|
||||
// demo with default precision
|
||||
let sml_data = SmlDataBuilder::default()
|
||||
.protocol(SchemalessProtocol::Json)
|
||||
.data(data.clone())
|
||||
.ttl(1000)
|
||||
.req_id(301u64)
|
||||
.build()?;
|
||||
assert_eq!(client.put(&sml_data).await?, ());
|
||||
|
||||
// demo with default ttl
|
||||
let sml_data = SmlDataBuilder::default()
|
||||
.protocol(SchemalessProtocol::Json)
|
||||
.data(data.clone())
|
||||
.req_id(302u64)
|
||||
.build()?;
|
||||
assert_eq!(client.put(&sml_data).await?, ());
|
||||
|
||||
// demo with default req_id
|
||||
let sml_data = SmlDataBuilder::default()
|
||||
.protocol(SchemalessProtocol::Json)
|
||||
.data(data.clone())
|
||||
.build()?;
|
||||
assert_eq!(client.put(&sml_data).await?, ());
|
||||
|
||||
client.exec(format!("drop database if exists {db}")).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
|
@ -0,0 +1,78 @@
|
|||
use taos_query::common::SchemalessPrecision;
|
||||
use taos_query::common::SchemalessProtocol;
|
||||
use taos_query::common::SmlDataBuilder;
|
||||
|
||||
use crate::AsyncQueryable;
|
||||
use crate::AsyncTBuilder;
|
||||
use crate::TaosBuilder;
|
||||
|
||||
async fn put_line() -> anyhow::Result<()> {
|
||||
// std::env::set_var("RUST_LOG", "taos=trace");
|
||||
std::env::set_var("RUST_LOG", "taos=debug");
|
||||
pretty_env_logger::init();
|
||||
|
||||
let dsn =
|
||||
std::env::var("TDENGINE_ClOUD_DSN").unwrap_or("http://localhost:6041".to_string());
|
||||
log::debug!("dsn: {:?}", &dsn);
|
||||
|
||||
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
|
||||
|
||||
let db = "demo_schemaless_ws";
|
||||
|
||||
client.exec(format!("drop database if exists {db}")).await?;
|
||||
|
||||
client
|
||||
.exec(format!("create database if not exists {db}"))
|
||||
.await?;
|
||||
|
||||
// should specify database before insert
|
||||
client.exec(format!("use {db}")).await?;
|
||||
|
||||
let data = [
|
||||
"measurement,host=host1 field1=2i,field2=2.0 1577837300000",
|
||||
"measurement,host=host1 field1=2i,field2=2.0 1577837400000",
|
||||
"measurement,host=host1 field1=2i,field2=2.0 1577837500000",
|
||||
"measurement,host=host1 field1=2i,field2=2.0 1577837600000",
|
||||
]
|
||||
.map(String::from)
|
||||
.to_vec();
|
||||
|
||||
// demo with all fields
|
||||
let sml_data = SmlDataBuilder::default()
|
||||
.protocol(SchemalessProtocol::Line)
|
||||
.precision(SchemalessPrecision::Millisecond)
|
||||
.data(data.clone())
|
||||
.ttl(1000)
|
||||
.req_id(100u64)
|
||||
.build()?;
|
||||
assert_eq!(client.put(&sml_data).await?, ());
|
||||
|
||||
// demo with default ttl
|
||||
let sml_data = SmlDataBuilder::default()
|
||||
.protocol(SchemalessProtocol::Line)
|
||||
.precision(SchemalessPrecision::Millisecond)
|
||||
.data(data.clone())
|
||||
.req_id(101u64)
|
||||
.build()?;
|
||||
assert_eq!(client.put(&sml_data).await?, ());
|
||||
|
||||
// demo with default ttl and req_id
|
||||
let sml_data = SmlDataBuilder::default()
|
||||
.protocol(SchemalessProtocol::Line)
|
||||
.precision(SchemalessPrecision::Millisecond)
|
||||
.data(data.clone())
|
||||
.build()?;
|
||||
assert_eq!(client.put(&sml_data).await?, ());
|
||||
|
||||
// demo with default precision
|
||||
let sml_data = SmlDataBuilder::default()
|
||||
.protocol(SchemalessProtocol::Line)
|
||||
.data(data)
|
||||
.req_id(103u64)
|
||||
.build()?;
|
||||
assert_eq!(client.put(&sml_data).await?, ());
|
||||
|
||||
client.exec(format!("drop database if exists {db}")).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
|
@ -0,0 +1,80 @@
|
|||
use taos_query::common::SchemalessPrecision;
|
||||
use taos_query::common::SchemalessProtocol;
|
||||
use taos_query::common::SmlDataBuilder;
|
||||
|
||||
use crate::AsyncQueryable;
|
||||
use crate::AsyncTBuilder;
|
||||
use crate::TaosBuilder;
|
||||
|
||||
async fn put_telnet() -> anyhow::Result<()> {
|
||||
// std::env::set_var("RUST_LOG", "taos=trace");
|
||||
std::env::set_var("RUST_LOG", "taos=debug");
|
||||
pretty_env_logger::init();
|
||||
let dsn =
|
||||
std::env::var("TDENGINE_ClOUD_DSN").unwrap_or("http://localhost:6041".to_string());
|
||||
log::debug!("dsn: {:?}", &dsn);
|
||||
|
||||
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
|
||||
|
||||
let db = "demo_schemaless_ws";
|
||||
|
||||
client.exec(format!("drop database if exists {db}")).await?;
|
||||
|
||||
client
|
||||
.exec(format!("create database if not exists {db}"))
|
||||
.await?;
|
||||
|
||||
// should specify database before insert
|
||||
client.exec(format!("use {db}")).await?;
|
||||
|
||||
let data = [
|
||||
"meters.current 1648432611249 10.3 location=California.SanFrancisco group=2",
|
||||
"meters.current 1648432611250 12.6 location=California.SanFrancisco group=2",
|
||||
"meters.current 1648432611249 10.8 location=California.LosAngeles group=3",
|
||||
"meters.current 1648432611250 11.3 location=California.LosAngeles group=3",
|
||||
"meters.voltage 1648432611249 219 location=California.SanFrancisco group=2",
|
||||
"meters.voltage 1648432611250 218 location=California.SanFrancisco group=2",
|
||||
"meters.voltage 1648432611249 221 location=California.LosAngeles group=3",
|
||||
"meters.voltage 1648432611250 217 location=California.LosAngeles group=3",
|
||||
]
|
||||
.map(String::from)
|
||||
.to_vec();
|
||||
|
||||
// demo with all fields
|
||||
let sml_data = SmlDataBuilder::default()
|
||||
.protocol(SchemalessProtocol::Telnet)
|
||||
.precision(SchemalessPrecision::Millisecond)
|
||||
.data(data.clone())
|
||||
.ttl(1000)
|
||||
.req_id(200u64)
|
||||
.build()?;
|
||||
assert_eq!(client.put(&sml_data).await?, ());
|
||||
|
||||
// demo with default precision
|
||||
let sml_data = SmlDataBuilder::default()
|
||||
.protocol(SchemalessProtocol::Telnet)
|
||||
.data(data.clone())
|
||||
.ttl(1000)
|
||||
.req_id(201u64)
|
||||
.build()?;
|
||||
assert_eq!(client.put(&sml_data).await?, ());
|
||||
|
||||
// demo with default ttl
|
||||
let sml_data = SmlDataBuilder::default()
|
||||
.protocol(SchemalessProtocol::Telnet)
|
||||
.data(data.clone())
|
||||
.req_id(202u64)
|
||||
.build()?;
|
||||
assert_eq!(client.put(&sml_data).await?, ());
|
||||
|
||||
// demo with default req_id
|
||||
let sml_data = SmlDataBuilder::default()
|
||||
.protocol(SchemalessProtocol::Telnet)
|
||||
.data(data.clone())
|
||||
.build()?;
|
||||
assert_eq!(client.put(&sml_data).await?, ());
|
||||
|
||||
client.exec(format!("drop database if exists {db}")).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
|
@ -100,7 +100,8 @@ sudo apt-get install tdengine
|
|||
|
||||
:::tip
|
||||
apt-get 方式只适用于 Debian 或 Ubuntu 系统。
|
||||
::::
|
||||
:::
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="Windows 安装" value="windows">
|
||||
|
||||
|
@ -206,6 +207,8 @@ Active: inactive (dead)
|
|||
|
||||
- 查看服务状态:`sudo launchctl list | grep taosd`
|
||||
|
||||
- 查看服务详细信息:`launchctl print system/com.tdengine.taosd`
|
||||
|
||||
:::info
|
||||
|
||||
- `launchctl` 命令管理`com.tdengine.taosd`需要管理员权限,务必在前面加 `sudo` 来增强安全性。
|
||||
|
|
|
@ -38,7 +38,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
|
|||
- field_set 中的每个数据项都需要对自身的数据类型进行描述, 比如 1.2f32 代表 FLOAT 类型的数值 1.2, 如果不带类型后缀会被当作 DOUBLE 处理;
|
||||
- timestamp 支持多种时间精度。写入数据的时候需要用参数指定时间精度,支持从小时到纳秒的 6 种时间精度。
|
||||
- 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常。(3.0.1.3 之后的版本 smlDataFormat 默认为 false,从3.0.3.0开始,该配置废弃) [TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
|
||||
- 默认产生的子表名是根据规则生成的唯一 ID 值。用户也可以通过在 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下:假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的子表名为 cpu1。注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。[TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
|
||||
- 默认产生的子表名是根据规则生成的唯一 ID 值。用户也可以通过在client端的 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下:假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的子表名为 cpu1。注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。[TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
|
||||
:::
|
||||
|
||||
要了解更多可参考:[InfluxDB Line 协议官方文档](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) 和 [TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
|
||||
|
|
|
@ -32,7 +32,7 @@ OpenTSDB 行协议同样采用一行字符串来表示一行数据。OpenTSDB
|
|||
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
|
||||
```
|
||||
|
||||
- 默认生产的子表名是根据规则生成的唯一 ID 值。用户也可以通过在 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下:假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。
|
||||
- 默认生产的子表名是根据规则生成的唯一 ID 值。用户也可以通过在client端的 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下:假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。
|
||||
参考 [OpenTSDB Telnet API 文档](http://opentsdb.net/docs/build/html/api_telnet/put.html)。
|
||||
|
||||
## 示例代码
|
||||
|
|
|
@ -47,7 +47,7 @@ OpenTSDB JSON 格式协议采用一个 JSON 字符串表示一行或多行数据
|
|||
:::note
|
||||
|
||||
- 对于 JSON 格式协议,TDengine 并不会自动把所有标签转成 NCHAR 类型,字符串将转为 NCHAR 类型,数值将同样转换为 DOUBLE 类型。
|
||||
- 默认生成的子表名是根据规则生成的唯一 ID 值。用户也可以通过在 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下:假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` 则创建的子表名为 cpu1。注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。
|
||||
- 默认生成的子表名是根据规则生成的唯一 ID 值。用户也可以通过在client端的 taos.cfg 里配置 smlChildTableName 参数来指定某个标签值作为子表名。该标签值应该具有全局唯一性。举例如下:假设有个标签名为tname, 配置 smlChildTableName=tname, 插入数据为 `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` 则创建的子表名为 cpu1。注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。
|
||||
:::
|
||||
|
||||
## 示例代码
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
```rust
|
||||
{{#include docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs}}
|
||||
```
|
|
@ -161,7 +161,7 @@ Query OK, 6 rows in database (0.005515s)
|
|||
:::note
|
||||
|
||||
1. 无论是使用 REST 连接还是原生连接的连接器,以上示例代码都能正常工作。
|
||||
2. 唯一需要注意的是:由于 REST 接口无状态, 不能使用 `use db` 语句来切换数据库。
|
||||
2. 唯一需要注意的是:由于 REST 接口无状态,不能使用 `use db` 语句来切换数据库。除了在 REST 参数中指定数据库以外,也可以在 SQL 语句中使用 <db_name>.<table_name> 来指定数据库(示例见下方代码)。
|
||||
|
||||
:::
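下面是一个简单的 SQL 示意(库名 `test`、表名与列名均为假设):使用 REST 连接时不执行 `use db`,而是直接在 SQL 中带上数据库前缀。

```sql
-- 不执行 use test,直接用 <db_name>.<table_name> 指定数据库
SELECT COUNT(*) FROM test.meters;
INSERT INTO test.d1001 (ts, current) VALUES (NOW, 10.3);
```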
|
||||
|
||||
|
|
|
@ -25,7 +25,8 @@ import CDemo from "./_sub_c.mdx";
|
|||
|
||||
本文档不对消息队列本身的基础知识做介绍,如果需要了解,请自行搜索。
|
||||
|
||||
注意:默认是从wal消费数据,如果wal被删除,消费到的数据会不全,此时可以将参数 experimental.snapshot.enable 设置为true,从tsdb获取全部数据,但是这样的话就不能保证数据的消费顺序。所以建议根据自己的消费情况合理的设置wal的保留策略,保证可以从wal里订阅到全部数据。
|
||||
注意:数据订阅是从 WAL 消费数据,如果一些 WAL 文件被基于 WAL 保留策略删除,则已经删除的 WAL 文件中的数据就无法再消费到。需要根据业务需要在创建数据库时合理设置 `WAL_RETENTION_PERIOD` 或 `WAL_RETENTION_SIZE` ,并确保应用及时消费数据,这样才不会产生数据丢失的现象。数据订阅的行为与 Kafka 等广泛使用的消息队列类产品的行为相似。
|
||||
|
||||
## 主要数据结构和 API
|
||||
|
||||
不同语言下, TMQ 订阅相关的 API 及数据结构如下:
|
||||
|
@ -221,7 +222,7 @@ void Close()
|
|||
|
||||
```sql
|
||||
DROP DATABASE IF EXISTS tmqdb;
|
||||
CREATE DATABASE tmqdb;
|
||||
CREATE DATABASE tmqdb WAL_RETENTION_PERIOD 3600;
|
||||
CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16)) TAGS(t1 INT, t3 VARCHAR(16));
|
||||
CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0");
|
||||
CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");
|
||||
|
@ -293,7 +294,6 @@ CREATE TOPIC topic_name AS DATABASE db_name;
|
|||
| `auto.offset.reset` | enum | 消费组订阅的初始位置 | <br />`earliest`: default;从头开始订阅; <br/>`latest`: 仅从最新数据开始订阅; <br/>`none`: 没有提交的 offset 无法订阅 |
|
||||
| `enable.auto.commit` | boolean | 是否启用消费位点自动提交,true: 自动提交,客户端应用无需commit;false:客户端应用需要自行commit | 默认值为 true |
|
||||
| `auto.commit.interval.ms` | integer | 消费记录自动提交消费位点时间间隔,单位为毫秒 | 默认值为 5000 |
|
||||
| `experimental.snapshot.enable` | boolean | 是否允许从 TSDB 消费数据。当其关闭时,只能消费依据 WAL 保留策略仍然在WAL中的数据;当其打开时,除WAL中的数据以外,也能够消费已经从WAL中删除但落盘到TSDB中的数据 | 实验功能,默认关闭 |
|
||||
| `msg.with.table.name` | boolean | 是否允许从消息中解析表名, 不适用于列订阅(列订阅时可将 tbname 作为列写入 subquery 语句) |默认关闭 |
|
||||
|
||||
对于不同编程语言,其设置方式如下:
|
||||
|
@ -311,7 +311,6 @@ tmq_conf_set(conf, "group.id", "cgrpName");
|
|||
tmq_conf_set(conf, "td.connect.user", "root");
|
||||
tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
||||
tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
||||
tmq_conf_set(conf, "experimental.snapshot.enable", "true");
|
||||
tmq_conf_set(conf, "msg.with.table.name", "true");
|
||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||
|
||||
|
@ -367,7 +366,6 @@ conf := &tmq.ConfigMap{
|
|||
"td.connect.port": "6030",
|
||||
"client.id": "test_tmq_c",
|
||||
"enable.auto.commit": "false",
|
||||
"experimental.snapshot.enable": "true",
|
||||
"msg.with.table.name": "true",
|
||||
}
|
||||
consumer, err := NewConsumer(conf)
|
||||
|
@ -417,7 +415,6 @@ consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
|||
| `enable.auto.commit` | string | 启用自动提交 | 合法值:`true`, `false` |
|
||||
| `auto.commit.interval.ms` | string | 以毫秒为单位的自动提交时间间隔 | 默认值:5000 ms |
|
||||
| `auto.offset.reset` | string | 消费组订阅的初始位置 | 可选:`earliest`(default), `latest`, `none` |
|
||||
| `experimental.snapshot.enable` | string | 是否允许从 TSDB 消费数据 | 合法值:`true`, `false` |
|
||||
|
||||
</TabItem>
|
||||
|
||||
|
|
|
@ -6,18 +6,20 @@ description: "支持用户编码的聚合函数和标量函数,在查询中嵌
|
|||
|
||||
在有些应用场景中,应用逻辑需要的查询无法直接使用系统内置的函数来表示。利用 UDF(User Defined Function) 功能,TDengine 可以插入用户编写的处理代码并在查询中使用它们,就能够很方便地解决特殊应用场景中的使用需求。 UDF 通常以数据表中的一列数据做为输入,同时支持以嵌套子查询的结果作为输入。
|
||||
|
||||
TDengine 支持通过 C/C++ 语言进行 UDF 定义。接下来结合示例讲解 UDF 的使用方法。
|
||||
|
||||
用户可以通过 UDF 实现两类函数:标量函数和聚合函数。标量函数对每行数据输出一个值,如求绝对值 abs,正弦函数 sin,字符串拼接函数 concat 等。聚合函数对多行数据进行输出一个值,如求平均数 avg,最大值 max 等。
|
||||
|
||||
实现 UDF 时,需要实现规定的接口函数
|
||||
TDengine 支持通过 C/Python 语言进行 UDF 定义。接下来结合示例讲解 UDF 的使用方法。
|
||||
|
||||
## 用 C 语言实现 UDF
|
||||
|
||||
使用 C 语言实现 UDF 时,需要实现规定的接口函数
|
||||
- 标量函数需要实现标量接口函数 scalarfn 。
|
||||
- 聚合函数需要实现聚合接口函数 aggfn_start , aggfn , aggfn_finish。
|
||||
- 如果需要初始化,实现 udf_init;如果需要清理工作,实现udf_destroy。
|
||||
|
||||
接口函数的名称是 UDF 名称,或者是 UDF 名称和特定后缀(_start, _finish, _init, _destroy)的连接。列表中的scalarfn,aggfn, udf需要替换成udf函数名。
|
||||
|
||||
## 实现标量函数
|
||||
### 用 C 语言实现标量函数
|
||||
标量函数实现模板如下
|
||||
```c
|
||||
#include "taos.h"
|
||||
|
@ -49,7 +51,7 @@ int32_t scalarfn_destroy() {
|
|||
```
|
||||
scalarfn 为函数名的占位符,需要替换成函数名,如bit_and。
|
||||
|
||||
## 实现聚合函数
|
||||
### 用 C 语言实现聚合函数
|
||||
|
||||
聚合函数的实现模板如下
|
||||
```c
|
||||
|
@ -100,7 +102,7 @@ int32_t aggfn_destroy() {
|
|||
```
|
||||
aggfn为函数名的占位符,需要修改为自己的函数名,如l2norm。
|
||||
|
||||
## 接口函数定义
|
||||
### C 语言 UDF 接口函数定义
|
||||
|
||||
接口函数的名称是 udf 名称,或者是 udf 名称和特定后缀(_start, _finish, _init, _destroy)的连接。以下描述中函数名称中的 scalarfn,aggfn, udf 需要替换成udf函数名。
|
||||
|
||||
|
@ -108,7 +110,7 @@ aggfn为函数名的占位符,需要修改为自己的函数名,如l2norm。
|
|||
|
||||
接口函数参数类型见数据结构定义。
|
||||
|
||||
### 标量接口函数
|
||||
#### 标量函数接口
|
||||
|
||||
`int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`
|
||||
|
||||
|
@ -118,7 +120,7 @@ aggfn为函数名的占位符,需要修改为自己的函数名,如l2norm。
|
|||
- inputDataBlock: 输入的数据块
|
||||
- resultColumn: 输出列
|
||||
|
||||
### 聚合接口函数
|
||||
#### 聚合函数接口
|
||||
|
||||
`int32_t aggfn_start(SUdfInterBuf *interBuf)`
|
||||
|
||||
|
@ -135,7 +137,7 @@ aggfn为函数名的占位符,需要修改为自己的函数名,如l2norm。
|
|||
- result:最终结果。
|
||||
|
||||
|
||||
### UDF 初始化和销毁
|
||||
#### 初始化和销毁接口
|
||||
`int32_t udf_init()`
|
||||
|
||||
`int32_t udf_destroy()`
|
||||
|
@ -143,7 +145,7 @@ aggfn为函数名的占位符,需要修改为自己的函数名,如l2norm。
|
|||
其中 udf 是函数名的占位符。udf_init 完成初始化工作。 udf_destroy 完成清理工作。如果没有初始化工作,无需定义udf_init函数。如果没有清理工作,无需定义udf_destroy函数。
|
||||
|
||||
|
||||
## UDF 数据结构
|
||||
### C 语言 UDF 数据结构
|
||||
```c
|
||||
typedef struct SUdfColumnMeta {
|
||||
int16_t type;
|
||||
|
@ -201,7 +203,7 @@ typedef struct SUdfInterBuf {
|
|||
|
||||
为了更好的操作以上数据结构,提供了一些便利函数,定义在 taosudf.h。
|
||||
|
||||
## 编译 UDF
|
||||
### 编译 C UDF
|
||||
|
||||
用户定义函数的 C 语言源代码无法直接被 TDengine 系统使用,而是需要先编译为 动态链接库,之后才能载入 TDengine 系统。
|
||||
|
||||
|
@ -213,12 +215,9 @@ gcc -g -O0 -fPIC -shared bit_and.c -o libbitand.so
|
|||
|
||||
这样就准备好了动态链接库 libbitand.so 文件,可以供后文创建 UDF 时使用了。为了保证可靠的系统运行,编译器 GCC 推荐使用 7.5 及以上版本。
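为便于理解后续的创建步骤,下面给出一个假设性的 SQL 示意(动态库路径为占位符,准确语法请以 SQL 手册中的 UDF 管理章节为准):

```sql
-- 注册标量函数,OUTPUTTYPE 需与实现中的输出类型一致
CREATE FUNCTION bit_and AS '/usr/local/taos/udf/libbitand.so' OUTPUTTYPE INT;

-- 注册聚合函数,BUFSIZE 为中间结果缓冲区大小(字节)
CREATE AGGREGATE FUNCTION l2norm AS '/usr/local/taos/udf/libl2norm.so' OUTPUTTYPE DOUBLE BUFSIZE 8;

-- 创建成功后即可像内置函数一样在查询中使用
SELECT bit_and(c1, c2) FROM t1;
```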
|
||||
|
||||
## 管理和使用UDF
|
||||
编译好的UDF,还需要将其加入到系统才能被正常的SQL调用。关于如何管理和使用UDF,参见[UDF使用说明](../12-taos-sql/26-udf.md)
|
||||
### C UDF 示例代码
|
||||
|
||||
## 示例代码
|
||||
|
||||
### 标量函数示例 [bit_and](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/bit_and.c)
|
||||
#### 标量函数示例 [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)
|
||||
|
||||
bit_add 实现多列的按位与功能。如果只有一列,返回这一列。bit_add 忽略空值。
|
||||
|
||||
|
@ -231,7 +230,7 @@ bit_add 实现多列的按位与功能。如果只有一列,返回这一列。
|
|||
|
||||
</details>
|
||||
|
||||
### 聚合函数示例1 返回值为数值类型 [l2norm](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/l2norm.c)
|
||||
#### 聚合函数示例1 返回值为数值类型 [l2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/l2norm.c)
|
||||
|
||||
l2norm 实现了输入列的所有数据的二阶范数,即对每个数据先平方,再累加求和,最后开方。
|
||||
|
||||
|
@ -244,7 +243,7 @@ l2norm 实现了输入列的所有数据的二阶范数,即对每个数据先
|
|||
|
||||
</details>
|
||||
|
||||
### 聚合函数示例2 返回值为字符串类型 [max_vol](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/max_vol.c)
|
||||
#### 聚合函数示例2 返回值为字符串类型 [max_vol](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/max_vol.c)
|
||||
|
||||
max_vol 实现了从多个输入的电压列中找到最大电压,返回由设备ID + 最大电压所在(行,列)+ 最大电压值 组成的组合字符串值
|
||||
|
||||
|
@ -268,4 +267,125 @@ select max_vol(vol1,vol2,vol3,deviceid) from battery;
|
|||
{{#include tests/script/sh/max_vol.c}}
|
||||
```
|
||||
|
||||
</details>
|
||||
</details>
|
||||
|
||||
## 用 Python 语言实现 UDF
|
||||
|
||||
使用 Python 语言实现 UDF 时,需要实现规定的接口函数
|
||||
- 标量函数需要实现标量接口函数 process 。
|
||||
- 聚合函数需要实现聚合接口函数 start ,reduce ,finish。
|
||||
- 如果需要初始化,实现 init;如果需要清理工作,实现 destroy。
|
||||
|
||||
### 用 Python 实现标量函数
|
||||
|
||||
标量函数实现模版如下
|
||||
```Python
|
||||
def init():
|
||||
# initialization
|
||||
def destroy():
|
||||
# destroy
|
||||
def process(input: datablock) -> tuple[output_type]:
|
||||
# process input datablock,
|
||||
# datablock.data(row, col) is to access the python object in location(row,col)
|
||||
# return tuple object consisted of object of type outputtype
|
||||
```
|
||||
|
||||
### 用 Python 实现聚合函数
|
||||
|
||||
聚合函数实现模版如下
|
||||
```Python
|
||||
def init():
|
||||
#initialization
|
||||
def destroy():
|
||||
#destroy
|
||||
def start() -> bytes:
|
||||
#return serialize(init_state)
|
||||
def reduce(inputs: datablock, buf: bytes) -> bytes:
|
||||
# deserialize buf to state
|
||||
# reduce the inputs and state into new_state.
|
||||
    # use inputs.data(i,j) to access the python object at location (i,j)
|
||||
# serialize new_state into new_state_bytes
|
||||
return new_state_bytes
|
||||
def finish(buf: bytes) -> output_type:
|
||||
#return obj of type outputtype
|
||||
```
|
||||
|
||||
### Python UDF 接口函数定义
|
||||
|
||||
#### 标量函数接口
|
||||
```Python
|
||||
def process(input: datablock) -> tuple[output_type]:
|
||||
```
|
||||
- input:datablock 类似二维矩阵,通过成员方法 data(row,col)返回位于 row 行,col 列的 python 对象
|
||||
- 返回值是一个 Python 对象元组,每个元素类型为输出类型。
|
||||
|
||||
#### 聚合函数接口
|
||||
```Python
|
||||
def start() -> bytes:
|
||||
def reduce(inputs: datablock, buf: bytes) -> bytes:
|
||||
def finish(buf: bytes) -> output_type:
|
||||
```
|
||||
|
||||
首先调用 start 生成最初结果 buffer,然后输入数据会被分为多个行数据块,对每个数据块 inputs 和当前中间结果 buf 调用 reduce,得到新的中间结果,最后再调用 finish 从中间结果 buf 产生最终输出,最终输出只能含 0 或 1 条数据。
|
||||
|
||||
|
||||
#### 初始化和销毁接口
|
||||
```Python
|
||||
def init()
|
||||
def destroy()
|
||||
```
|
||||
|
||||
其中 init 完成初始化工作。 destroy 完成清理工作。如果没有初始化工作,无需定义 init 函数。如果没有清理工作,无需定义 destroy 函数。
|
||||
|
||||
### Python 和 TDengine之间的数据类型映射
|
||||
|
||||
下表描述了TDengine SQL数据类型和Python数据类型的映射。任何类型的NULL值都映射成Python的None值。
|
||||
|
||||
| **TDengine SQL数据类型** | **Python数据类型** |
|
||||
| :-----------------------: | ------------ |
|
||||
|TINYINT / SMALLINT / INT / BIGINT | int |
|
||||
|TINYINT UNSIGNED / SMALLINT UNSIGNED / INT UNSIGNED / BIGINT UNSIGNED | int |
|
||||
|FLOAT / DOUBLE | float |
|
||||
|BOOL | bool |
|
||||
|BINARY / VARCHAR / NCHAR | bytes|
|
||||
|TIMESTAMP | int |
|
||||
|JSON and other types | 不支持 |
|
||||
|
||||
### Python UDF 环境的安装
|
||||
1. 安装 taospyudf 包。此包执行Python UDF程序。
|
||||
```bash
|
||||
sudo pip install taospyudf
|
||||
ldconfig
|
||||
```
|
||||
2. 如果 Python UDF 程序执行时,通过 PYTHONPATH 引用其它的包,可以设置 taos.cfg 的 UdfdLdLibPath 变量为 PYTHONPATH 的内容。
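环境准备好之后,可以用类似下面的 SQL 将 Python UDF 注册到系统中(文件路径与函数名仅为示意,准确语法请以 SQL 手册中的 UDF 章节为准):

```sql
-- 注册 Python 标量函数,LANGUAGE 'Python' 表示由 taospyudf 执行
CREATE FUNCTION pybitand AS '/root/udf/pybitand.py' OUTPUTTYPE BIGINT LANGUAGE 'Python';

-- 注册 Python 聚合函数
CREATE AGGREGATE FUNCTION pyl2norm AS '/root/udf/pyl2norm.py' OUTPUTTYPE DOUBLE BUFSIZE 128 LANGUAGE 'Python';
```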
|
||||
|
||||
### Python UDF 示例代码
|
||||
#### 标量函数示例 [pybitand](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pybitand.py)
|
||||
|
||||
pybitand 实现多列的按位与功能。如果只有一列,返回这一列。pybitand 忽略空值。
|
||||
|
||||
<details>
|
||||
<summary>pybitand.py</summary>
|
||||
|
||||
```Python
|
||||
{{#include tests/script/sh/pybitand.py}}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
#### 聚合函数示例 [pyl2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pyl2norm.py)
|
||||
|
||||
pyl2norm 实现了输入列的所有数据的二阶范数,即对每个数据先平方,再累加求和,最后开方。
|
||||
|
||||
<details>
|
||||
<summary>pyl2norm.py</summary>
|
||||
|
||||
```c
|
||||
{{#include tests/script/sh/pyl2norm.py}}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
## 管理和使用 UDF
|
||||
在使用 UDF 之前需要先将其加入到 TDengine 系统中。关于如何管理和使用 UDF,请参考[管理和使用 UDF](../12-taos-sql/26-udf.md)
|
||||
|
||||
|
|
|
@ -36,23 +36,110 @@ REST 连接支持所有能运行 Java 的平台。
|
|||
|
||||
请参考[版本支持列表](../#版本支持)
|
||||
|
||||
## 最近更新记录
|
||||
|
||||
| taos-jdbcdriver 版本 | 主要变化 |
|
||||
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: |
|
||||
| 3.2.1 | 新增功能:WebSocket 连接支持 schemaless 与 prepareStatement 写入。变更:consumer poll 返回结果集为 ConsumerRecord,可通过 value() 获取指定结果集数据。 |
|
||||
| 3.2.0 | 存在连接问题,不推荐使用 |
|
||||
| 3.1.0 | WebSocket 连接支持订阅功能 |
|
||||
| 3.0.1 - 3.0.4 | 修复一些情况下结果集数据解析错误的问题。3.0.1 在 JDK 11 环境编译,JDK 8 环境下建议使用其他版本 |
|
||||
| 3.0.0 | 支持 TDengine 3.0 |
|
||||
| 2.0.42 | 修复 WebSocket 连接中 wasNull 接口返回值 |
|
||||
| 2.0.41 | 修正 REST 连接中用户名和密码转码方式 |
|
||||
| 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 |
|
||||
| 2.0.38 | JDBC REST 连接增加批量拉取功能 |
|
||||
| 2.0.37 | 增加对 json tag 支持 |
|
||||
| 2.0.36 | 增加对 schemaless 写入支持 |
|
||||
|
||||
**注**:REST 连接中增加 `batchfetch` 参数并设置为 true,将开启 WebSocket 连接。
|
||||
|
||||
## 处理异常
|
||||
|
||||
在报错后,通过 SQLException 可以获取到错误的信息和错误码:
|
||||
|
||||
```java
|
||||
try (Statement statement = connection.createStatement()) {
|
||||
// executeQuery
|
||||
ResultSet resultSet = statement.executeQuery(sql);
|
||||
// print result
|
||||
printResult(resultSet);
|
||||
} catch (SQLException e) {
|
||||
System.out.println("ERROR Message: " + e.getMessage());
|
||||
System.out.println("ERROR Code: " + e.getErrorCode());
|
||||
e.printStackTrace();
|
||||
}
|
||||
```
|
||||
|
||||
JDBC 连接器可能报错的错误码包括 4 种:
|
||||
|
||||
- JDBC driver 本身的报错(错误码在 0x2301 到 0x2350 之间)
|
||||
- 原生连接方法的报错(错误码在 0x2351 到 0x2360 之间)
|
||||
- 数据订阅的报错(错误码在 0x2371 到 0x2380 之间)
|
||||
- TDengine 其他功能模块的报错。
|
||||
|
||||
具体的错误码请参考:
|
||||
|
||||
| Error Code | Description | Suggested Actions |
|
||||
| ---------- | --------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
|
||||
| 0x2301 | connection already closed | 连接已经关闭,检查连接情况,或重新创建连接去执行相关指令。 |
|
||||
| 0x2302 | this operation is NOT supported currently! | 当前使用接口不支持,可以更换其他连接方式。 |
|
||||
| 0x2303 | invalid variables | 参数不合法,请检查相应接口规范,调整参数类型及大小。 |
|
||||
| 0x2304 | statement is closed | statement 已经关闭,请检查 statement 是否关闭后再次使用,或是连接是否正常。 |
|
||||
| 0x2305 | resultSet is closed | resultSet 结果集已经释放,请检查 resultSet 是否释放后再次使用。 |
|
||||
| 0x2306 | Batch is empty! | prepareStatement 添加参数后再执行 executeBatch。 |
|
||||
| 0x2307 | Can not issue data manipulation statements with executeQuery() | 更新操作应该使用 executeUpdate(),而不是 executeQuery()。 |
|
||||
| 0x2308 | Can not issue SELECT via executeUpdate() | 查询操作应该使用 executeQuery(),而不是 executeUpdate()。 |
|
||||
| 0x230d | parameter index out of range | 参数越界,请检查参数的合理范围。 |
|
||||
| 0x230e | connection already closed | 连接已经关闭,请检查 Connection 是否关闭后再次使用,或是连接是否正常。 |
|
||||
| 0x230f | unknown sql type in tdengine | 请检查 TDengine 支持的 Data Type 类型。 |
|
||||
| 0x2310 | can't register JDBC-JNI driver | 不能注册 JNI 驱动,请检查 url 是否填写正确。 |
|
||||
| 0x2312 | url is not set | 请检查 REST 连接 url 是否填写正确。 |
|
||||
| 0x2314 | numeric value out of range | 请检查获取结果集中数值类型是否使用了正确的接口。 |
|
||||
| 0x2315 | unknown taos type in tdengine | 在 TDengine 数据类型与 JDBC 数据类型转换时,是否指定了正确的 TDengine 数据类型。 |
|
||||
| 0x2317 | | REST 连接中使用了错误的请求类型。 |
|
||||
| 0x2318 | | REST 连接中出现了数据传输异常,请检查网络情况并重试。 |
|
||||
| 0x2319 | user is required | 创建连接时缺少用户名信息 |
|
||||
| 0x231a | password is required | 创建连接时缺少密码信息 |
|
||||
| 0x231c | httpEntity is null, sql: | REST 连接中执行出现异常 |
|
||||
| 0x2350 | unknown error | 未知异常,请在 github 反馈给开发人员。 |
|
||||
| 0x2352 | Unsupported encoding | 本地连接下指定了不支持的字符编码集 |
|
||||
| 0x2353 | internal error of database, please see taoslog for more details | 本地连接执行 prepareStatement 时出现错误,请检查 taos log 进行问题定位。 |
|
||||
| 0x2354 | JNI connection is NULL | 本地连接执行命令时,Connection 已经关闭。请检查与 TDengine 的连接情况。 |
|
||||
| 0x2355 | JNI result set is NULL | 本地连接获取结果集,结果集异常,请检查连接情况,并重试。 |
|
||||
| 0x2356 | invalid num of fields | 本地连接获取结果集的 meta 信息不匹配。 |
|
||||
| 0x2357 | empty sql string | 填写正确的 SQL 进行执行。 |
|
||||
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | 本地连接分配内存错误,请检查 taos log 进行问题定位。 |
|
||||
| 0x2371 | consumer properties must not be null! | 创建订阅时参数为空,请填写正确的参数。 |
|
||||
| 0x2372 | configs contain empty key, failed to set consumer property | 参数 key 中包含空值,请填写正确的参数。 |
|
||||
| 0x2373 | failed to set consumer property, | 参数 value 中包含空值,请填写正确的参数。 |
|
||||
| 0x2375 | topic reference has been destroyed | 创建数据订阅过程中,topic 引用被释放。请检查与 TDengine 的连接情况。 |
|
||||
| 0x2376 | failed to set consumer topic, topic name is empty | 创建数据订阅过程中,订阅 topic 名称为空。请检查指定的 topic 名称是否填写正确。 |
|
||||
| 0x2377 | consumer reference has been destroyed | 订阅数据传输通道已经关闭,请检查与 TDengine 的连接情况。 |
|
||||
| 0x2378 | consumer create error | 创建数据订阅失败,请根据错误信息检查 taos log 进行问题定位。 |
|
||||
| - | can't create connection with server within | 通过增加参数 httpConnectTimeout 增加连接耗时,或是请检查与 taosAdapter 之间的连接情况。 |
|
||||
| - | failed to complete the task within the specified time | 通过增加参数 messageWaitTimeout 增加执行耗时,或是请检查与 taosAdapter 之间的连接情况。 |
|
||||
|
||||
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||
|
||||
## TDengine DataType 和 Java DataType
|
||||
|
||||
TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
|
||||
|
||||
| TDengine DataType | JDBCType |
|
||||
| ----------------- | ---------------------------------- |
|
||||
| TIMESTAMP | java.sql.Timestamp |
|
||||
| INT | java.lang.Integer |
|
||||
| BIGINT | java.lang.Long |
|
||||
| FLOAT | java.lang.Float |
|
||||
| DOUBLE | java.lang.Double |
|
||||
| SMALLINT | java.lang.Short |
|
||||
| TINYINT | java.lang.Byte |
|
||||
| BOOL | java.lang.Boolean |
|
||||
| BINARY | byte array |
|
||||
| NCHAR | java.lang.String |
|
||||
| JSON | java.lang.String |
|
||||
| TDengine DataType | JDBCType |
|
||||
| ----------------- | ------------------ |
|
||||
| TIMESTAMP | java.sql.Timestamp |
|
||||
| INT | java.lang.Integer |
|
||||
| BIGINT | java.lang.Long |
|
||||
| FLOAT | java.lang.Float |
|
||||
| DOUBLE | java.lang.Double |
|
||||
| SMALLINT | java.lang.Short |
|
||||
| TINYINT | java.lang.Byte |
|
||||
| BOOL | java.lang.Boolean |
|
||||
| BINARY | byte array |
|
||||
| NCHAR | java.lang.String |
|
||||
| JSON | java.lang.String |
|
||||
|
||||
**注意**:JSON 类型仅在 tag 中支持。
|
||||
|
||||
|
@ -82,7 +169,7 @@ Maven 项目中,在 pom.xml 中添加以下依赖:
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.0.0</version>
|
||||
<version>3.2.1</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
@ -97,7 +184,7 @@ cd taos-connector-jdbc
|
|||
mvn clean install -Dmaven.test.skip=true
|
||||
```
|
||||
|
||||
编译后,在 target 目录下会产生 taos-jdbcdriver-3.0.*-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。
|
||||
编译后,在 target 目录下会产生 taos-jdbcdriver-3.2.\*-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
@ -230,7 +317,7 @@ INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFra
|
|||
**注意**:
|
||||
|
||||
- 应用中设置的 client parameter 为进程级别的,即如果要更新 client 的参数,需要重启应用。这是因为 client parameter 是全局参数,仅在应用程序的第一次设置生效。
|
||||
- 以下示例代码基于 taos-jdbcdriver-3.0.0。
|
||||
- 以下示例代码基于 taos-jdbcdriver-3.1.0。
|
||||
|
||||
```java
|
||||
public Connection getConn() throws Exception{
|
||||
|
@ -272,7 +359,7 @@ properties 中的配置参数如下:
|
|||
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket 超时时间,单位 ms,默认值为 5000。仅在 REST 连接且 batchfetch 设置为 false 时生效。
|
||||
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: 消息超时时间, 单位 ms, 默认值为 3000。 仅在 REST 连接且 batchfetch 设置为 true 时生效。
|
||||
- TSDBDriver.PROPERTY_KEY_USE_SSL: 连接中是否使用 SSL。仅在 REST 连接时生效。
|
||||
此外对 JDBC 原生连接,通过指定 URL 和 Properties 还可以指定其他参数,比如日志级别、SQL 长度等。更多详细配置请参考[客户端配置](/reference/config/#仅客户端适用)。
|
||||
此外对 JDBC 原生连接,通过指定 URL 和 Properties 还可以指定其他参数,比如日志级别、SQL 长度等。更多详细配置请参考[客户端配置](/reference/config/#仅客户端适用)。
|
||||
|
||||
### 配置参数的优先级
|
||||
|
||||
|
@ -336,30 +423,6 @@ while(resultSet.next()){
|
|||
|
||||
> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。
|
||||
|
||||
### 处理异常
|
||||
|
||||
在报错后,通过 SQLException 可以获取到错误的信息和错误码:
|
||||
|
||||
```java
|
||||
try (Statement statement = connection.createStatement()) {
|
||||
// executeQuery
|
||||
ResultSet resultSet = statement.executeQuery(sql);
|
||||
// print result
|
||||
printResult(resultSet);
|
||||
} catch (SQLException e) {
|
||||
System.out.println("ERROR Message: " + e.getMessage());
|
||||
System.out.println("ERROR Code: " + e.getErrorCode());
|
||||
e.printStackTrace();
|
||||
}
|
||||
```
|
||||
|
||||
JDBC 连接器可能报错的错误码包括 3 种:JDBC driver 本身的报错(错误码在 0x2301 到 0x2350 之间),原生连接方法的报错(错误码在 0x2351 到 0x2400 之间),TDengine 其他功能模块的报错。
|
||||
|
||||
具体的错误码请参考:
|
||||
|
||||
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||
|
||||
### 通过参数绑定写入数据
|
||||
|
||||
TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据写入(INSERT)场景的支持。采用这种方式写入数据时,能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。
|
||||
|
@ -367,9 +430,12 @@ TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据
|
|||
**注意**:
|
||||
|
||||
- JDBC REST 连接目前不支持参数绑定
|
||||
- 以下示例代码基于 taos-jdbcdriver-3.0.0
|
||||
- 以下示例代码基于 taos-jdbcdriver-3.2.1
|
||||
- binary 类型数据需要调用 setString 方法,nchar 类型数据需要调用 setNString 方法
|
||||
- setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽
|
||||
- 预处理语句中指定数据库与子表名称不要使用 `db.?`,应直接使用 `?`,然后在 setTableName 中指定数据库,如:`prepareStatement.setTableName("db.t1")`。
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="原生连接">
|
||||
|
||||
```java
|
||||
public class ParameterBindingDemo {
|
||||
|
@ -597,21 +663,7 @@ public class ParameterBindingDemo {
|
|||
}
|
||||
```
|
||||
|
||||
用于设定 TAGS 取值的方法总共有:
|
||||
|
||||
```java
|
||||
public void setTagNull(int index, int type)
|
||||
public void setTagBoolean(int index, boolean value)
|
||||
public void setTagInt(int index, int value)
|
||||
public void setTagByte(int index, byte value)
|
||||
public void setTagShort(int index, short value)
|
||||
public void setTagLong(int index, long value)
|
||||
public void setTagTimestamp(int index, long value)
|
||||
public void setTagFloat(int index, float value)
|
||||
public void setTagDouble(int index, double value)
|
||||
public void setTagString(int index, String value)
|
||||
public void setTagNString(int index, String value)
|
||||
```
|
||||
**注**:setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽
|
||||
|
||||
用于设定 VALUES 数据列的取值的方法总共有:
|
||||
|
||||
|
@ -628,17 +680,203 @@ public void setString(int columnIndex, ArrayList<String> list, int size) throws
|
|||
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="rest" label="WebSocket 连接">
|
||||
|
||||
```java
|
||||
public class ParameterBindingDemo {
|
||||
private static final String host = "127.0.0.1";
|
||||
private static final Random random = new Random(System.currentTimeMillis());
|
||||
private static final int BINARY_COLUMN_SIZE = 30;
|
||||
private static final String[] schemaList = {
|
||||
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
|
||||
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
|
||||
"create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
|
||||
"create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
|
||||
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
|
||||
};
|
||||
private static final int numOfSubTable = 10, numOfRow = 10;
|
||||
|
||||
public static void main(String[] args) throws SQLException {
|
||||
|
||||
String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true";
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");
|
||||
|
||||
init(conn);
|
||||
|
||||
bindInteger(conn);
|
||||
|
||||
bindFloat(conn);
|
||||
|
||||
bindBoolean(conn);
|
||||
|
||||
bindBytes(conn);
|
||||
|
||||
bindString(conn);
|
||||
|
||||
conn.close();
|
||||
}
|
||||
|
||||
private static void init(Connection conn) throws SQLException {
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
stmt.execute("drop database if exists test_ws_parabind");
|
||||
stmt.execute("create database if not exists test_ws_parabind");
|
||||
stmt.execute("use test_ws_parabind");
|
||||
for (int i = 0; i < schemaList.length; i++) {
|
||||
stmt.execute(schemaList[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindInteger(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t1_" + i);
|
||||
// set tags
|
||||
pstmt.setTagByte(1, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
|
||||
pstmt.setTagShort(2, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
|
||||
pstmt.setTagInt(3, random.nextInt(Integer.MAX_VALUE));
|
||||
pstmt.setTagLong(4, random.nextLong());
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setByte(2, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
|
||||
pstmt.setShort(3, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
|
||||
pstmt.setInt(4, random.nextInt(Integer.MAX_VALUE));
|
||||
pstmt.setLong(5, random.nextLong());
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindFloat(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";
|
||||
|
||||
try(TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t2_" + i);
|
||||
// set tags
|
||||
pstmt.setTagFloat(1, random.nextFloat());
|
||||
pstmt.setTagDouble(2, random.nextDouble());
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setFloat(2, random.nextFloat());
|
||||
pstmt.setDouble(3, random.nextDouble());
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindBoolean(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable3 tags(?) values(?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t3_" + i);
|
||||
// set tags
|
||||
pstmt.setTagBoolean(1, random.nextBoolean());
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setBoolean(2, random.nextBoolean());
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindBytes(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable4 tags(?) values(?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t4_" + i);
|
||||
// set tags
|
||||
pstmt.setTagString(1, new String("abc"));
|
||||
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setString(2, "abc");
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindString(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable5 tags(?) values(?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t5_" + i);
|
||||
// set tags
|
||||
pstmt.setTagNString(1, "California.SanFrancisco");
|
||||
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setNString(2, "California.SanFrancisco");
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
用于设定 TAGS 取值的方法总共有:
|
||||
|
||||
```java
|
||||
public void setTagNull(int index, int type)
|
||||
public void setTagBoolean(int index, boolean value)
|
||||
public void setTagInt(int index, int value)
|
||||
public void setTagByte(int index, byte value)
|
||||
public void setTagShort(int index, short value)
|
||||
public void setTagLong(int index, long value)
|
||||
public void setTagTimestamp(int index, long value)
|
||||
public void setTagFloat(int index, float value)
|
||||
public void setTagDouble(int index, double value)
|
||||
public void setTagString(int index, String value)
|
||||
public void setTagNString(int index, String value)
|
||||
```
|
||||
|
||||
### 无模式写入
|
||||
|
||||
TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](../../reference/schemaless/)。
|
||||
|
||||
**注意**:
|
||||
|
||||
- JDBC REST 连接目前不支持无模式写入
|
||||
- 以下示例代码基于 taos-jdbcdriver-3.0.0
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="原生连接">
|
||||
|
||||
```java
|
||||
public class SchemalessInsertTest {
|
||||
public class SchemalessJniTest {
|
||||
private static final String host = "127.0.0.1";
|
||||
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
|
||||
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
|
||||
|
@ -666,6 +904,41 @@ public class SchemalessInsertTest {
|
|||
}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="ws" label="WebSocket 连接">
|
||||
|
||||
```java
|
||||
public class SchemalessWsTest {
|
||||
private static final String host = "127.0.0.1";
|
||||
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
|
||||
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
|
||||
private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
|
||||
|
||||
public static void main(String[] args) throws SQLException {
|
||||
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
|
||||
Connection connection = DriverManager.getConnection(url);
|
||||
init(connection);
|
||||
|
||||
SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
|
||||
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
|
||||
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
|
||||
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
|
||||
System.exit(0);
|
||||
}
|
||||
|
||||
private static void init(Connection connection) throws SQLException {
|
||||
try (Statement stmt = connection.createStatement()) {
|
||||
stmt.executeUpdate("drop database if exists test_ws_schemaless");
|
||||
stmt.executeUpdate("create database if not exists test_ws_schemaless keep 36500");
|
||||
stmt.executeUpdate("use test_ws_schemaless");
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### 数据订阅
|
||||
|
||||
TDengine Java 连接器支持订阅功能,应用 API 如下:
|
||||
|
@ -702,15 +975,16 @@ TaosConsumer consumer = new TaosConsumer<>(config);
|
|||
- td.connect.type: 连接方式。jni:表示使用动态库连接的方式,ws/WebSocket:表示使用 WebSocket 进行数据通信。默认为 jni 方式。
|
||||
- httpConnectTimeout:创建连接超时参数,单位 ms,默认为 5000 ms。仅在 WebSocket 连接下有效。
|
||||
- messageWaitTimeout:数据传输超时参数,单位 ms,默认为 10000 ms。仅在 WebSocket 连接下有效。
|
||||
其他参数请参考:[Consumer 参数列表](../../../develop/tmq#创建-consumer-以及consumer-group)
|
||||
其他参数请参考:[Consumer 参数列表](../../../develop/tmq#创建-consumer-以及consumer-group)
|
||||
|
||||
#### 订阅消费数据
|
||||
|
||||
```java
|
||||
while(true) {
|
||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
||||
for (ResultBean record : records) {
|
||||
process(record);
|
||||
for (ConsumerRecord<ResultBean> record : records) {
|
||||
ResultBean bean = record.value();
|
||||
process(bean);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
@ -761,8 +1035,9 @@ public abstract class ConsumerLoop {
|
|||
|
||||
while (!shutdown.get()) {
|
||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
||||
for (ResultBean record : records) {
|
||||
process(record);
|
||||
for (ConsumerRecord<ResultBean> record : records) {
|
||||
ResultBean bean = record.value();
|
||||
process(bean);
|
||||
}
|
||||
}
|
||||
consumer.unsubscribe();
|
||||
|
@ -839,8 +1114,9 @@ public abstract class ConsumerLoop {
|
|||
|
||||
while (!shutdown.get()) {
|
||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
||||
for (ResultBean record : records) {
|
||||
process(record);
|
||||
for (ConsumerRecord<ResultBean> record : records) {
|
||||
ResultBean bean = record.value();
|
||||
process(bean);
|
||||
}
|
||||
}
|
||||
consumer.unsubscribe();
|
||||
|
@ -966,20 +1242,6 @@ public static void main(String[] args) throws Exception {
|
|||
|
||||
请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
|
||||
|
||||
## 最近更新记录
|
||||
|
||||
| taos-jdbcdriver 版本 | 主要变化 |
|
||||
| :------------------: | :----------------------------: |
|
||||
| 3.1.0 | WebSocket 连接支持订阅功能 |
|
||||
| 3.0.1 - 3.0.4 | 修复一些情况下结果集数据解析错误的问题。3.0.1 在 JDK 11 环境编译,JDK 8 环境下建议使用其他版本 |
|
||||
| 3.0.0 | 支持 TDengine 3.0 |
|
||||
| 2.0.42 | 修正 WebSocket 连接中 wasNull 接口返回值 |
|
||||
| 2.0.41 | 修正 REST 连接中用户名和密码转码方式 |
|
||||
| 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 |
|
||||
| 2.0.38 | JDBC REST 连接增加批量拉取功能 |
|
||||
| 2.0.37 | 增加对 json tag 支持 |
|
||||
| 2.0.36 | 增加对 schemaless 写入支持 |
|
||||
|
||||
## 常见问题
|
||||
|
||||
1. 使用 Statement 的 `addBatch()` 和 `executeBatch()` 来执行“批量写入/更新”,为什么没有带来性能上的提升?
|
||||
|
@ -1002,9 +1264,9 @@ public static void main(String[] args) throws Exception {
|
|||
|
||||
4. java.lang.NoSuchMethodError: setByteArray
|
||||
|
||||
**原因**:taos-jdbcdriver 3.* 版本仅支持 TDengine 3.0 及以上版本。
|
||||
**原因**:taos-jdbcdriver 3.\* 版本仅支持 TDengine 3.0 及以上版本。
|
||||
|
||||
**解决方法**: 使用 taos-jdbcdriver 2.* 版本连接 TDengine 2.* 版本。
|
||||
**解决方法**: 使用 taos-jdbcdriver 2.\* 版本连接 TDengine 2.\* 版本。
|
||||
|
||||
5. java.lang.NoSuchMethodError: java.nio.ByteBuffer.position(I)Ljava/nio/ByteBuffer; ... taos-jdbcdriver-3.0.1.jar
|
||||
|
||||
|
|
|
@ -10,6 +10,7 @@ import TabItem from '@theme/TabItem';
|
|||
import Preparation from "./_preparation.mdx"
|
||||
import RustInsert from "../07-develop/03-insert-data/_rust_sql.mdx"
|
||||
import RustBind from "../07-develop/03-insert-data/_rust_stmt.mdx"
|
||||
import RustSml from "../07-develop/03-insert-data/_rust_schemaless.mdx"
|
||||
import RustQuery from "../07-develop/04-query-data/_rust.mdx"
|
||||
|
||||
[](https://crates.io/crates/taos)  [](https://docs.rs/taos)
|
||||
|
@ -230,6 +231,10 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
|
|||
|
||||
<RustBind />
|
||||
|
||||
#### Schemaless 写入
|
||||
|
||||
<RustSml />
|
||||
|
||||
### 查询数据
|
||||
|
||||
<RustQuery />
|
||||
|
|
|
@ -61,7 +61,7 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
|
|||
| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **参数绑定** | 暂不支持 | 暂不支持 | 支持 | 支持 | 暂不支持 | 支持 |
|
||||
| **数据订阅(TMQ)** | 支持 | 支持 | 支持 | 暂不支持 | 暂不支持 | 支持 |
|
||||
| **Schemaless** | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 |
|
||||
| **Schemaless** | 支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 |
|
||||
| **批量拉取(基于 WebSocket)** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |
|
||||
|
||||
|
|
|
@ -71,8 +71,8 @@ database_option: {
|
|||
- 0:表示可以创建多张超级表。
|
||||
- 1:表示只可以创建一张超级表。
|
||||
- STT_TRIGGER:表示落盘文件触发文件合并的个数。默认为 1,范围 1 到 16。对于少表高频场景,此参数建议使用默认配置,或较小的值;而对于多表低频场景,此参数建议配置较大的值。
|
||||
- TABLE_PREFIX:内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度。
|
||||
- TABLE_SUFFIX:内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度。
|
||||
- TABLE_PREFIX:当其为正值时,在决定把一个表分配到哪个 vgroup 时要忽略表名中指定长度的前缀;当其为负值时,在决定把一个表分配到哪个 vgroup 时只使用表名中指定长度的前缀。例如,假定表名为 "v30001",当 TABLE_PREFIX = 2 时使用 "0001" 来决定分配到哪个 vgroup,当 TABLE_PREFIX = -2 时使用 "v3" 来决定分配到哪个 vgroup。
|
||||
- TABLE_SUFFIX:当其为正值时,在决定把一个表分配到哪个 vgroup 时要忽略表名中指定长度的后缀;当其为负值时,在决定把一个表分配到哪个 vgroup 时只使用表名中指定长度的后缀。例如,假定表名为 "v30001",当 TABLE_SUFFIX = 2 时使用 "v300" 来决定分配到哪个 vgroup,当 TABLE_SUFFIX = -2 时使用 "01" 来决定分配到哪个 vgroup(组合用法示例见本节末尾)。
|
||||
- TSDB_PAGESIZE:一个 VNODE 中时序数据存储引擎的页大小,单位为 KB,默认为 4 KB。范围为 1 到 16384,即 1 KB 到 16 MB。
|
||||
- WAL_RETENTION_PERIOD: 为了数据订阅消费,需要WAL日志文件额外保留的最大时长策略。WAL日志清理,不受订阅客户端消费状态影响。单位为 s。默认为 0,表示无需为订阅保留。新建订阅,应先设置恰当的时长策略。
|
||||
- WAL_RETENTION_SIZE:为了数据订阅消费,需要WAL日志文件额外保留的最大累计大小策略。单位为 KB。默认为 0,表示累计大小无上限。
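
结合上述参数,下面给出一个建库示例草稿(库名 db_demo 与各参数取值均为演示用假设,请按实际场景调整):

```sql
-- 演示:建库时同时指定落盘合并触发个数、表名前/后缀忽略长度、时序数据页大小与 WAL 保留策略
CREATE DATABASE db_demo
  STT_TRIGGER 1
  TABLE_PREFIX 2
  TABLE_SUFFIX 2
  TSDB_PAGESIZE 4
  WAL_RETENTION_PERIOD 3600
  WAL_RETENTION_SIZE 0;
```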
|
||||
|
|
|
@ -33,7 +33,7 @@ column_definition:
|
|||
SHOW STABLES [LIKE tb_name_wildcard];
|
||||
```
|
||||
|
||||
查看数据库内全部 STable,及其相关信息,包括 STable 的名称、创建时间、列数量、标签(TAG)数量、通过该 STable 建表的数量。
|
||||
查看数据库内全部超级表。
|
||||
|
||||
### 显示一个超级表的创建语句
|
||||
|
||||
|
|
|
@ -55,7 +55,7 @@ window_clause: {
|
|||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
||||
|
||||
interp_clause:
|
||||
RANGE(ts_val, ts_val), EVERY(every_val), FILL(fill_mod_and_val)
|
||||
RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
|
||||
|
||||
partition_by_clause:
|
||||
PARTITION BY expr [, expr] ...
|
||||
|
|
|
@ -888,7 +888,7 @@ INTERP(expr)
|
|||
- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1 <= timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。
|
||||
- INTERP 根据 EVERY(time_unit) 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(time_unit 值)进行插值,time_unit 可取值时间单位:1a(毫秒),1s(秒),1m(分),1h(小时),1d(天),1w(周)。例如 EVERY(500a) 将对指定数据每 500 毫秒进行一次插值(完整查询示例见本节末尾)。
|
||||
- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。关于 FILL 子句如何使用请参考 [FILL 子句](../distinguished/#fill-子句)
|
||||
- INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 partition by tbname 一起使用。
|
||||
- INTERP 作用于超级表时, 会将该超级表下的所有子表数据按照主键列排序后进行插值计算,也可以搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。
|
||||
- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0版本以后支持)。
|
||||
- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0版本以后支持)。
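
下面是一个组合上述说明的查询示例草稿(假设存在包含 current 列的超级表 meters,时间范围仅作演示):

```sql
-- 演示:在指定时间范围内每 500 毫秒做一次线性插值,并返回插值时间戳与是否为插值点
SELECT _irowts, _isfilled, INTERP(current)
FROM meters
PARTITION BY tbname
RANGE('2023-01-01 00:00:00', '2023-01-01 00:01:00')
EVERY(500a)
FILL(LINEAR);
```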
|
||||
|
||||
|
|
|
@ -15,6 +15,7 @@ stream_options: {
|
|||
IGNORE EXPIRED [0|1]
|
||||
DELETE_MARK time
|
||||
FILL_HISTORY [0|1]
|
||||
IGNORE UPDATE [0|1]
|
||||
}
|
||||
|
||||
```
|
||||
|
@ -169,7 +170,7 @@ T3 时刻,最新事件到达,T 向后推移超过了第二个窗口关闭的
|
|||
在 window_close 或 max_delay 模式下,窗口关闭直接影响推送结果。在 at_once 模式下,窗口关闭只与内存占用有关。
|
||||
|
||||
|
||||
## 流式计算的过期数据处理策略
|
||||
## 流式计算对于过期数据的处理策略
|
||||
|
||||
对于已关闭的窗口,再次落入该窗口中的数据被标记为过期数据。
|
||||
|
||||
|
@ -177,11 +178,20 @@ TDengine 对于过期数据提供两种处理方式,由 IGNORE EXPIRED 选项
|
|||
|
||||
1. 重新计算,即 IGNORE EXPIRED 0:从 TSDB 中重新查找对应窗口的所有数据并重新计算得到最新结果
|
||||
|
||||
2. 直接丢弃, 即 IGNORE EXPIRED 1:默认配置,忽略过期数据
|
||||
2. 直接丢弃,即 IGNORE EXPIRED 1:默认配置,忽略过期数据
|
||||
|
||||
|
||||
无论在哪种模式下,watermark 都应该被妥善设置,来得到正确结果(直接丢弃模式)或避免频繁触发重算带来的性能开销(重新计算模式)。
|
||||
|
||||
## 流式计算对于修改数据的处理策略
|
||||
|
||||
TDengine 对于修改数据提供两种处理方式,由 IGNORE UPDATE 选项指定:
|
||||
|
||||
1. 检查数据是否被修改,即 IGNORE UPDATE 0:默认配置,如果被修改,则重新计算对应窗口。
|
||||
|
||||
2. 不检查数据是否被修改,全部按增量数据计算,即 IGNORE UPDATE 1。
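
以下示例草稿展示如何在创建流时同时指定上述两类策略(流名、目标表名以及超级表 meters 均为演示用假设):

```sql
-- 演示:窗口关闭时触发计算,watermark 为 10 秒,丢弃过期数据,检查并重算被修改的数据
CREATE STREAM IF NOT EXISTS power_stream
  TRIGGER WINDOW_CLOSE
  WATERMARK 10s
  IGNORE EXPIRED 1
  IGNORE UPDATE 0
  INTO power_stream_output AS
SELECT _wstart, AVG(current) FROM meters INTERVAL(1m);
```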
|
||||
|
||||
|
||||
## 写入已存在的超级表
|
||||
```sql
|
||||
[field1_name,...]
|
||||
|
@ -213,3 +223,29 @@ DELETE_MARK time
|
|||
```
|
||||
DELETE_MARK 用于删除缓存的窗口状态,也就是删除流计算的中间结果。如果不设置,默认值是 10 年。
|
||||
T = 最新事件时间 - DELETE_MARK
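
例如,下面的示例草稿将窗口状态(中间结果)的保留时长设置为 7 天(流名、目标表名与超级表 meters 为演示用假设):

```sql
-- 演示:超过 7 天未更新的窗口状态会被删除,以控制流计算的内存与存储占用
CREATE STREAM IF NOT EXISTS state_clean_demo
  TRIGGER AT_ONCE
  DELETE_MARK 7d
  INTO state_clean_output AS
SELECT _wstart, COUNT(*) FROM meters INTERVAL(10s);
```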
|
||||
|
||||
## 流式计算支持的函数
|
||||
|
||||
1. 所有的 [单行函数](../function/#单行函数) 均可用于流计算。
|
||||
2. 以下 19 个聚合/选择函数 <b>不能</b> 应用在创建流计算的 SQL 语句。此外的其他类型的函数均可用于流计算。
|
||||
|
||||
- [leastsquares](../function/#leastsquares)
|
||||
- [percentile](../function/#percentile)
|
||||
- [top](../function/#top)
|
||||
- [bottom](../function/#bottom)
|
||||
- [elapsed](../function/#elapsed)
|
||||
- [interp](../function/#interp)
|
||||
- [derivative](../function/#derivative)
|
||||
- [irate](../function/#irate)
|
||||
- [twa](../function/#twa)
|
||||
- [histogram](../function/#histogram)
|
||||
- [diff](../function/#diff)
|
||||
- [statecount](../function/#statecount)
|
||||
- [stateduration](../function/#stateduration)
|
||||
- [csum](../function/#csum)
|
||||
- [mavg](../function/#mavg)
|
||||
- [sample](../function/#sample)
|
||||
- [tail](../function/#tail)
|
||||
- [unique](../function/#unique)
|
||||
- [mode](../function/#mode)
|
||||
|
||||
|
|
|
@ -120,6 +120,10 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 5 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 6 | code_len | INT | 代码长度 |
|
||||
| 7 | bufsize | INT | buffer 大小 |
|
||||
| 8 | func_language | BINARY(31) | 自定义函数编程语言 |
|
||||
| 9 | func_body | BINARY(16384) | 函数体定义 |
|
||||
| 10 | func_version | INT | 函数版本号。初始版本为0,每次替换更新,版本号加1。|
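
基于上表,可以直接查询该系统表查看已注册 UDF 的实现语言与版本等信息,示例如下(列名以实际表结构为准):

```sql
-- 演示:查看各 UDF 的实现语言、版本号与缓冲区大小
SELECT name, func_language, func_version, bufsize
FROM information_schema.ins_functions;
```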
|
||||
|
||||
|
||||
## INS_INDEXES
|
||||
|
||||
|
|
|
@ -129,6 +129,14 @@ SHOW QNODES;
|
|||
|
||||
显示当前系统中 QNODE (查询节点)的信息。
|
||||
|
||||
## SHOW QUERIES
|
||||
|
||||
```sql
|
||||
SHOW QUERIES;
|
||||
```
|
||||
|
||||
显示当前系统中正在进行的查询。
|
||||
|
||||
## SHOW SCORES
|
||||
|
||||
```sql
|
||||
|
|
|
@ -13,27 +13,34 @@ description: 使用 UDF 的详细指南
|
|||
|
||||
- 创建标量函数
|
||||
```sql
|
||||
CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type;
|
||||
CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_type [LANGUAGE 'C|Python'];
|
||||
```
|
||||
|
||||
- function_name:标量函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udf 的实际名称一致;
|
||||
- library_path:包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来;
|
||||
- OR REPLACE: 如果函数已经存在,会修改已有的函数属性。
|
||||
- function_name:标量函数未来在 SQL 中被调用时的函数名;
|
||||
- LANGUAGE 'C|Python':函数编程语言,目前支持 C 语言和 Python 语言。如果省略此子句,默认为 C 语言。
|
||||
- library_path:如果编程语言是C,路径是包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件)。如果编程语言是Python,路径是包含 UDF 函数实现的Python文件路径。这个路径需要用英文单引号或英文双引号括起来;
|
||||
- output_type:此函数计算结果的数据类型名称;
|
||||
|
||||
例如,如下语句可以把 libbitand.so 创建为系统中可用的 UDF:
|
||||
例如,如下语句可以把 libbitand.so 创建为系统中可用的 UDF:
|
||||
|
||||
```sql
|
||||
CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT;
|
||||
```
|
||||
|
||||
例如,使用以下语句可以修改已经定义的 bit_and 函数,输出类型是 BIGINT,使用 Python 语言实现。
|
||||
|
||||
```sql
|
||||
CREATE OR REPLACE FUNCTION bit_and AS "/home/taos/udf_example/bit_and.py" OUTPUTTYPE BIGINT LANGUAGE 'Python';
|
||||
```
|
||||
- 创建聚合函数:
|
||||
```sql
|
||||
CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ];
|
||||
CREATE [OR REPLACE] AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ] [LANGUAGE 'C|Python'];
|
||||
```
|
||||
|
||||
- OR REPLACE: 如果函数已经存在,会修改已有的函数属性。
|
||||
- function_name:聚合函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致;
|
||||
- library_path:包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来;
|
||||
- output_type:此函数计算结果的数据类型,与上文中 udfNormalFunc 的 itype 参数不同,这里不是使用数字表示法,而是直接写类型名称即可;
|
||||
- LANGUAGE 'C|Python':函数编程语言,目前支持 C 语言和 Python 语言。
|
||||
- library_path:如果编程语言是 C,路径是包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件)。如果编程语言是 Python,路径是包含 UDF 函数实现的 Python 文件路径。这个路径需要用英文单引号或英文双引号括起来;
|
||||
- output_type:此函数计算结果的数据类型名称;
|
||||
- buffer_size:中间计算结果的缓冲区大小,单位是字节。如果不使用可以不设置。
|
||||
|
||||
例如,如下语句可以把 libl2norm.so 创建为系统中可用的 UDF:
|
||||
|
@ -41,6 +48,11 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [
|
|||
```sql
|
||||
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8;
|
||||
```
|
||||
例如,使用以下语句可以修改已经定义的 l2norm 函数的缓冲区大小为 64。
|
||||
```sql
|
||||
CREATE OR REPLACE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 64;
|
||||
```
|
||||
|
||||
关于如何开发自定义函数,请参考 [UDF使用说明](/develop/udf)。
|
||||
|
||||
## 管理 UDF
|
||||
|
|
|
@ -27,13 +27,13 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
| - | :------- | :-------- | :------- |
|
||||
| 1 | ALTER ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
|
||||
| 2 | ALTER ALL DNODES | 新增 | 修改所有DNODE的参数。
|
||||
| 3 | ALTER DATABASE | 调整 | 废除<ul><li>QUORUM:写入需要的副本确认数。3.0 版本默认行为是强一致性,且不支持修改为弱一致性。</li><li>BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。</li><li>CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。</li><li>COMP:3.0版本暂不支持修改。</li><br/>新增<li>CACHEMODEL:表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE:表示缓存子表最近数据的内存大小。</li><li>WAL_FSYNC_PERIOD:代替原FSYNC参数。</li><li>WAL_LEVEL:代替原WAL参数。</li><li>WAL_RETENTION_PERIOD:3.0.4.0版本新增,wal文件的额外保留策略,用于数据订阅。</li><li>WAL_RETENTION_SIZE:3.0.4.0版本新增,wal文件的额外保留策略,用于数据订阅。<br/>调整</li><li>REPLICA:3.0.0版本暂不支持修改。</li><li>KEEP:3.0版本新增支持带单位的设置方式。</li></ul>
|
||||
| 3 | ALTER DATABASE | 调整 | <p>废除</p><ul><li>QUORUM:写入需要的副本确认数。3.0 版本默认行为是强一致性,且不支持修改为弱一致性。</li><li>BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。</li><li>CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。</li><li>COMP:3.0版本暂不支持修改。</li></ul><p>新增</p><ul><li>CACHEMODEL:表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE:表示缓存子表最近数据的内存大小。</li><li>WAL_FSYNC_PERIOD:代替原FSYNC参数。</li><li>WAL_LEVEL:代替原WAL参数。</li><li>WAL_RETENTION_PERIOD:3.0.4.0版本新增,wal文件的额外保留策略,用于数据订阅。</li><li>WAL_RETENTION_SIZE:3.0.4.0版本新增,wal文件的额外保留策略,用于数据订阅。</li></ul><p>调整</p><ul><li>KEEP:3.0版本新增支持带单位的设置方式。</li></ul>
|
||||
| 4 | ALTER STABLE | 调整 | 废除<ul><li>CHANGE TAG:修改标签列的名称。3.0版本使用RENAME TAG代替。<br/>新增</li><li>RENAME TAG:代替原CHANGE TAG子句。</li><li>COMMENT:修改超级表的注释。</li></ul>
|
||||
| 5 | ALTER TABLE | 调整 | 废除<ul><li>CHANGE TAG:修改标签列的名称。3.0版本使用RENAME TAG代替。<br/>新增</li><li>RENAME TAG:代替原CHANGE TAG子句。</li><li>COMMENT:修改表的注释。</li><li>TTL:修改表的生命周期。</li></ul>
|
||||
| 6 | ALTER USER | 调整 | 废除<ul><li>PRIVILEGE:修改用户权限。3.0版本使用GRANT和REVOKE来授予和回收权限。<br/>新增</li><li>ENABLE:启用或停用此用户。</li><li>SYSINFO:修改用户是否可查看系统信息。</li></ul>
|
||||
| 7 | COMPACT VNODES | 暂不支持 | 整理指定VNODE的数据。3.0.0版本暂不支持。
|
||||
| 8 | CREATE ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
|
||||
| 9 | CREATE DATABASE | 调整 | 废除<ul><li>BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHE:VNODE使用的内存块的大小。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。</li><li>DAYS:数据文件存储数据的时间跨度。3.0版本使用DURATION代替。</li><li>FSYNC:当 WAL 设置为 2 时,执行 fsync 的周期。3.0版本使用WAL_FSYNC_PERIOD代替。</li><li>QUORUM:写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。</li><li>UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。</li><li>WAL:WAL 级别。3.0版本使用WAL_LEVEL代替。<br/>新增</li><li>BUFFER:一个 VNODE 写入内存池大小。</li><li>CACHEMODEL:表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE:表示缓存子表最近数据的内存大小。</li><li>DURATION:代替原DAYS参数。新增支持带单位的设置方式。</li><li>PAGES:一个 VNODE 中元数据存储引擎的缓存页个数。</li><li>PAGESIZE:一个 VNODE 中元数据存储引擎的页大小。</li><li>RETENTIONS:表示数据的聚合周期和保存时长。</li><li>STRICT:表示数据同步的一致性要求。</li><li>SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表。</li><li>VGROUPS:数据库中初始VGROUP的数目。</li><li>WAL_FSYNC_PERIOD:代替原FSYNC参数。</li><li>WAL_LEVEL:代替原WAL参数。</li><li>WAL_RETENTION_PERIOD:wal文件的额外保留策略,用于数据订阅。</li><li>WAL_RETENTION_SIZE:wal文件的额外保留策略,用于数据订阅。</li><li>WAL_ROLL_PERIOD:wal文件切换时长。</li><li>WAL_SEGMENT_SIZE:wal单个文件大小。<br/>调整</li><li>KEEP:3.0版本新增支持带单位的设置方式。</li></ul>
|
||||
| 9 | CREATE DATABASE | 调整 | <p>废除</p><ul><li>BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHE:VNODE使用的内存块的大小。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。</li><li>DAYS:数据文件存储数据的时间跨度。3.0版本使用DURATION代替。</li><li>FSYNC:当 WAL 设置为 2 时,执行 fsync 的周期。3.0版本使用WAL_FSYNC_PERIOD代替。</li><li>QUORUM:写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。</li><li>UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。</li><li>WAL:WAL 级别。3.0版本使用WAL_LEVEL代替。</li></ul><p>新增</p><ul><li>BUFFER:一个 VNODE 写入内存池大小。</li><li>CACHEMODEL:表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE:表示缓存子表最近数据的内存大小。</li><li>DURATION:代替原DAYS参数。新增支持带单位的设置方式。</li><li>PAGES:一个 VNODE 中元数据存储引擎的缓存页个数。</li><li>PAGESIZE:一个 VNODE 中元数据存储引擎的页大小。</li><li>RETENTIONS:表示数据的聚合周期和保存时长。</li><li>STRICT:表示数据同步的一致性要求。</li><li>SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表。</li><li>VGROUPS:数据库中初始VGROUP的数目。</li><li>WAL_FSYNC_PERIOD:代替原FSYNC参数。</li><li>WAL_LEVEL:代替原WAL参数。</li><li>WAL_RETENTION_PERIOD:wal文件的额外保留策略,用于数据订阅。</li><li>WAL_RETENTION_SIZE:wal文件的额外保留策略,用于数据订阅。</li><li>WAL_ROLL_PERIOD:wal文件切换时长。</li><li>WAL_SEGMENT_SIZE:wal单个文件大小。</li></ul><p>调整</p><ul><li>KEEP:3.0版本新增支持带单位的设置方式。</li></ul>
|
||||
| 10 | CREATE DNODE | 调整 | 新增主机名和端口号分开指定语法<ul><li>CREATE DNODE dnode_host_name PORT port_val</li></ul>
|
||||
| 11 | CREATE INDEX | 新增 | 创建SMA索引。
|
||||
| 12 | CREATE MNODE | 新增 | 创建管理节点。
|
||||
|
|
|
@ -79,6 +79,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
|
|||
-A, --all-databases Dump all databases.
|
||||
-D, --databases=DATABASES Dump inputted databases. Use comma to separate
|
||||
databases' name.
|
||||
-e, --escape-character Use escaped character for database name
|
||||
-N, --without-property Dump database without its properties.
|
||||
-s, --schemaonly Only dump tables' schema.
|
||||
-y, --answer-yes Input yes for prompt. It will skip data file
|
||||
|
|
|
@ -356,7 +356,7 @@ charset 的有效值是 UTF-8。
|
|||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | dnode 支持的最大 vnode 数目 |
|
||||
| 取值范围 | 0-4096 |
|
||||
| 缺省值 | CPU 核数的 2 倍 |
|
||||
| 缺省值 | CPU 核数的 2 倍 |
|
||||
|
||||
## 性能调优
|
||||
|
||||
|
@ -366,6 +366,7 @@ charset 的有效值是 UTF-8。
|
|||
| -------- | ---------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 设置写入线程的最大数量 |
|
||||
| 取值范围 | 0-1024 |
|
||||
| 缺省值 | |
|
||||
|
||||
## 日志相关
|
||||
|
@ -734,7 +735,6 @@ charset 的有效值是 UTF-8。
|
|||
| 16 | maxTmrCtrl | 是 | 否 | 3.0 行为未知 |
|
||||
| 17 | monitorReplica | 是 | 否 | 由 RAFT 协议管理多副本 |
|
||||
| 18 | smlTagNullName | 是 | 否 | 3.0 行为未知 |
|
||||
| 19 | keepColumnName | 是 | 否 | 3.0 行为未知 |
|
||||
| 20 | ratioOfQueryCores | 是 | 否 | 由 线程池 相关配置参数决定 |
|
||||
| 21 | maxStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||
| 22 | maxFirstStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||
|
|
|
@ -111,7 +111,7 @@ Active: inactive (dead)
|
|||
|
||||
#### 配置文件启动
|
||||
|
||||
执行以下命令即可快速体验 taosKeeper。当不指定 taosKeeper 配置文件时,优先使用 `/etc/taos/keeper.toml` 配置,否则将使用默认配置。
|
||||
执行以下命令即可快速体验 taosKeeper。当不指定 taosKeeper 配置文件时,优先使用 `/etc/taos/taoskeeper.toml` 配置,否则将使用默认配置。
|
||||
|
||||
```shell
|
||||
$ taoskeeper -c <keeper config file>
|
||||
|
@ -156,6 +156,10 @@ database = "log"
|
|||
|
||||
# 指定需要监控的普通表
|
||||
tables = []
|
||||
|
||||
# database options for db storing metrics data
|
||||
[metrics.databaseoptions]
|
||||
cachemodel = "none"
|
||||
```
|
||||
|
||||
### 获取监控指标
|
||||
|
@ -206,7 +210,7 @@ taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1
|
|||
taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1
|
||||
```
|
||||
|
||||
### check_health
|
||||
### check\_health
|
||||
|
||||
```
|
||||
$ curl -i http://127.0.0.1:6043/check_health
|
||||
|
@ -222,3 +226,29 @@ Content-Length: 19
|
|||
|
||||
{"version":"1.0.0"}
|
||||
```
|
||||
|
||||
### 集成 Prometheus
|
||||
|
||||
taoskeeper 提供了 `/metrics` 接口,返回了 Prometheus 格式的监控数据,Prometheus 可以从 taoskeeper 抽取监控数据,实现通过 Prometheus 监控 TDengine 的目的。
|
||||
|
||||
#### 抽取配置
|
||||
|
||||
Prometheus 提供了 `scrape_configs` 配置如何从 endpoint 抽取监控数据,通常只需要修改 `static_configs` 中的 targets 配置为 taoskeeper 的 endpoint 地址,更多配置信息请参考 [Prometheus 配置文档](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config)。
|
||||
|
||||
```
|
||||
# A scrape configuration containing exactly one endpoint to scrape:
|
||||
# Here it's Prometheus itself.
|
||||
scrape_configs:
|
||||
- job_name: "taoskeeper"
|
||||
# metrics_path defaults to '/metrics'
|
||||
# scheme defaults to 'http'.
|
||||
static_configs:
|
||||
- targets: ["localhost:6043"]
|
||||
```
|
||||
|
||||
#### Dashboard
|
||||
|
||||
我们提供了 `TaosKeeper Prometheus Dashboard for 3.x` dashboard,其监控内容与 TDinsight 类似。
|
||||
|
||||
在 Grafana Dashboard 菜单点击 `import`,dashboard ID 填写 `18587`,点击 `Load` 按钮即可导入 `TaosKeeper Prometheus Dashboard for 3.x` dashboard。
|
||||
|
||||
|
|
|
@ -54,7 +54,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
|||
|first\_ep\_dnode\_id|INT||集群 first ep 的 dnode id|
|
||||
|version|VARCHAR||tdengine version。例如:3.0.4.0|
|
||||
|master\_uptime|FLOAT||当前 master 节点的uptime。单位:天|
|
||||
|monitor_interval|INT||monitor interval。单位:秒|
|
||||
|monitor\_interval|INT||monitor interval。单位:秒|
|
||||
|dbs\_total|INT||database 总数|
|
||||
|tbs\_total|BIGINT||当前集群 table 总数|
|
||||
|stbs\_total|INT||当前集群 stable 总数|
|
||||
|
@ -107,17 +107,17 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
|||
|cpu\_system|FLOAT||服务器 cpu 使用率,从 `/proc/stat` 读取|
|
||||
|cpu\_cores|FLOAT||服务器 cpu 核数|
|
||||
|mem\_engine|INT||taosd 内存使用率,从 `/proc/<taosd_pid>/status` 读取|
|
||||
|mem\_system|INT||服务器内存使用率|
|
||||
|mem\_system|INT||服务器可用内存|
|
||||
|mem\_total|INT||服务器内存总量,单位 KB|
|
||||
|disk\_engine|INT|||
|
||||
|disk\_used|BIGINT||data dir 挂载的磁盘使用量,单位 bytes|
|
||||
|disk\_total|BIGINT||data dir 挂载的磁盘总容量,单位 bytes|
|
||||
|net\_in|FLOAT||网络吞吐率,从 `/proc/net/dev` 中读取的 received bytes。单位 bytes per second|
|
||||
|net\_out|FLOAT||网络吞吐率,从 `/proc/net/dev` 中读取的 transmit bytes。单位 bytes per second|
|
||||
|io\_read|FLOAT||io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 rchar 与上次数值计算之后,计算得到速度。单位 bytes per second|
|
||||
|io\_write|FLOAT||io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 wchar 与上次数值计算之后,计算得到速度。单位 bytes per second|
|
||||
|io\_read\_disk|FLOAT||磁盘 io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 read_bytes。单位 bytes per second|
|
||||
|io\_write\_disk|FLOAT||磁盘 io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 write_bytes。单位 bytes per second|
|
||||
|net\_in|FLOAT||网络吞吐率,从 `/proc/net/dev` 中读取的 received bytes。单位 kb/s|
|
||||
|net\_out|FLOAT||网络吞吐率,从 `/proc/net/dev` 中读取的 transmit bytes。单位 kb/s|
|
||||
|io\_read|FLOAT||io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 rchar 与上次数值计算之后,计算得到速度。单位 kb/s|
|
||||
|io\_write|FLOAT||io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 wchar 与上次数值计算之后,计算得到速度。单位 kb/s|
|
||||
|io\_read\_disk|FLOAT||磁盘 io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 read_bytes。单位 kb/s|
|
||||
|io\_write\_disk|FLOAT||磁盘 io 吞吐率,从 `/proc/<taosd_pid>/io` 中读取的 write_bytes。单位 kb/s|
|
||||
|req\_select|INT||两个间隔内发生的查询请求数目|
|
||||
|req\_select\_rate|FLOAT||两个间隔内的查询请求速度 = `req_select / monitorInterval`|
|
||||
|req\_insert|INT||两个间隔内发生的写入请求,包含的单条数据数目|
|
||||
|
|
|
@ -200,6 +200,12 @@ docker run -d \
|
|||
- Group by column name(s): **半角**逗号分隔的 `group by` 或 `partition by` 列名。如果是 `group by` or `partition by` 查询语句,设置 `Group by` 列,可以展示多维数据。例如:INPUT SQL 为 `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)`,设置 Group by 列名为 `dnode_ep`,可以按 `dnode_ep` 展示数据。
|
||||
- Format to: Group by 或 Partition by 场景下多维数据 legend 格式化格式。例如上述 INPUT SQL,将 Format to 设置为 `mem_system_{{dnode_ep}}`,展示的 legend 名字为格式化的列名。
|
||||
|
||||
:::note
|
||||
|
||||
由于 REST 接口无状态,不能使用 `use db` 语句来切换数据库。在 Grafana 插件的 SQL 语句中,可以使用 <db_name>.<table_name> 的方式指定数据库和表。
|
||||
|
||||
:::
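
为便于核对写法,这里把上文提到的 INPUT SQL 整理为代码块(`$from`、`$to`、`$interval` 为 Grafana 内置变量,`log.dnodes_info` 为监控数据默认所在的库表):

```sql
select _wstart as ts, avg(mem_system), dnode_ep
from log.dnodes_info
where ts >= $from and ts <= $to
partition by dnode_ep
interval($interval);
```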
|
||||
|
||||
按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下:
|
||||
|
||||

|
||||
|
|