Merge branch 'main' of https://github.com/taosdata/TDengine into fix/TD-22764
This commit is contained in: commit 6a15c36fc1

@@ -130,3 +130,4 @@ tools/COPYING
 tools/BUGS
 tools/taos-tools
 tools/taosws-rs
+tags
@@ -173,7 +173,7 @@ def pre_test_build_mac() {
 '''
 sh '''
 cd ${WK}/debug
-cmake .. -DBUILD_TEST=true -DBUILD_HTTPS=false
+cmake .. -DBUILD_TEST=true -DBUILD_HTTPS=false -DCMAKE_BUILD_TYPE=Release
 make -j10
 ctest -j10 || exit 7
 '''
@@ -423,7 +423,7 @@ pipeline {
 echo "${WKDIR}/restore.sh -p ${BRANCH_NAME} -n ${BUILD_ID} -c {container name}"
 }
 catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
-timeout(time: 120, unit: 'MINUTES'){
+timeout(time: 130, unit: 'MINUTES'){
 pre_test()
 script {
 sh '''
README-CN.md

@@ -104,6 +104,16 @@ sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson
 sudo yum config-manager --set-enabled Powertools
 ```
+
+#### CentOS + devtoolset
+
+In addition to the build dependencies above, run the following commands:
+
+```
+sudo yum install centos-release-scl
+sudo yum install devtoolset-9 devtoolset-9-libatomic-devel
+scl enable devtoolset-9 -- bash
+```
+
 ### macOS
 
 ```
README.md

@@ -111,6 +111,16 @@ If the PowerTools installation fails, you can try to use:
 sudo yum config-manager --set-enabled powertools
 ```
+
+#### For CentOS + devtoolset
+
+In addition to the above dependencies, run the following commands:
+
+```
+sudo yum install centos-release-scl
+sudo yum install devtoolset-9 devtoolset-9-libatomic-devel
+scl enable devtoolset-9 -- bash
+```
+
 ### macOS
 
 ```
@@ -1,6 +1,7 @@
 cmake_minimum_required(VERSION 3.0)
 
 set(CMAKE_VERBOSE_MAKEFILE OFF)
+set(TD_BUILD_TAOSA_INTERNAL FALSE)
 
 #set output directory
 SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)
@@ -21,7 +21,7 @@ IF (TD_LINUX)
 ELSEIF (TD_WINDOWS)
 SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.bat")
 INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
-INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} :needAdmin ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Windows ${TD_VER_NUMBER})")
+INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} :needAdmin ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Windows ${TD_VER_NUMBER} ${TD_BUILD_TAOSA_INTERNAL})")
 ELSEIF (TD_DARWIN)
 SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.sh")
 INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
@@ -37,6 +37,21 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
 SET(TD_LINUX_32 TRUE)
 ENDIF ()
 
+EXECUTE_PROCESS(COMMAND chmod 777 ${CMAKE_CURRENT_LIST_DIR}/../packaging/tools/get_os.sh)
+EXECUTE_PROCESS(COMMAND readlink /bin/sh OUTPUT_VARIABLE SHELL_LINK)
+MESSAGE(STATUS "The shell is: " ${SHELL_LINK})
+
+IF (${SHELL_LINK} MATCHES "dash")
+EXECUTE_PROCESS(COMMAND ${CMAKE_CURRENT_LIST_DIR}/../packaging/tools/get_os.sh "" OUTPUT_VARIABLE TD_OS_INFO)
+ELSE ()
+EXECUTE_PROCESS(COMMAND sh ${CMAKE_CURRENT_LIST_DIR}/../packaging/tools/get_os.sh "" OUTPUT_VARIABLE TD_OS_INFO)
+ENDIF ()
+MESSAGE(STATUS "The current OS is " ${TD_OS_INFO})
+IF (${TD_OS_INFO} MATCHES "Alpine")
+SET(TD_ALPINE TRUE)
+ADD_DEFINITIONS("-D_ALPINE")
+ENDIF ()
+
 ELSEIF (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
 
 SET(TD_DARWIN TRUE)
@@ -2,7 +2,7 @@
 # taosws-rs
 ExternalProject_Add(taosws-rs
 GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
-GIT_TAG f406d51
+GIT_TAG main
 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
 BINARY_DIR ""
 #BUILD_IN_SOURCE TRUE
@@ -1,6 +1,7 @@
 ---
 title: TDengine Documentation
 sidebar_label: Documentation Home
+description: This website contains the user manuals for TDengine, an open-source, cloud-native time-series database optimized for IoT, Connected Cars, and Industrial IoT.
 slug: /
 ---
@@ -1,5 +1,6 @@
 ---
 title: Introduction
+description: This document introduces the major features, competitive advantages, typical use cases, and benchmarks of TDengine.
 toc_max_heading_level: 2
 ---
@@ -1,5 +1,6 @@
 ---
 title: Concepts
+description: This document describes the basic concepts of TDengine, including the supertable.
 ---
 
 In order to explain the basic concepts and provide some sample code, the TDengine documentation uses smart meters as a typical time series use case. We assume the following: 1. Each smart meter collects three metrics, i.e. current, voltage, and phase; 2. There are multiple smart meters; 3. Each meter has static attributes like location and group ID. Based on this, collected data will look similar to the following table:
@@ -1,6 +1,7 @@
 ---
-sidebar_label: Docker
 title: Quick Install on Docker
+sidebar_label: Docker
+description: This document describes how to install TDengine in a Docker container and perform queries and inserts.
 ---
 
 This document describes how to install TDengine in a Docker container and perform queries and inserts.
@@ -1,6 +1,7 @@
 ---
-sidebar_label: Package
 title: Quick Install from Package
+sidebar_label: Package
+description: This document describes how to install TDengine on Linux, Windows, and macOS and perform queries and inserts.
 ---
 
 import Tabs from "@theme/Tabs";
@@ -1,6 +1,6 @@
 ---
 title: Get Started
-description: This article describes how to install TDengine and test its performance.
+description: This document describes how to install TDengine on various platforms.
 ---
 
 import GitHubSVG from './github.svg'
@@ -1,7 +1,7 @@
 ---
-sidebar_label: Connect
 title: Connect to TDengine
-description: "How to establish connections to TDengine and how to install and use TDengine connectors."
+sidebar_label: Connect
+description: This document describes how to establish connections to TDengine and how to install and use TDengine connectors.
 ---
 
 import Tabs from "@theme/Tabs";
@@ -1,5 +1,6 @@
 ---
 title: Data Model
+description: This document describes the data model of TDengine.
 ---
 
 The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the [STable](/concept/#super-table-stable) (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details.
@@ -1,5 +1,6 @@
 ---
 title: Insert Using SQL
+description: This document describes how to insert data into TDengine using SQL.
 ---
 
 import Tabs from "@theme/Tabs";

@@ -29,25 +30,31 @@ Application programs can execute `INSERT` statement through connectors to insert
 The below SQL statement is used to insert one row into table "d1001".
 
 ```sql
-INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
+INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31);
 ```
 
+`ts1` is a Unix timestamp; only timestamps later than the current time minus the KEEP duration configured for the database are allowed. For further detail, refer to the [TDengine SQL insert timestamp section](/taos-sql/insert).
+
 ### Insert Multiple Rows
 
 Multiple rows can be inserted in a single SQL statement. The example below inserts 2 rows into table "d1001".
 
 ```sql
-INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25);
+INSERT INTO d1001 VALUES (ts1, 10.2, 220, 0.23) (ts2, 10.3, 218, 0.25);
 ```
 
+`ts1` and `ts2` are Unix timestamps; only timestamps later than the current time minus the KEEP duration configured for the database are allowed. For further detail, refer to the [TDengine SQL insert timestamp section](/taos-sql/insert).
+
 ### Insert into Multiple Tables
 
 Data can be inserted into multiple tables in the same SQL statement. The example below inserts 2 rows into table "d1001" and 1 row into table "d1002".
 
 ```sql
-INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
+INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31) (ts2, 12.6, 218, 0.33) d1002 VALUES (ts3, 12.3, 221, 0.31);
 ```
 
+`ts1`, `ts2`, and `ts3` are Unix timestamps; only timestamps later than the current time minus the KEEP duration configured for the database are allowed. For further detail, refer to the [TDengine SQL insert timestamp section](/taos-sql/insert).
+
 For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).
 
 :::info
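As a rough sketch of the timestamp rule described in this hunk, the following Python fragment computes a millisecond Unix timestamp guaranteed to fall inside the KEEP window before building such an INSERT statement. The table d1001 comes from the examples above; the 3650-day KEEP value is an assumption matching the documented default.

```py
import time

KEEP_DAYS = 3650  # assumed KEEP setting; 3650 days is the documented default

# A current Unix timestamp in milliseconds is always inside the KEEP window.
ts1 = int(time.time() * 1000)

# Oldest timestamp the database would still accept: now minus KEEP.
oldest_allowed = ts1 - KEEP_DAYS * 24 * 3600 * 1000
assert ts1 >= oldest_allowed  # anything older than this boundary is rejected

print(f"INSERT INTO d1001 VALUES ({ts1}, 10.3, 219, 0.31);")
```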
@@ -1,5 +1,6 @@
 ---
 title: Write from Kafka
+description: This document describes how to insert data into TDengine using Kafka.
 ---
 
 import Tabs from "@theme/Tabs";
@@ -1,6 +1,7 @@
 ---
-sidebar_label: InfluxDB Line Protocol
 title: InfluxDB Line Protocol
+sidebar_label: InfluxDB Line Protocol
+description: This document describes how to insert data into TDengine using the InfluxDB Line Protocol.
 ---
 
 import Tabs from "@theme/Tabs";

@@ -38,7 +39,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
 - Each value in `field_set` must be self-descriptive for its data type. For example, 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double.
 - Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h).
 - The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
-- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat in taos.cfg defaults to false after version 3.0.1.3.)
+- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat in taos.cfg defaults to false after version 3.0.1.3; smlDataFormat is no longer used since 3.0.3.0.)
 :::
 
 For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
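A hypothetical sketch of the rules above (not part of the committed page): it assembles one line-protocol record in Python with a `tname` tag, typed field values, and a nanosecond timestamp, echoing the `st,tname=cpu1,...` example.

```py
import time

ts_ns = int(time.time() * 1_000_000_000)  # nanosecond precision, as in the example

measurement = "st"                       # supertable name from the example above
tags = {"tname": "cpu1", "t1": "4"}      # with smlChildTableName=tname, the child table is "cpu1"
fields = {"c1": "3i64", "c2": "1.2f32"}  # type suffixes make each value self-descriptive

tag_set = ",".join(f"{k}={v}" for k, v in tags.items())
field_set = ",".join(f"{k}={v}" for k, v in fields.items())
line = f"{measurement},{tag_set} {field_set} {ts_ns}"
print(line)  # e.g. st,tname=cpu1,t1=4 c1=3i64,c2=1.2f32 1626006833639000000
```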
@@ -1,6 +1,7 @@
 ---
-sidebar_label: OpenTSDB Line Protocol
 title: OpenTSDB Line Protocol
+sidebar_label: OpenTSDB Line Protocol
+description: This document describes how to insert data into TDengine using the OpenTSDB Line Protocol.
 ---
 
 import Tabs from "@theme/Tabs";
@@ -1,6 +1,7 @@
 ---
-sidebar_label: OpenTSDB JSON Protocol
 title: OpenTSDB JSON Protocol
+sidebar_label: OpenTSDB JSON Protocol
+description: This document describes how to insert data into TDengine using the OpenTSDB JSON protocol.
 ---
 
 import Tabs from "@theme/Tabs";

@@ -47,7 +48,6 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
 :::note
 
 - In JSON protocol, strings will be converted to NCHAR type and numeric values will be converted to double type.
-- Only data in array format is accepted and so an array must be used even if there is only one row.
 - The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
 :::
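For illustration, here is a hypothetical Python fragment assembling a single OpenTSDB JSON data point consistent with the note above; the metric name and tags are invented, and per the note, strings become NCHAR and numbers become double on the TDengine side.

```py
import json
import time

record = {
    "metric": "meters.current",      # invented metric name
    "timestamp": int(time.time()),   # Unix timestamp in seconds
    "value": 10.3,                   # numeric values are stored as double
    "tags": {"location": "California.LosAngeles", "groupid": 2},
}

payload = json.dumps([record])  # an array of one or more data points
print(payload)
```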
@@ -1,6 +1,7 @@
 ---
-sidebar_label: High Performance Writing
 title: High Performance Writing
+sidebar_label: High Performance Writing
+description: This document describes how to achieve high performance when writing data into TDengine.
 ---
 
 import Tabs from "@theme/Tabs";
@@ -53,8 +53,69 @@ for p in ps:
 
 In addition to Python's built-in multithreading and multiprocessing libraries, we can also use the third-party library gunicorn.
 
 ### Examples
 
+<details>
+<summary>kafka_example_perform</summary>
+
+`kafka_example_perform` is the entry point of the examples.
+
 ```py
-{{#include docs/examples/python/kafka_example.py}}
+{{#include docs/examples/python/kafka_example_perform.py}}
 ```
+</details>
+
+<details>
+<summary>kafka_example_common</summary>
+
+`kafka_example_common` is the common code of the examples.
+
+```py
+{{#include docs/examples/python/kafka_example_common.py}}
+```
+</details>
+
+<details>
+<summary>kafka_example_producer</summary>
+
+`kafka_example_producer` is the producer, responsible for generating test data and sending it to Kafka.
+
+```py
+{{#include docs/examples/python/kafka_example_producer.py}}
+```
+</details>
+
+<details>
+<summary>kafka_example_consumer</summary>
+
+`kafka_example_consumer` is the consumer, responsible for consuming data from Kafka and writing it to TDengine.
+
+```py
+{{#include docs/examples/python/kafka_example_consumer.py}}
+```
+</details>
+
+### Execute Python examples
+
+<details>
+<summary>Execute Python examples</summary>
+
+1. Install and start up Kafka.
+2. Install Python 3 and pip.
+3. Install `taospy` with pip.
+4. Install `kafka-python` with pip.
+5. Run this example.
+
+The entry point of this example is `kafka_example_perform.py`. For more information about usage, run it with `--help`:
+
+```
+python3 kafka_example_perform.py --help
+```
+
+For example, the following command creates 100 subtables, inserts 20000 rows into each table, and sets the Kafka max poll to 100 with 1 thread and 1 process per thread:
+
+```
+python3 kafka_example_perform.py -table-count=100 -table-items=20000 -max-poll=100 -threads=1 -processes=1
+```
+
+</details>
@@ -1,5 +1,6 @@
 ---
 title: Insert Data
+description: This document describes how to insert data into TDengine.
 ---
 
 TDengine supports multiple protocols of inserting data, including SQL, InfluxDB Line protocol, OpenTSDB Telnet protocol, and OpenTSDB JSON protocol. Data can be inserted row by row, or in batches. Data from one or more collection points can be inserted simultaneously. Data can be inserted with multiple threads, and out-of-order data and historical data can be inserted as well. InfluxDB Line protocol, OpenTSDB Telnet protocol, and OpenTSDB JSON protocol are the 3 kinds of schemaless insert protocols supported by TDengine. It's not necessary to create STables and tables in advance if using schemaless protocols, and the schemas can be adjusted automatically based on the data being inserted.
@@ -1,6 +1,6 @@
 ---
 title: Query Data
-description: "This chapter introduces major query functionalities and how to perform sync and async query using connectors."
+description: This document describes how to query data in TDengine and how to perform synchronous and asynchronous queries using connectors.
 ---
 
 import Tabs from "@theme/Tabs";
@@ -1,7 +1,7 @@
 ---
-sidebar_label: Stream Processing
-description: "The TDengine stream processing engine combines data inserts, preprocessing, analytics, real-time computation, and alerting into a single component."
 title: Stream Processing
+sidebar_label: Stream Processing
+description: This document describes the stream processing component of TDengine.
 ---
 
 Raw time-series data is often cleaned and preprocessed before being permanently stored in a database. In a traditional time-series solution, this generally requires the deployment of stream processing systems such as Kafka or Flink. However, the complexity of such systems increases the cost of development and maintenance.
@@ -94,22 +94,21 @@ void close() throws SQLException;
 <TabItem value="Python" label="Python">
 
 ```python
-class TaosConsumer():
-    def __init__(self, *topics, **configs)
-    def __iter__(self)
-    def __next__(self)
-    def sync_next(self)
-    def subscription(self)
-    def unsubscribe(self)
-    def close(self)
-    def __del__(self)
+class Consumer:
+    def subscribe(self, topics):
+        pass
+
+    def unsubscribe(self):
+        pass
+
+    def poll(self, timeout: float = 1.0):
+        pass
+
+    def close(self):
+        pass
+
+    def commit(self, message):
+        pass
 ```
 
 </TabItem>
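A minimal usage sketch of the new Python `Consumer` interface shown in this hunk, assuming a running TDengine with an existing topic named topic1; the configuration keys used here are described later in this commit.

```python
from taos.tmq import Consumer

consumer = Consumer({"group.id": "demo", "td.connect.ip": "127.0.0.1"})
consumer.subscribe(["topic1"])      # assumed existing topic
try:
    msg = consumer.poll(1.0)        # wait up to one second for a message
    if msg:
        consumer.commit(msg)        # acknowledge the consumed message
finally:
    consumer.unsubscribe()
    consumer.close()
```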
@@ -117,19 +116,22 @@ class TaosConsumer():
 <TabItem label="Go" value="Go">
 
 ```go
-func NewConsumer(conf *Config) (*Consumer, error)
-
-func (c *Consumer) Close() error
-
-func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error
-
-func (c *Consumer) FreeMessage(message unsafe.Pointer)
-
-func (c *Consumer) Poll(timeout time.Duration) (*Result, error)
-
-func (c *Consumer) Subscribe(topics []string) error
+func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)
+
+// rebalanceCb is reserved for compatibility purpose
+func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error
+
+// rebalanceCb is reserved for compatibility purpose
+func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error
+
+func (c *Consumer) Poll(timeoutMs int) tmq.Event
+
+// tmq.TopicPartition is reserved for compatibility purpose
+func (c *Consumer) Commit() ([]tmq.TopicPartition, error)
 
 func (c *Consumer) Unsubscribe() error
 
+func (c *Consumer) Close() error
 ```
 
 </TabItem>
@@ -220,7 +222,7 @@ A database including one supertable and two subtables is created as follows:
 ```sql
 DROP DATABASE IF EXISTS tmqdb;
 CREATE DATABASE tmqdb;
-CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16) TAGS(t1 INT, t3 VARCHAR(16));
+CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16)) TAGS(t1 INT, t3 VARCHAR(16));
 CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0");
 CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");
 INSERT INTO tmqdb.ctb0 VALUES(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00');
@@ -357,50 +359,20 @@ public class MetersDeserializer extends ReferenceDeserializer<Meters> {
 <TabItem label="Go" value="Go">
 
 ```go
-config := tmq.NewConfig()
-defer config.Destroy()
-err = config.SetGroupID("test")
-if err != nil {
-    panic(err)
-}
-err = config.SetAutoOffsetReset("earliest")
-if err != nil {
-    panic(err)
-}
-err = config.SetConnectIP("127.0.0.1")
-if err != nil {
-    panic(err)
-}
-err = config.SetConnectUser("root")
-if err != nil {
-    panic(err)
-}
-err = config.SetConnectPass("taosdata")
-if err != nil {
-    panic(err)
-}
-err = config.SetConnectPort("6030")
-if err != nil {
-    panic(err)
-}
-err = config.SetMsgWithTableName(true)
-if err != nil {
-    panic(err)
-}
-err = config.EnableHeartBeat()
-if err != nil {
-    panic(err)
-}
-err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) {
-    if result.ErrCode != 0 {
-        errStr := wrapper.TMQErr2Str(result.ErrCode)
-        err := errors.NewError(int(result.ErrCode), errStr)
-        panic(err)
-    }
-})
-if err != nil {
-    panic(err)
-}
+conf := &tmq.ConfigMap{
+    "group.id":                     "test",
+    "auto.offset.reset":            "earliest",
+    "td.connect.ip":                "127.0.0.1",
+    "td.connect.user":              "root",
+    "td.connect.pass":              "taosdata",
+    "td.connect.port":              "6030",
+    "client.id":                    "test_tmq_c",
+    "enable.auto.commit":           "false",
+    "enable.heartbeat.background":  "true",
+    "experimental.snapshot.enable": "true",
+    "msg.with.table.name":          "true",
+}
+consumer, err := NewConsumer(conf)
 ```
 
 </TabItem>
@@ -422,23 +394,31 @@ let mut consumer = tmq.build()?;
 <TabItem value="Python" label="Python">
 
+```python
+from taos.tmq import Consumer
+
+# Syntax: `consumer = Consumer(configs)`
+#
+# Example:
+consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
+```
+
 Python programs use the following parameters:
 
-| Parameter | Type | Description | Remarks |
-| :----------------------------: | :----: | -------------------------------------------------------- | ------------------------------------------- |
-| `td_connect_ip` | string | Used in establishing a connection; same as `taos_connect` | |
-| `td_connect_user` | string | Used in establishing a connection; same as `taos_connect` | |
-| `td_connect_pass` | string | Used in establishing a connection; same as `taos_connect` | |
-| `td_connect_port` | string | Used in establishing a connection; same as `taos_connect` | |
-| `group_id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. |
-| `client_id` | string | Client ID | Maximum length: 192. |
-| `auto_offset_reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
-| `enable_auto_commit` | string | Commit automatically | Specify `true` or `false`. |
-| `auto_commit_interval_ms` | string | Interval for automatic commits, in milliseconds | |
-| `enable_heartbeat_background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false`. |
-| `experimental_snapshot_enable` | string | Specify whether to consume messages from the WAL or from TSDB | Specify `true` or `false`. |
-| `msg_with_table_name` | string | Specify whether to deserialize table names from messages | Specify `true` or `false`. |
-| `timeout` | int | Consumer pull timeout | |
+| Parameter | Type | Description | Remarks |
+|:---------:|:----:|:-----------:|:-------:|
+| `td.connect.ip` | string | Used in establishing a connection | |
+| `td.connect.user` | string | Used in establishing a connection | |
+| `td.connect.pass` | string | Used in establishing a connection | |
+| `td.connect.port` | string | Used in establishing a connection | |
+| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192 |
+| `client.id` | string | Client ID | Maximum length: 192 |
+| `msg.with.table.name` | string | Specify whether to deserialize table names from messages | Specify `true` or `false` |
+| `enable.auto.commit` | string | Commit automatically | Specify `true` or `false` |
+| `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | |
+| `auto.offset.reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
+| `experimental.snapshot.enable` | string | Specify whether to consume messages from the WAL or from TSDB | Specify `true` or `false` |
+| `enable.heartbeat.background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false` |
 
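For reference, a hypothetical configuration exercising most of the renamed parameters above (all values are placeholders; only `group.id` is required):

```python
from taos.tmq import Consumer

consumer = Consumer({
    "td.connect.ip": "127.0.0.1",
    "td.connect.user": "root",
    "td.connect.pass": "taosdata",
    "td.connect.port": "6030",
    "group.id": "local",               # required, max length 192
    "client.id": "client1",
    "msg.with.table.name": "true",
    "enable.auto.commit": "true",
    "auto.commit.interval.ms": "1000",
    "auto.offset.reset": "earliest",
})
```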
 </TabItem>
@@ -523,11 +503,7 @@ consumer.subscribe(topics);
 <TabItem value="Go" label="Go">
 
 ```go
-consumer, err := tmq.NewConsumer(config)
-if err != nil {
-    panic(err)
-}
-err = consumer.Subscribe([]string{"example_tmq_topic"})
+err = consumer.Subscribe("example_tmq_topic", nil)
 if err != nil {
     panic(err)
 }
@@ -545,7 +521,7 @@ consumer.subscribe(["tmq_meters"]).await?;
 <TabItem value="Python" label="Python">
 
 ```python
-consumer = TaosConsumer('topic_ctb_column', group_id='vg2')
+consumer.subscribe(['topic1', 'topic2'])
 ```
 
 </TabItem>
@@ -611,13 +587,17 @@ while(running){
 
 ```go
 for {
-    result, err := consumer.Poll(time.Second)
-    if err != nil {
-        panic(err)
-    }
-    fmt.Println(result)
-    consumer.Commit(context.Background(), result.Message)
-    consumer.FreeMessage(result.Message)
+    ev := consumer.Poll(0)
+    if ev != nil {
+        switch e := ev.(type) {
+        case *tmqcommon.DataMessage:
+            fmt.Println(e.Value())
+        case tmqcommon.Error:
+            fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e)
+            panic(e)
+        }
+        consumer.Commit()
+    }
 }
 ```
@@ -660,9 +640,17 @@ for {
 <TabItem value="Python" label="Python">
 
 ```python
-for msg in consumer:
-    for row in msg:
-        print(row)
+while True:
+    res = consumer.poll(100)
+    if not res:
+        continue
+    err = res.error()
+    if err is not None:
+        raise err
+    val = res.value()
+
+    for block in val:
+        print(block.fetchall())
 ```
 
 </TabItem>
@@ -729,7 +717,11 @@ consumer.close();
 <TabItem value="Go" label="Go">
 
 ```go
-consumer.Close()
+/* Unsubscribe */
+_ = consumer.Unsubscribe()
+
+/* Close consumer */
+_ = consumer.Close()
 ```
 
 </TabItem>
@@ -1,7 +1,7 @@
 ---
-sidebar_label: Caching
 title: Caching
-description: "This document describes the caching component of TDengine."
+sidebar_label: Caching
+description: This document describes the caching component of TDengine.
 ---
 
 TDengine uses various kinds of caching techniques to efficiently write and query data. This document describes the caching component of TDengine.
@@ -1,7 +1,7 @@
 ---
-sidebar_label: UDF
 title: User-Defined Functions (UDF)
-description: "You can define your own scalar and aggregate functions to expand the query capabilities of TDengine."
+sidebar_label: UDF
+description: This document describes how to create user-defined functions (UDF), your own scalar and aggregate functions that can expand the query capabilities of TDengine.
 ---
 
 The built-in functions of TDengine may not be sufficient for the use cases of every application. In this case, you can define custom functions for use in TDengine queries. These are known as user-defined functions (UDF). A user-defined function takes one column of data or the result of a subquery as its input.
@@ -1,11 +1,31 @@
-```java
-{{#include docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java}}
-{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
-{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
-```
-```java
-{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
-```
-```java
-{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
-```
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+<Tabs defaultValue="native">
+<TabItem value="native" label="native connection">
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java}}
+```
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
+```
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
+```
+
+</TabItem>
+<TabItem value="ws" label="WebSocket connection">
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/WebsocketSubscribeDemo.java}}
+```
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
+```
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
+```
+
+</TabItem>
+</Tabs>
@@ -1,5 +1,6 @@
 ---
 title: Developer Guide
+description: This document describes how to use the various components of TDengine from a developer's perspective.
 ---
 
 Before creating an application to process time-series data with TDengine, consider the following:
@@ -1,6 +1,7 @@
 ---
-sidebar_label: Manual Deployment
 title: Manual Deployment and Management
+sidebar_label: Manual Deployment
+description: This document describes how to deploy TDengine on a server.
 ---
 
 ## Prerequisites
@@ -1,6 +1,7 @@
 ---
-sidebar_label: Kubernetes
 title: Deploying a TDengine Cluster in Kubernetes
+sidebar_label: Kubernetes
+description: This document describes how to deploy TDengine on Kubernetes.
 ---
 
 TDengine is a cloud-native time-series database that can be deployed on Kubernetes. This document gives a step-by-step description of how you can use YAML files to create a TDengine cluster and introduces common operations for TDengine in a Kubernetes environment.
@@ -1,6 +1,7 @@
 ---
-sidebar_label: Helm
 title: Use Helm to deploy TDengine
+sidebar_label: Helm
+description: This document describes how to deploy TDengine on Kubernetes by using Helm.
 ---
 
 Helm is a package manager for Kubernetes that can provide more capabilities in deploying on Kubernetes.

@@ -22,7 +23,7 @@ Helm uses the kubectl and kubeconfig configurations to perform Kubernetes operat
 To use TDengine Chart, download it from GitHub:
 
 ```bash
-wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.0.tgz
+wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.2.tgz
 
 ```

@@ -38,7 +39,7 @@ With minikube, the default value is standard.
 Use Helm commands to install TDengine:
 
 ```bash
-helm install tdengine tdengine-3.0.0.tgz \
+helm install tdengine tdengine-3.0.2.tgz \
   --set storage.className=<your storage class name>
 
 ```

@@ -46,7 +47,7 @@ helm install tdengine tdengine-3.0.0.tgz \
 You can configure a small storage size in minikube to ensure that your deployment does not exceed your available disk space.
 
 ```bash
-helm install tdengine tdengine-3.0.0.tgz \
+helm install tdengine tdengine-3.0.2.tgz \
   --set storage.className=standard \
   --set storage.dataSize=2Gi \
   --set storage.logSize=10Mi

@@ -83,14 +84,14 @@ You can configure custom parameters in TDengine with the `values.yaml` file.
 Run the `helm show values` command to see all parameters supported by TDengine Chart.
 
 ```bash
-helm show values tdengine-3.0.0.tgz
+helm show values tdengine-3.0.2.tgz
 
 ```
 
 Save the output of this command as `values.yaml`. Then you can modify this file with your desired values and use it to deploy a TDengine cluster:
 
 ```bash
-helm install tdengine tdengine-3.0.0.tgz -f values.yaml
+helm install tdengine tdengine-3.0.2.tgz -f values.yaml
 
 ```

@@ -107,7 +108,7 @@ image:
 prefix: tdengine/tdengine
 #pullPolicy: Always
 # Overrides the image tag whose default is the chart appVersion.
-# tag: "3.0.0.0"
+# tag: "3.0.2.0"
 
 service:
 # ClusterIP is the default service type, use NodeIP only if you know what you are doing.

@@ -155,15 +156,15 @@ clusterDomainSuffix: ""
 # See the [Configuration Variables](../../reference/config)
 #
 # Note:
-# 1. firstEp/secondEp: should not be setted here, it's auto generated at scale-up.
+# 1. firstEp/secondEp: should not be set here, it's auto generated at scale-up.
-# 2. serverPort: should not be setted, we'll use the default 6030 in many places.
+# 2. serverPort: should not be set, we'll use the default 6030 in many places.
-# 3. fqdn: will be auto generated in kubenetes, user should not care about it.
+# 3. fqdn: will be auto generated in kubernetes, user should not care about it.
 # 4. role: currently role is not supported - every node is able to be mnode and vnode.
 #
 # Btw, keep quotes "" around the value like below, even the value will be number or not.
 taoscfg:
 # Starts as cluster or not, must be 0 or 1.
-# 0: all pods will start as a seperate TDengine server
+# 0: all pods will start as a separate TDengine server
 # 1: pods will start as TDengine server cluster. [default]
 CLUSTER: "1"
@@ -1,5 +1,6 @@
 ---
 title: Deployment
+description: This document describes how to deploy a TDengine cluster on a server, on Kubernetes, and by using Helm.
 ---
 
 TDengine has a native distributed design and provides the ability to scale out. A few nodes can form a TDengine cluster. If you need higher processing power, you just need to add more nodes into the cluster. TDengine uses virtual node technology to virtualize a node into multiple virtual nodes to achieve load balancing. At the same time, TDengine can group virtual nodes on different nodes into virtual node groups, and use the replication mechanism to ensure the high availability of the system. The cluster feature of TDengine is completely open source.
@@ -1,7 +1,7 @@
 ---
-sidebar_label: Data Types
 title: Data Types
-description: 'TDengine supports a variety of data types including timestamp, float, JSON and many others.'
+sidebar_label: Data Types
+description: This document describes the data types that TDengine supports.
 ---
 
 ## Timestamp
@ -1,7 +1,7 @@
|
||||||
---
|
---
|
||||||
sidebar_label: Database
|
|
||||||
title: Database
|
title: Database
|
||||||
description: "create and drop database, show or change database parameters"
|
sidebar_label: Database
|
||||||
|
description: This document describes how to create and perform operations on databases.
|
||||||
---
|
---
|
||||||
|
|
||||||
## Create a Database
|
## Create a Database
|
||||||
|
@ -27,10 +27,13 @@ database_option: {
|
||||||
| PRECISION {'ms' | 'us' | 'ns'}
|
| PRECISION {'ms' | 'us' | 'ns'}
|
||||||
| REPLICA value
|
| REPLICA value
|
||||||
| RETENTIONS ingestion_duration:keep_duration ...
|
| RETENTIONS ingestion_duration:keep_duration ...
|
||||||
| STRICT {'off' | 'on'}
|
|
||||||
| WAL_LEVEL {1 | 2}
|
| WAL_LEVEL {1 | 2}
|
||||||
| VGROUPS value
|
| VGROUPS value
|
||||||
| SINGLE_STABLE {0 | 1}
|
| SINGLE_STABLE {0 | 1}
|
||||||
|
| STT_TRIGGER value
|
||||||
|
| TABLE_PREFIX value
|
||||||
|
| TABLE_SUFFIX value
|
||||||
|
| TSDB_PAGESIZE value
|
||||||
| WAL_RETENTION_PERIOD value
|
| WAL_RETENTION_PERIOD value
|
||||||
| WAL_ROLL_PERIOD value
|
| WAL_ROLL_PERIOD value
|
||||||
| WAL_RETENTION_SIZE value
|
| WAL_RETENTION_SIZE value
|
||||||
|
@ -55,15 +58,12 @@ database_option: {
|
||||||
- WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk.
|
- WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk.
|
||||||
- MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096.
|
- MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096.
|
||||||
- MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100.
|
- MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100.
|
||||||
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default.
|
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. The Enterprise Edition supports the [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function, so multiple KEEP values (comma separated, up to 3 values, satisfying keep0 <= keep1 <= keep2, e.g. KEEP 100h,100d,3650d) are supported. The Community Edition does not support Tiered Storage; if multiple KEEP values are configured, they do not take effect, and only the maximum value is used as KEEP.
- PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB.
- PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384.
- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.
- REPLICA: specifies the number of replicas that are made of the database. Enter 1 or 3. The default value is 1. The value of the REPLICA parameter cannot exceed the number of dnodes in the cluster.
- RETENTIONS: specifies the retention period for data aggregated at various intervals. For example, RETENTIONS 15s:7d,1m:21d,15m:50d indicates that data aggregated every 15 seconds is retained for 7 days, data aggregated every 1 minute is retained for 21 days, and data aggregated every 15 minutes is retained for 50 days. You must enter three aggregation intervals and corresponding retention periods.
- WAL_LEVEL: specifies whether fsync is enabled. The default value is 1.
  - 1: WAL is enabled but fsync is disabled.
  - 2: WAL and fsync are both enabled.
@ -71,6 +71,10 @@ database_option: {
- SINGLE_STABLE: specifies whether the database can contain more than one supertable.
  - 0: The database can contain multiple supertables.
  - 1: The database can contain only one supertable.
- STT_TRIGGER: specifies the number of flushed files that triggers a file merge. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value; for low-frequency scenarios with many tables, it is recommended to configure a larger value.
- TABLE_PREFIX: the prefix length in the table name that is ignored when distributing a table to a vnode based on the table name.
- TABLE_SUFFIX: the suffix length in the table name that is ignored when distributing a table to a vnode based on the table name.
- TSDB_PAGESIZE: the page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. For single-replica databases, the default value is 0, meaning that each WAL file is deleted immediately after its contents are written to disk; -1 means that WAL files are never deleted. For multi-replica databases, the default value is 4 days.
- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. For single-replica databases, the default value is 0, meaning that each WAL file is deleted immediately after its contents are written to disk; -1 means that WAL files are never deleted. For multi-replica databases, the default value is -1.
- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. For single-replica databases, the default value is 0, meaning that a new WAL file is created only after the previous WAL file was written to disk. For multi-replica databases, the default value is 1 day.
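The options above can be combined in a single statement. Here is a minimal sketch; the database name `db1` and the specific values are illustrative assumptions, not recommendations:

```sql
CREATE DATABASE db1
  DURATION 10
  KEEP 3650
  PRECISION 'ms'
  REPLICA 1
  STT_TRIGGER 8
  WAL_LEVEL 1;
```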
@ -112,12 +116,32 @@ alter_database_options:
alter_database_option: {
    CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'}
  | CACHESIZE value
  | BUFFER value
  | PAGES value
  | REPLICA value
  | STT_TRIGGER value
  | WAL_LEVEL value
  | WAL_FSYNC_PERIOD value
  | KEEP value
}
```
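For instance, a hypothetical statement that raises the retention period of a database `db1` (the name and value are illustrative):

```sql
ALTER DATABASE db1 KEEP 36500;
```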
### ALTER CACHESIZE

Changing a database configuration parameter is easy, but determining whether a given value is appropriate can be hard. This section describes how to determine whether cachesize is big enough.

1. How to check cachesize?

   You can use `select * from information_schema.ins_databases;` to get the value of cachesize.

2. How to check cacheload?

   You can use `show <db_name>.vgroups;` to check the value of cacheload.

3. Determine whether cachesize is big enough

   If the value of `cacheload` is very close to the value of `cachesize`, then it is very likely that `cachesize` is too small. If the value of `cacheload` is much smaller than the value of `cachesize`, then `cachesize` is big enough. You can use this simple principle to decide. Depending on how much memory is available in your system, you can choose to double `cachesize` or increase it by even 5 or more times. A sketch of this workflow follows.
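A minimal sketch of the workflow above, assuming a database named `db1` and that doubling the cachesize to 64 is the chosen adjustment (both are illustrative):

```sql
SELECT * FROM information_schema.ins_databases;
SHOW db1.vgroups;
ALTER DATABASE db1 CACHESIZE 64;
```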
:::note
Other parameters cannot be modified after the database has been created.
:::
@ -154,3 +178,19 @@ TRIM DATABASE db_name;
```

The preceding SQL statement deletes data that has expired and orders the remaining data in accordance with the storage configuration.

## Redistribute Vgroup

```sql
REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3]
```

Adjusts the distribution of vnodes in the vgroup according to the given list of dnodes.
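For example, a hypothetical invocation that places vgroup 5 across dnodes 1, 2, and 3 (all ids are illustrative):

```sql
REDISTRIBUTE VGROUP 5 DNODE 1 DNODE 2 DNODE 3;
```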
## Balance Vgroup

```sql
BALANCE VGROUP
```

Automatically adjusts the distribution of vnodes in all vgroups of the cluster, which is equivalent to load balancing the data of the cluster at the vnode level.
@ -1,5 +1,6 @@
---
title: Table
description: This document describes how to create and perform operations on standard tables and subtables.
---

## Create Table
@ -1,6 +1,7 @@
---
title: Supertable
sidebar_label: Supertable
description: This document describes how to create and perform operations on supertables.
---

## Create a Supertable
@ -1,6 +1,7 @@
---
title: Insert
sidebar_label: Insert
description: This document describes how to insert data into TDengine.
---

## Syntax
@ -27,7 +28,7 @@ INSERT INTO tb_name [(field1_name, ...)] subquery
2. The precision of a timestamp depends on its format. The precision configured for the database affects only timestamps that are inserted as long integers (UNIX time). Timestamps inserted as date and time strings are not affected. As an example, the timestamp 2021-07-13 16:16:48 is equivalent to 1626164208 in UNIX time. This UNIX time is modified to 1626164208000 for databases with millisecond precision, 1626164208000000 for databases with microsecond precision, and 1626164208000000000 for databases with nanosecond precision.

3. If you want to insert multiple rows simultaneously, do not use the NOW function in the timestamp. Using the NOW function in this situation will cause multiple rows to have the same timestamp and prevent them from being stored correctly. This is because the NOW function obtains the current time on the client, and multiple instances of NOW in a single statement will return the same time.
   The earliest timestamp that you can use when inserting data is equal to the current time on the server minus the value of the KEEP parameter (you can configure the KEEP parameter when you create a database; the default value is 3650 days). The latest timestamp that you can use depends on the PRECISION parameter (you can configure the PRECISION parameter when you create a database: ms means milliseconds, us means microseconds, ns means nanoseconds, and the default is milliseconds). If the timestamp precision is milliseconds or microseconds, the latest timestamp is the Unix epoch (January 1st, 1970 at 00:00:00.000 UTC) plus 1000 years, that is, January 1st, 2970 at 00:00:00.000 UTC; if the timestamp precision is nanoseconds, the latest timestamp is the Unix epoch plus 292 years, that is, January 1st, 2262 at 00:00:00.000000000 UTC.
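As a minimal sketch of the points above, assuming a subtable `d1001` with a (ts, current) schema (name and values are illustrative), a multi-row insert with explicit timestamps avoids the NOW pitfall described in item 3:

```sql
INSERT INTO d1001 VALUES
  ('2021-07-13 16:16:48.000', 10.3)
  ('2021-07-13 16:16:49.000', 10.4);
```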
**Syntax**
@ -1,6 +1,7 @@
---
title: Select
sidebar_label: Select
description: This document describes how to query data in TDengine.
---

## Syntax
@ -354,9 +355,9 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F
## JOIN

TDengine supports inner joins based on the timestamp primary key, that is, the `JOIN` condition must contain the timestamp primary key. As long as this timestamp-based primary key requirement is met, joins can be made between normal tables, subtables, supertables, and subqueries at will, and there is no limit on the number of tables.

For standard tables:

```sql
SELECT *
FROM temp_tb_1 t1, pressure_tb_1 t2
WHERE t1.ts = t2.ts
```

For supertables:

```sql
SELECT *
FROM temp_stable t1, temp_stable t2
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
```

For a subtable and a supertable:

```sql
SELECT *
FROM temp_ctable t1, temp_stable t2
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
```

Similarly, join operations can be performed on the result sets of multiple subqueries.

## Nested Query

Nested query is also called subquery: in a single SQL statement, the result of the inner query can be used as the data source of the outer query.
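As a minimal sketch, assuming a supertable `meters` with a `voltage` column (both names are illustrative), the inner query below serves as the data source of the outer aggregation:

```sql
SELECT AVG(v) FROM (SELECT voltage AS v FROM meters WHERE ts >= NOW - 1h);
```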
@ -1,7 +1,7 @@
---
title: Delete Data
sidebar_label: Delete Data
description: This document describes how to delete data from TDengine.
---

TDengine provides the functionality of deleting data from a table or STable according to a specified time range; it can be used to clean up abnormal data generated due to device failure.
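For instance, a hypothetical cleanup of readings older than a cutoff on a table `meters` (the table name and timestamp are illustrative):

```sql
DELETE FROM meters WHERE ts < '2023-01-01 00:00:00';
```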
@ -1,6 +1,7 @@
---
title: Functions
sidebar_label: Functions
description: This document describes the standard SQL functions available in TDengine.
toc_max_heading_level: 4
---
@ -795,19 +796,23 @@ HISTOGRAM(expr,bin_type, bin_description, normalized)
### PERCENTILE

```sql
PERCENTILE(expr, p [, p1] ...)
```

**Description**: The value whose rank in a specific column matches the specified percentage. If such a value matching the specified percentage doesn't exist in the column, an interpolated value is returned.

**Return value type**: This function takes a minimum of 2 and a maximum of 11 parameters, and can return up to 10 percentiles at a time. If 2 parameters are given, a single percentile is returned and the value type is DOUBLE. If more than 2 parameters are given, the return value type is a VARCHAR string formatted as a JSON array containing all return values.

**Applicable column types**: Numeric

**Applicable table types**: table only

**More explanations**:

- _p_ is in range [0,100]. When _p_ is 0, the result is the same as the MIN function; when _p_ is 100, the result is the same as the MAX function.
- When calculating multiple percentiles of a specific column, a single PERCENTILE function call with multiple parameters is advised, as this can greatly reduce the query response time. For example, `SELECT PERCENTILE(col, 90, 95, 99) FROM table` will perform better than `SELECT PERCENTILE(col, 90), PERCENTILE(col, 95), PERCENTILE(col, 99) FROM table`.

## Selection Functions
@ -876,7 +881,8 @@ INTERP(expr)
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a (millisecond), s (second), m (minute), h (hour), d (day), or w (week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
- Interpolation is performed based on the `FILL` parameter. For more information about the FILL clause, see [FILL Clause](../distinguished/#fill-clause).
- `INTERP` can only be used to interpolate within a single timeline, so it must be used with `partition by tbname` when it is used on a STable.
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported since version 3.0.2.0).
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported since version 3.0.3.0). A sketch combining these notes follows this list.
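Putting these notes together, here is a sketch of an `INTERP` query over a supertable `meters` with a `current` column; the names, the time range, and the exact clause order are illustrative assumptions:

```sql
SELECT _irowts, INTERP(current) FROM meters
  PARTITION BY tbname
  RANGE('2023-01-01 00:00:00', '2023-01-01 01:00:00')
  EVERY(1m)
  FILL(LINEAR);
```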
### LAST
@ -1,6 +1,7 @@
---
title: Time-Series Extensions
sidebar_label: Time-Series Extensions
description: This document describes the extended functions specific to time-series data processing available in TDengine.
---

As a purpose-built database for storing and processing time-series data, TDengine provides time-series-specific extensions to standard SQL.
@ -1,6 +1,7 @@
---
title: Data Subscription
sidebar_label: Data Subscription
description: This document describes the SQL statements related to the data subscription component of TDengine.
---

The information in this document is related to the TDengine data subscription feature.
@ -1,6 +1,7 @@
---
title: Stream Processing
sidebar_label: Stream Processing
description: This document describes the SQL statements related to the stream processing component of TDengine.
---

Raw time-series data is often cleaned and preprocessed before being permanently stored in a database. Stream processing components like Kafka, Flink, and Spark are often deployed alongside a time-series database to handle these operations, increasing system complexity and maintenance costs.
@ -108,7 +109,7 @@ SHOW STREAMS;
When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it.

For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering; the default is AT_ONCE:

1. AT_ONCE: triggers on write
@ -1,6 +1,7 @@
---
title: Operators
sidebar_label: Operators
description: This document describes the SQL operators available in TDengine.
---

## Arithmetic Operators
|
@ -1,6 +1,7 @@
|
||||||
---
|
---
|
||||||
sidebar_label: JSON Type
|
|
||||||
title: JSON Type
|
title: JSON Type
|
||||||
|
sidebar_label: JSON Type
|
||||||
|
description: This document describes the JSON data type in TDengine.
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,6 @@
---
title: Escape Characters
description: This document describes the usage of escape characters in TDengine.
---

## Escape Characters
@ -1,6 +1,7 @@
---
title: Name and Size Limits
sidebar_label: Name and Size Limits
description: This document describes the name and size limits in TDengine.
---

## Naming Rules
@ -1,6 +1,7 @@
---
title: Reserved Keywords
sidebar_label: Reserved Keywords
description: This document describes the reserved keywords in TDengine that cannot be used in object names.
---

## Keyword List
@ -17,6 +18,7 @@ The following list shows all reserved keywords:
- ADD
- AFTER
- AGGREGATE
- ALIVE
- ALL
- ALTER
- ANALYZE
@ -1,6 +1,7 @@
---
title: Cluster
sidebar_label: Cluster
description: This document describes the SQL statements related to cluster management in TDengine.
---

The physical entities that form TDengine clusters are known as data nodes (dnodes). Each dnode is a process running on the operating system of the physical machine. Dnodes can contain virtual nodes (vnodes), which store time-series data. Virtual nodes are formed into vgroups, which have 1 or 3 vnodes depending on the replica setting. If you want to enable replication on your cluster, it must contain at least three nodes. Dnodes can also contain management nodes (mnodes). Each cluster has up to three mnodes. Finally, dnodes can contain query nodes (qnodes), which compute time-series data, thus separating compute from storage. A single dnode can contain a vnode, qnode, and mnode.
@ -1,6 +1,7 @@
---
title: Information_Schema Database
sidebar_label: Metadata
description: This document describes how to use the INFORMATION_SCHEMA database in TDengine.
---

TDengine includes a built-in database named `INFORMATION_SCHEMA` to provide access to database metadata, system information, and status information. This information includes database names, table names, and currently running SQL statements. All information related to TDengine maintenance is stored in this database. It contains several read-only tables. These tables are more accurately described as views, and they do not correspond to specific files. You can query these tables but cannot write data to them. The INFORMATION_SCHEMA database is intended to provide a unified method for SHOW commands to access data. However, using SELECT ... FROM INFORMATION_SCHEMA.tablename offers several advantages over SHOW commands:
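For example, one might list the tables of a given database with an ordinary query; the `ins_tables` view and its columns here follow the INFORMATION_SCHEMA conventions, and the database name `db1` is illustrative:

```sql
SELECT table_name, stable_name FROM information_schema.ins_tables WHERE db_name = 'db1';
```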
@ -1,6 +1,7 @@
---
title: Performance_Schema Database
sidebar_label: Statistics
description: This document describes how to use the PERFORMANCE_SCHEMA database in TDengine.
---

TDengine includes a built-in database named `PERFORMANCE_SCHEMA` to provide access to database performance statistics. This document introduces the tables of PERFORMANCE_SCHEMA and their structure.
@ -1,6 +1,7 @@
---
title: SHOW Statement for Metadata
sidebar_label: SHOW Statement
description: This document describes how to use the SHOW statement in TDengine.
---

The `SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
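As a quick illustration of the two forms, the following pair is roughly equivalent; the `ins_databases` view and its `name` column are assumed from the INFORMATION_SCHEMA documentation:

```sql
SHOW DATABASES;
SELECT name FROM information_schema.ins_databases;
```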
@ -178,6 +179,139 @@ SHOW TABLE DISTRIBUTED table_name;
Shows how table data is distributed.

Example: the following command displays the block distribution of table `d0` in detailed format.

```sql
show table distributed d0\G;
```

<details>
<summary> Show Example </summary>
<pre><code>
*************************** 1.row ***************************
_block_dist: Total_Blocks=[5] Total_Size=[93.65 Kb] Average_size=[18.73 Kb] Compression_Ratio=[23.98 %]

Total_Blocks: Table `d0` contains 5 blocks in total

Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB

Average_size: The average size of each block is 18.73 KB

Compression_Ratio: The data compression rate is 23.98%

*************************** 2.row ***************************
_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]

Total_Rows: Table `d0` contains 20,000 rows

Inmem_Rows: The number of rows still in memory, i.e. not yet committed to disk, is 0

MinRows: The minimum number of rows in a block is 3,616

MaxRows: The maximum number of rows in a block is 4,096

Average_Rows: The average number of rows in a block is 4,000

*************************** 3.row ***************************
_block_dist: Total_Tables=[1] Total_Files=[2]

Total_Tables: The number of child tables, 1 in this example

Total_Files: The number of files storing the table's data, 2 in this example

*************************** 4.row ***************************
_block_dist: --------------------------------------------------------------------------------

*************************** 5.row ***************************
_block_dist: 0100 |

*************************** 6.row ***************************
_block_dist: 0299 |

*************************** 7.row ***************************
_block_dist: 0498 |

*************************** 8.row ***************************
_block_dist: 0697 |

*************************** 9.row ***************************
_block_dist: 0896 |

*************************** 10.row ***************************
_block_dist: 1095 |

*************************** 11.row ***************************
_block_dist: 1294 |

*************************** 12.row ***************************
_block_dist: 1493 |

*************************** 13.row ***************************
_block_dist: 1692 |

*************************** 14.row ***************************
_block_dist: 1891 |

*************************** 15.row ***************************
_block_dist: 2090 |

*************************** 16.row ***************************
_block_dist: 2289 |

*************************** 17.row ***************************
_block_dist: 2488 |

*************************** 18.row ***************************
_block_dist: 2687 |

*************************** 19.row ***************************
_block_dist: 2886 |

*************************** 20.row ***************************
_block_dist: 3085 |

*************************** 21.row ***************************
_block_dist: 3284 |

*************************** 22.row ***************************
_block_dist: 3483 ||||||||||||||||| 1 (20.00%)

*************************** 23.row ***************************
_block_dist: 3682 |

*************************** 24.row ***************************
_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)

Query OK, 24 row(s) in set (0.002444s)

</code></pre>
</details>

The above shows the block distribution percentage according to the number of rows in each block. From this example we can read off the following:

- `_block_dist: 3483 ||||||||||||||||| 1 (20.00%)` means there is one block whose row count is between 3,483 and 3,681.
- `_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)` means there are 4 blocks whose row count is between 3,881 and 4,096.
- The number of blocks whose row count falls in any other range is zero.

## SHOW TAGS
@ -230,7 +364,7 @@ Shows information about all vgroups in the system or about the vgroups for a spe
## SHOW VNODES

```sql
SHOW VNODES {dnode_id | dnode_endpoint};
```

Shows information about all vnodes in the system or about the vnodes for a specified dnode.
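For instance, hypothetical invocations for dnode 1, by id or by endpoint (both values are illustrative, as is the endpoint quoting):

```sql
SHOW VNODES 1;
SHOW VNODES 'localhost:6030';
```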
@ -1,7 +1,7 @@
---
title: User and Access Control
sidebar_label: Access Control
description: This document describes how to manage users and permissions in TDengine.
---

This document describes how to manage permissions in TDengine.
@ -1,6 +1,7 @@
---
title: User-Defined Functions (UDF)
sidebar_label: User-Defined Functions
description: This document describes the SQL statements related to user-defined functions (UDF) in TDengine.
---

You can create user-defined functions and import them into TDengine.
@ -40,7 +41,7 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [
```sql
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8;
```

For more information about user-defined functions, see [User-Defined Functions](/develop/udf).

## Manage UDF
@ -1,6 +1,7 @@
---
title: Indexing
sidebar_label: Indexing
description: This document describes the SQL statements related to indexing in TDengine.
---

TDengine supports SMA and FULLTEXT indexing.
@ -1,6 +1,7 @@
---
title: Error Recovery
sidebar_label: Error Recovery
description: This document describes the SQL statements related to error recovery in TDengine.
---

In a complex environment, connections and query tasks may encounter errors or fail to return in a reasonable time. If this occurs, you can terminate the connection or task.
@ -1,7 +1,7 @@
---
title: Changes in TDengine 3.0
sidebar_label: Changes in TDengine 3.0
description: This document describes how TDengine SQL has changed in version 3.0 compared with previous versions.
---

## Basic SQL Elements
@ -54,7 +54,6 @@ The following data types can be used in the schema for standard tables.
| 27 | GRANT | Added | Grants permissions to a user.
| 28 | KILL TRANSACTION | Added | Terminates an mnode transaction.
| 29 | KILL STREAM | Deprecated | Terminated a continuous query. The continuous query feature has been replaced with the stream processing feature.
| 31 | REVOKE | Added | Revokes permissions from a user.
| 32 | SELECT | Modified | <ul><li>SELECT does not use the implicit results column. Output columns must be specified in the SELECT clause. </li><li>DISTINCT support is enhanced. In previous versions, DISTINCT only worked on the tag column and could not be used with JOIN or GROUP BY. </li><li>JOIN support is enhanced. The following are now supported after JOIN: a WHERE clause with OR, operations on multiple tables, and GROUP BY on multiple tables. </li><li>Subqueries after FROM are enhanced. Levels of nesting are no longer restricted. Subqueries can be used with UNION ALL. Other syntax restrictions are eliminated. </li><li>All scalar functions can be used after WHERE. </li><li>GROUP BY is enhanced. You can group by any scalar expression or combination thereof. </li><li>SESSION can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. </li><li>STATE_WINDOW can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. </li><li>ORDER BY is enhanced. It is no longer required to use ORDER BY and GROUP BY together. There is no longer a restriction on the number of order expressions. NULLS FIRST and NULLS LAST syntax has been added. Any expression that conforms to the ORDER BY semantics can be used. </li><li>Added PARTITION BY syntax. PARTITION BY replaces GROUP BY tags. </li></ul>
| 33 | SHOW ACCOUNTS | Deprecated | This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
@ -76,8 +75,9 @@ The following data types can be used in the schema for standard tables.
| 49 | SHOW TRANSACTIONS | Added | Shows all running transactions in the system.
| 50 | SHOW DNODE VARIABLES | Added | Shows the configuration of the specified dnode.
| 51 | SHOW VNODES | Not supported | Shows information about vnodes in the system. Not supported.
| 52 | TRIM DATABASE | Added | Deletes data that has expired and orders the remaining data in accordance with the storage configuration.
| 53 | REDISTRIBUTE VGROUP | Added | Adjusts the distribution of vnodes in a vgroup.
| 54 | BALANCE VGROUP | Added | Automatically adjusts the distribution of vnodes in all vgroups.

## SQL Functions
@ -1,6 +1,6 @@
---
title: TDengine SQL
description: This document describes the syntax and functions supported by TDengine SQL.
---

This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. TDengine SQL has been enhanced in version 3.0, and the query engine has been rearchitected. For information about how TDengine SQL has changed, see [Changes in TDengine 3.0](../taos-sql/changes).
@ -1,6 +1,6 @@
---
title: Install and Uninstall
description: This document describes how to install, upgrade, and uninstall TDengine.
---

import Tabs from "@theme/Tabs";
@ -1,6 +1,7 @@
---
title: Resource Planning
sidebar_label: Resource Planning
description: This document describes how to plan compute and storage resources for your TDengine cluster.
---

It is important to plan computing and storage resources if using TDengine to build an IoT, time-series or Big Data platform. This chapter describes how to plan the required CPU, memory, and disk resources.
@ -1,5 +1,6 @@
---
title: Fault Tolerance and Disaster Recovery
description: This document describes how TDengine provides fault tolerance and disaster recovery.
---

## Fault Tolerance
@ -1,5 +1,6 @@
---
title: Data Import
description: This document describes how to import data into TDengine.
---

TDengine provides multiple ways of importing data: import with a script, import from a data file, and import using `taosdump`.
@ -1,5 +1,6 @@
---
title: Data Export
description: This document describes how to export data from TDengine.
---

There are two ways of exporting data from a TDengine cluster:
@ -1,5 +1,6 @@
---
title: TDengine Monitoring
description: This document describes how to monitor your TDengine cluster.
---

After TDengine is started, it automatically writes monitoring data, including CPU, memory, and disk usage, bandwidth, number of requests, disk I/O speed, and slow queries, into a designated database at a predefined interval through taosKeeper. Additionally, some important system operations, such as logging on, creating users, and dropping databases, as well as alerts and warnings generated in TDengine, are written into the `log` database too. A system operator can view the data in the `log` database from the TDengine CLI or from a web console.
@ -1,5 +1,6 @@
---
title: Problem Diagnostics
description: This document describes how to diagnose issues with your TDengine cluster.
---

## Network Connection Diagnostics
@ -1,5 +1,6 @@
---
title: Administration
description: This document describes how to perform management operations on your TDengine cluster from an administrator's perspective.
---

This chapter is mainly written for system administrators. It covers download, install/uninstall, data import/export, system monitoring, user management, connection management, capacity planning and system optimization.
@ -1,5 +1,6 @@
---
title: REST API
description: This document describes the TDengine REST API.
---

To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles; namely REST API. To minimize the learning cost, unlike REST APIs for other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request, to operate the database.
@ -1,6 +1,7 @@
---
title: C/C++ Connector
sidebar_label: C/C++
description: This document describes the TDengine C/C++ connector.
---

C/C++ developers can use TDengine's client driver and the C/C++ connector to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use the C/C++ connector you must include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs. The application also needs to link to the corresponding dynamic libraries on the platform where it is located.
@ -1,8 +1,8 @@
---
title: TDengine Java Connector
sidebar_label: Java
description: This document describes the TDengine Java Connector.
toc_max_heading_level: 4
---

import Tabs from '@theme/Tabs';
@ -696,6 +696,9 @@ TaosConsumer consumer = new TaosConsumer<>(config);
- enable.auto.commit: Specifies whether to commit automatically.
- group.id: Specifies the group that the consumer is in.
- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set.
- td.connect.type: Specifies the connection type to TDengine: `jni` or `WebSocket`. The default is `jni`.
- httpConnectTimeout: WebSocket connection timeout in milliseconds; the default value is 5000 ms. It only takes effect when using the WebSocket type.
- messageWaitTimeout: Socket timeout in milliseconds; the default value is 10000 ms. It only takes effect when using the WebSocket type.
- For more information, see [Consumer Parameters](../../../develop/tmq).

#### Subscribe to consume data
@ -724,6 +727,11 @@ For more information, see [Data Subscription](../../../develop/tmq).
### Usage examples

<Tabs defaultValue="native">
<TabItem value="native" label="native connection">

In addition to the native connection, the Java Connector also supports subscribing via WebSocket.

```java
public abstract class ConsumerLoop {
    private final TaosConsumer<ResultBean> consumer;
@ -795,6 +803,87 @@ public abstract class ConsumerLoop {
}
```

</TabItem>
<TabItem value="ws" label="WebSocket connection">

```java
public abstract class ConsumerLoop {
    private final TaosConsumer<ResultBean> consumer;
    private final List<String> topics;
    private final AtomicBoolean shutdown;
    private final CountDownLatch shutdownLatch;

    public ConsumerLoop() throws SQLException {
        Properties config = new Properties();
        config.setProperty("bootstrap.servers", "localhost:6041");
        config.setProperty("td.connect.type", "ws");
        config.setProperty("msg.with.table.name", "true");
        config.setProperty("enable.auto.commit", "true");
        config.setProperty("group.id", "group2");
        config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");

        this.consumer = new TaosConsumer<>(config);
        this.topics = Collections.singletonList("topic_speed");
        this.shutdown = new AtomicBoolean(false);
        this.shutdownLatch = new CountDownLatch(1);
    }

    public abstract void process(ResultBean result);

    public void pollData() throws SQLException {
        try {
            consumer.subscribe(topics);

            while (!shutdown.get()) {
                ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
                for (ResultBean record : records) {
                    process(record);
                }
            }
            consumer.unsubscribe();
        } finally {
            consumer.close();
            shutdownLatch.countDown();
        }
    }

    public void shutdown() throws InterruptedException {
        shutdown.set(true);
        shutdownLatch.await();
    }

    public static class ResultDeserializer extends ReferenceDeserializer<ResultBean> {

    }

    public static class ResultBean {
        private Timestamp ts;
        private int speed;

        public Timestamp getTs() {
            return ts;
        }

        public void setTs(Timestamp ts) {
            this.ts = ts;
        }

        public int getSpeed() {
            return speed;
        }

        public void setSpeed(int speed) {
            this.speed = speed;
        }
    }
}
```

</TabItem>
</Tabs>

> **Note**: The value of value.deserializer should be adjusted based on the package path of the test environment.

### Use with connection pool

#### HikariCP
@ -878,8 +967,10 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
|
||||||
|
|
||||||
| taos-jdbcdriver version | major changes |
|
| taos-jdbcdriver version | major changes |
|
||||||
| :---------------------: | :--------------------------------------------: |
|
| :---------------------: | :--------------------------------------------: |
|
||||||
| 3.0.1 - 3.0.2 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use 3.0.2 in the JDK 8 environment |
|
| 3.1.0 | JDBC REST connection supports subscription over WebSocket |
|
||||||
|
| 3.0.1 - 3.0.4 | fix an issue where resultSet data was sometimes parsed incorrectly. 3.0.1 is compiled on JDK 11; use another version in a JDK 8 environment |
|
||||||
| 3.0.0 | Support for TDengine 3.0 |
|
| 3.0.0 | Support for TDengine 3.0 |
|
||||||
|
| 2.0.42 | fix wasNull interface return value in WebSocket connection |
|
||||||
| 2.0.41 | fix decode method of username and password in REST connection |
|
| 2.0.41 | fix decode method of username and password in REST connection |
|
||||||
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
|
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
|
||||||
| 2.0.38 | JDBC REST connections add bulk pull function |
|
| 2.0.38 | JDBC REST connections add bulk pull function |
|
||||||
|
|
|
@ -1,7 +1,8 @@
|
||||||
---
|
---
|
||||||
toc_max_heading_level: 4
|
|
||||||
sidebar_label: Go
|
|
||||||
title: TDengine Go Connector
|
title: TDengine Go Connector
|
||||||
|
sidebar_label: Go
|
||||||
|
description: This document describes the TDengine Go connector.
|
||||||
|
toc_max_heading_level: 4
|
||||||
---
|
---
|
||||||
|
|
||||||
import Tabs from '@theme/Tabs';
|
import Tabs from '@theme/Tabs';
|
||||||
|
@ -355,26 +356,29 @@ The `af` package encapsulates TDengine advanced functions such as connection man
|
||||||
|
|
||||||
#### Subscribe
|
#### Subscribe
|
||||||
|
|
||||||
* `func NewConsumer(conf *Config) (*Consumer, error)`
|
* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)`
|
||||||
|
|
||||||
Creates a consumer group.
|
Creates a consumer group.
|
||||||
|
|
||||||
* `func (c *Consumer) Subscribe(topics []string) error`
|
* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error`
|
||||||
|
Note: `rebalanceCb` is reserved for compatibility purposes
|
||||||
|
|
||||||
|
Subscribes to a topic.
|
||||||
|
|
||||||
|
* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error`
|
||||||
|
Note: `rebalanceCb` is reserved for compatibility purposes
|
||||||
|
|
||||||
Subscribes to topics.
|
Subscribes to topics.
|
||||||
|
|
||||||
* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
|
* `func (c *Consumer) Poll(timeoutMs int) tmq.Event`
|
||||||
|
|
||||||
Polling information.
|
Polling information.
|
||||||
|
|
||||||
* `func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error`
|
* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)`
|
||||||
|
Note: `tmq.TopicPartition` is reserved for compatibility purposes
|
||||||
|
|
||||||
Commit information.
|
Commit information.
|
||||||
|
|
||||||
* `func (c *Consumer) FreeMessage(message unsafe.Pointer)`
|
|
||||||
|
|
||||||
Free information.
|
|
||||||
|
|
||||||
* `func (c *Consumer) Unsubscribe() error`
|
* `func (c *Consumer) Unsubscribe() error`
|
||||||
|
|
||||||
Unsubscribe. (A combined usage sketch follows this list.)
|
Unsubscribe. (A combined usage sketch follows this list.)
|
||||||
|
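Putting the calls above together, a complete consume loop looks roughly like the sketch below. It assumes a locally running taosd, an existing topic named `topic_speed`, and configuration keys taken from the driver-go examples; adjust both to your deployment.

```go
package main

import (
	"fmt"
	"log"

	"github.com/taosdata/driver-go/v3/af/tmq"
	tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
)

func main() {
	// The ConfigMap keys mirror the TMQ consumer parameters.
	consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
		"group.id":          "example_group",
		"td.connect.ip":     "127.0.0.1",
		"td.connect.user":   "root",
		"td.connect.pass":   "taosdata",
		"auto.offset.reset": "earliest",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	// rebalanceCb is reserved for compatibility, so nil is passed.
	if err = consumer.Subscribe("topic_speed", nil); err != nil {
		log.Fatal(err)
	}

	for i := 0; i < 5; i++ {
		ev := consumer.Poll(500) // timeout in milliseconds
		if ev == nil {
			continue // no message within the timeout
		}
		switch e := ev.(type) {
		case *tmqcommon.DataMessage:
			fmt.Println(e.Value())
			// Commit after processing; the returned TopicPartition slice
			// is reserved for compatibility and ignored here.
			if _, err = consumer.Commit(); err != nil {
				log.Fatal(err)
			}
		case tmqcommon.Error:
			log.Fatal(e)
		}
	}
	_ = consumer.Unsubscribe()
}
```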
@ -441,25 +445,36 @@ Close consumer.
|
||||||
|
|
||||||
### Subscribe via WebSocket
|
### Subscribe via WebSocket
|
||||||
|
|
||||||
* `func NewConsumer(config *Config) (*Consumer, error)`
|
* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)`
|
||||||
|
|
||||||
Creates a consumer group.
|
Creates a consumer group.
|
||||||
|
|
||||||
* `func (c *Consumer) Subscribe(topic []string) error`
|
* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error`
|
||||||
|
Note: `rebalanceCb` is reserved for compatibility purposes
|
||||||
|
|
||||||
Subscribes to topics.
|
Subscribes to a topic.
|
||||||
|
|
||||||
* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
|
* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error`
|
||||||
|
Note: `rebalanceCb` is reserved for compatibility purposes
|
||||||
|
|
||||||
Polling information.
|
Subscribes to topics.
|
||||||
|
|
||||||
* `func (c *Consumer) Commit(messageID uint64) error`
|
* `func (c *Consumer) Poll(timeoutMs int) tmq.Event`
|
||||||
|
|
||||||
Commit information.
|
Polling information.
|
||||||
|
|
||||||
|
* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)`
|
||||||
|
Note: `tmq.TopicPartition` is reserved for compatibility purposes
|
||||||
|
|
||||||
|
Commit information.
|
||||||
|
|
||||||
|
* `func (c *Consumer) Unsubscribe() error`
|
||||||
|
|
||||||
|
Unsubscribe.
|
||||||
|
|
||||||
* `func (c *Consumer) Close() error`
|
* `func (c *Consumer) Close() error`
|
||||||
|
|
||||||
Close consumer.
|
Close consumer.
|
||||||
|
|
||||||
For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
|
For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
|
||||||
|
|
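Creating the WebSocket consumer differs mainly in the import path and in pointing the consumer at taosAdapter; the `ws.url` key below is assumed from the linked sample. Subscribe/Poll/Commit then proceed exactly as in the native sketch above.

```go
package main

import (
	"log"

	tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
	"github.com/taosdata/driver-go/v3/ws/tmq"
)

func main() {
	// Dial taosAdapter over WebSocket instead of linking against taosc.
	consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
		"ws.url":          "ws://127.0.0.1:6041", // assumed local taosAdapter
		"td.connect.user": "root",
		"td.connect.pass": "taosdata",
		"group.id":        "ws_example_group",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()
	// consumer.Subscribe / Poll / Commit as shown for the native connection.
}
```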
||||||
|
|
|
@ -1,7 +1,8 @@
|
||||||
---
|
---
|
||||||
toc_max_heading_level: 4
|
|
||||||
sidebar_label: Rust
|
|
||||||
title: TDengine Rust Connector
|
title: TDengine Rust Connector
|
||||||
|
sidebar_label: Rust
|
||||||
|
description: This document describes the TDengine Rust connector.
|
||||||
|
toc_max_heading_level: 4
|
||||||
---
|
---
|
||||||
|
|
||||||
import Tabs from '@theme/Tabs';
|
import Tabs from '@theme/Tabs';
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
---
|
---
|
||||||
sidebar_label: Python
|
|
||||||
title: TDengine Python Connector
|
title: TDengine Python Connector
|
||||||
description: "taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. tasopy wraps both the native and REST interfaces of TDengine, corresponding to the two submodules of tasopy: taos and taosrest. In addition to wrapping the native and REST interfaces, taospy also provides a programming interface that conforms to the Python Data Access Specification (PEP 249), making it easy to integrate taospy with many third-party tools, such as SQLAlchemy and pandas."
|
sidebar_label: Python
|
||||||
|
description: This document describes taospy, the TDengine Python connector.
|
||||||
---
|
---
|
||||||
|
|
||||||
import Tabs from "@theme/Tabs";
|
import Tabs from "@theme/Tabs";
|
||||||
|
@ -32,7 +32,7 @@ We recommend using the latest version of `taospy`, regardless of the version of
|
||||||
|
|
||||||
### Preparation
|
### Preparation
|
||||||
|
|
||||||
1. Install Python. Python >= 3.7 is recommended. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
|
1. Install Python. The recent taospy package requires Python 3.6+. The earlier versions of taospy require Python 3.7+. The taos-ws-py package requires Python 3.7+. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
|
||||||
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.
|
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.
|
||||||
If you use a native connection, you will also need to [Install Client Driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.
|
If you use a native connection, you will also need to [Install Client Driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.
|
||||||
|
|
||||||
|
@ -78,6 +78,22 @@ pip3 install git+https://github.com/taosdata/taos-connector-python.git
|
||||||
</TabItem>
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
|
#### Install `taos-ws-py` (Optional)
|
||||||
|
|
||||||
|
The taos-ws-py package provides a way to access TDengine via WebSocket.
|
||||||
|
|
||||||
|
##### Install taos-ws-py with taospy
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip3 install taospy[ws]
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Install taos-ws-py only
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip3 install taos-ws-py
|
||||||
|
```
|
||||||
|
|
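As a quick post-install smoke test, the sketch below opens a WebSocket connection through taosAdapter; the DSN uses the default account and port, so adapt it to your deployment.

```python
import taosws

# Assumes taosAdapter listening on localhost:6041 with the default credentials.
conn = taosws.connect("taosws://root:taosdata@localhost:6041")
result = conn.query("SELECT SERVER_VERSION()")
for row in result:
    print(row)
```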
||||||
### Verify
|
### Verify
|
||||||
|
|
||||||
<Tabs defaultValue="rest">
|
<Tabs defaultValue="rest">
|
||||||
|
|
|
@ -1,7 +1,8 @@
|
||||||
---
|
---
|
||||||
toc_max_heading_level: 4
|
|
||||||
sidebar_label: Node.js
|
|
||||||
title: TDengine Node.js Connector
|
title: TDengine Node.js Connector
|
||||||
|
sidebar_label: Node.js
|
||||||
|
description: This document describes the TDengine Node.js connector.
|
||||||
|
toc_max_heading_level: 4
|
||||||
---
|
---
|
||||||
|
|
||||||
import Tabs from "@theme/Tabs";
|
import Tabs from "@theme/Tabs";
|
||||||
|
@ -31,7 +32,9 @@ Please refer to [version support list](/reference/connector#version-support)
|
||||||
|
|
||||||
## Supported features
|
## Supported features
|
||||||
|
|
||||||
### Native connectors
|
|
||||||
|
<Tabs defaultValue="native">
|
||||||
|
<TabItem value="native" label="Native connector">
|
||||||
|
|
||||||
1. Connection Management
|
1. Connection Management
|
||||||
2. General Query
|
2. General Query
|
||||||
|
@ -40,12 +43,16 @@ Please refer to [version support list](/reference/connector#version-support)
|
||||||
5. Subscription
|
5. Subscription
|
||||||
6. Schemaless
|
6. Schemaless
|
||||||
|
|
||||||
### REST Connector
|
</TabItem>
|
||||||
|
<TabItem value="rest" label="REST connector">
|
||||||
|
|
||||||
1. Connection Management
|
1. Connection Management
|
||||||
2. General Query
|
2. General Query
|
||||||
3. Continuous Query
|
3. Continuous Query
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
## Installation Steps
|
## Installation Steps
|
||||||
|
|
||||||
### Pre-installation preparation
|
### Pre-installation preparation
|
||||||
|
@ -59,9 +66,19 @@ Please refer to [version support list](/reference/connector#version-support)
|
||||||
- `python` (`v2.7` recommended; `v3.x.x` is currently not supported)
|
- `python` (`v2.7` recommended; `v3.x.x` is currently not supported)
|
||||||
- `@tdengine/client` 3.0.0 supports Node.js LTS v10.9.0 or later and Node.js LTS v12.8.0 or later. Older versions may be incompatible.
|
- `@tdengine/client` 3.0.0 supports Node.js LTS v10.9.0 or later and Node.js LTS v12.8.0 or later. Older versions may be incompatible.
|
||||||
- `make`
|
- `make`
|
||||||
- C compiler, [GCC](https://gcc.gnu.org) v4.8.5 or higher
|
- C compiler, [GCC](https://gcc.gnu.org) v4.8.5 or later.
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
|
|
||||||
|
<TabItem value="macOS" label="macOS installation dependencies">
|
||||||
|
|
||||||
|
- `python` (`v2.7` recommended; `v3.x.x` is currently not supported)
|
||||||
|
- `@tdengine/client` 3.0.0 currently supports Node.js v12 only, from v12.22.12 onward. Other versions may be incompatible.
|
||||||
|
- `make`
|
||||||
|
- C compiler, [GCC](https://gcc.gnu.org) v4.8.5 or later.
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
|
||||||
<TabItem value="Windows" label="Windows system installation dependencies">
|
<TabItem value="Windows" label="Windows system installation dependencies">
|
||||||
|
|
||||||
- Installation method 1
|
- Installation method 1
|
||||||
|
@ -104,6 +121,9 @@ npm install @tdengine/rest
|
||||||
|
|
||||||
### Verify
|
### Verify
|
||||||
|
|
||||||
|
<Tabs defaultValue="native">
|
||||||
|
<TabItem value="native" label="Native connector">
|
||||||
|
|
||||||
After installing the TDengine client, use the `nodejsChecker.js` program to verify that the current environment supports Node.js access to TDengine.
|
After installing the TDengine client, use the `nodejsChecker.js` program to verify that the current environment supports Node.js access to TDengine.
|
||||||
|
|
||||||
Verification in details:
|
Verification in details:
|
||||||
|
@ -120,6 +140,28 @@ node nodejsChecker.js host=localhost
|
||||||
|
|
||||||
- After executing the above steps, the command-line will output the result of `nodejsChecker.js` connecting to the TDengine instance and performing a simple insert and query.
|
- After executing the above steps, the command-line will output the result of `nodejsChecker.js` connecting to the TDengine instance and performing a simple insert and query.
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="rest" label="REST connector">
|
||||||
|
|
||||||
|
After installing the TDengine client, use the `restChecker.js` program to verify that the current environment supports Node.js access to TDengine.
|
||||||
|
|
||||||
|
Verification in details:
|
||||||
|
|
||||||
|
- Create an installation test folder such as `~/tdengine-test`. Download the [restChecker.js source code](https://github.com/taosdata/TDengine/tree/3.0/docs/examples/node/restexample/restChecker.js) to your local machine.
|
||||||
|
|
||||||
|
- Execute the following command from the command-line.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
npm init -y
|
||||||
|
npm install @tdengine/rest
|
||||||
|
node restChecker.js
|
||||||
|
```
|
||||||
|
|
||||||
|
- After executing the above steps, the command-line will output the result of `restChecker.js` connecting to the TDengine instance and performing a simple insert and query.
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
## Establishing a connection
|
## Establishing a connection
|
||||||
|
|
||||||
Please choose one of the connectors.
|
Please choose one of the connectors.
|
||||||
|
@ -171,24 +213,69 @@ let cursor = conn.cursor();
|
||||||
|
|
||||||
#### SQL Write
|
#### SQL Write
|
||||||
|
|
||||||
|
<Tabs defaultValue="native">
|
||||||
|
<TabItem value="native" label="native connection">
|
||||||
|
|
||||||
<NodeInsert />
|
<NodeInsert />
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="rest" label="REST connection">
|
||||||
|
|
||||||
|
```js
|
||||||
|
{{#include docs/examples/node/restexample/insert_example.js}}
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
#### InfluxDB line protocol write
|
#### InfluxDB line protocol write
|
||||||
|
|
||||||
|
<Tabs defaultValue="native">
|
||||||
|
<TabItem value="native" label="native connection">
|
||||||
|
|
||||||
<NodeInfluxLine />
|
<NodeInfluxLine />
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
#### OpenTSDB Telnet line protocol write
|
#### OpenTSDB Telnet line protocol write
|
||||||
|
|
||||||
|
<Tabs defaultValue="native">
|
||||||
|
<TabItem value="native" label="native connection">
|
||||||
|
|
||||||
<NodeOpenTSDBTelnet />
|
<NodeOpenTSDBTelnet />
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
#### OpenTSDB JSON line protocol write
|
#### OpenTSDB JSON line protocol write
|
||||||
|
|
||||||
|
<Tabs defaultValue="native">
|
||||||
|
<TabItem value="native" label="native connection">
|
||||||
|
|
||||||
<NodeOpenTSDBJson />
|
<NodeOpenTSDBJson />
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
### Querying data
|
### Querying data
|
||||||
|
|
||||||
|
<Tabs defaultValue="native">
|
||||||
|
<TabItem value="native" label="native connection">
|
||||||
|
|
||||||
<NodeQuery />
|
<NodeQuery />
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
|
||||||
|
<TabItem value="rest" label="REST connection">
|
||||||
|
|
||||||
|
```js
|
||||||
|
{{#include docs/examples/node/restexample/query_example.js}}
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
|
|
||||||
## More sample programs
|
## More sample programs
|
||||||
|
|
||||||
|
|
|
@ -1,7 +1,8 @@
|
||||||
---
|
---
|
||||||
toc_max_heading_level: 4
|
|
||||||
sidebar_label: C#
|
|
||||||
title: C# Connector
|
title: C# Connector
|
||||||
|
sidebar_label: C#
|
||||||
|
description: This document describes the TDengine C# connector.
|
||||||
|
toc_max_heading_level: 4
|
||||||
---
|
---
|
||||||
|
|
||||||
import Tabs from '@theme/Tabs';
|
import Tabs from '@theme/Tabs';
|
||||||
|
@ -17,7 +18,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx"
|
||||||
|
|
||||||
`TDengine.Connector` is a C# language connector provided by TDengine that allows C# developers to develop C# applications that access TDengine cluster data.
|
`TDengine.Connector` is a C# language connector provided by TDengine that allows C# developers to develop C# applications that access TDengine cluster data.
|
||||||
|
|
||||||
The `TDengine.Connector` connector supports connect to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc.The `TDengine.Connector` also supports WebSocket and developers can build connection through DSN, which supports data writing, querying, and parameter binding, etc.
|
The `TDengine.Connector` connector supports connecting to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc. `TDengine.Connector` also supports WebSocket from v3.0.1; developers can build a connection through a DSN, which supports data writing, querying, and parameter binding, etc.
|
||||||
|
|
||||||
This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to TDengine clusters via `TDengine.Connector` to perform basic operations such as data writing and querying.
|
This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to TDengine clusters via `TDengine.Connector` to perform basic operations such as data writing and querying.
|
||||||
|
|
||||||
|
@ -66,31 +67,43 @@ Please refer to [version support list](/reference/connector#version-support)
|
||||||
* [Nuget Client](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (optional installation)
|
* [Nuget Client](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (optional installation)
|
||||||
* Install TDengine client driver, please refer to [Install client driver](/reference/connector/#install-client-driver) for details
|
* Install TDengine client driver, please refer to [Install client driver](/reference/connector/#install-client-driver) for details
|
||||||
|
|
||||||
### Install via dotnet CLI
|
### Install `TDengine.Connector`
|
||||||
|
|
||||||
<Tabs defaultValue="CLI">
|
<Tabs defaultValue="CLI">
|
||||||
<TabItem value="CLI" label="Get C# driver using dotnet CLI">
|
<TabItem value="CLI" label="Native Connection">
|
||||||
|
|
||||||
You can reference the `TDengine.Connector` published in Nuget to the current project via the `dotnet` command under the path of the existing .NET project.
|
You can reference the `TDengine.Connector` published in Nuget to the current project via the `dotnet` CLI under the path of the existing .NET project.
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
dotnet add package TDengine.Connector
|
dotnet add package TDengine.Connector
|
||||||
```
|
```
|
||||||
|
|
||||||
</TabItem>
|
You may also modify the current .NET project file directly, by including the following `ItemGroup` in your project file (`.csproj`).
|
||||||
<TabItem value="source" label="Use source code to get C# driver">
|
|
||||||
|
|
||||||
You can [download the source code](https://github.com/taosdata/taos-connector-dotnet/tree/3.0) and directly reference the latest version of the TDengine.Connector library.
|
``` XML
|
||||||
|
<ItemGroup>
|
||||||
```bash
|
<PackageReference Include="TDengine.Connector" Version="3.0.*" />
|
||||||
git clone -b 3.0 https://github.com/taosdata/taos-connector-dotnet.git
|
</ItemGroup>
|
||||||
cd taos-connector-dotnet
|
|
||||||
cp -r src/ myProject
|
|
||||||
|
|
||||||
cd myProject
|
|
||||||
dotnet add exmaple.csproj reference src/TDengine.csproj
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="source" label="WebSocket Connection">
|
||||||
|
|
||||||
|
In this scenario, you must modify your project file so that the WebSocket dependency dynamic library is copied from the NuGet package into your output directory.
|
||||||
|
```XML
|
||||||
|
<ItemGroup>
|
||||||
|
<PackageReference Include="TDengine.Connector" Version="3.0.*" GeneratePathProperty="true" />
|
||||||
|
</ItemGroup>
|
||||||
|
<Target Name="copyDLLDepency" BeforeTargets="BeforeBuild">
|
||||||
|
<ItemGroup>
|
||||||
|
<DepDLLFiles Include="$(PkgTDengine_Connector)\runtimes\**\*.*" />
|
||||||
|
</ItemGroup>
|
||||||
|
<Copy SourceFiles="@(DepDLLFiles)" DestinationFolder="$(OutDir)" />
|
||||||
|
</Target>
|
||||||
|
```
|
||||||
|
|
||||||
|
Note: only `TDengine.Connector` version 3.0.2 and later includes the dynamic library for WebSocket.
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
|
@ -252,19 +265,20 @@ ws://localhost:6041/test
|
||||||
|
|
||||||
|Sample program |Sample program description |
|
|Sample program |Sample program description |
|
||||||
|--------------------------------------------------------------------------------------------------------------------|--------------------------------------------|
|
|--------------------------------------------------------------------------------------------------------------------|--------------------------------------------|
|
||||||
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
|
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
|
||||||
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
|
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
|
||||||
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | Parameter binding with TDengine Connector |
|
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/NET6Examples/Stmt) | Parameter binding with TDengine Connector |
|
||||||
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | Schemaless writes with TDengine Connector |
|
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/schemaless) | Schemaless writes with TDengine Connector |
|
||||||
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
|
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
|
||||||
| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector |
|
| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector |
|
||||||
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector |
|
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector |
|
||||||
| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | WebSocket parameter binding example |
|
| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSTMT.cs) | WebSocket parameter binding example |
|
||||||
|
|
||||||
## Important update records
|
## Important update records
|
||||||
|
|
||||||
| TDengine.Connector | Description |
|
| TDengine.Connector | Description |
|
||||||
|--------------------|--------------------------------|
|
|--------------------|--------------------------------|
|
||||||
|
| 3.0.2 | Support .NET Framework 4.5 and later. Support .NET Standard 2.0. NuGet package includes the dynamic library for WebSocket.|
|
||||||
| 3.0.1 | Support WebSocket and Cloud, with query, insert, and parameter binding |
|
| 3.0.1 | Support WebSocket and Cloud, with query, insert, and parameter binding |
|
||||||
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
|
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
|
||||||
| 1.0.7 | Fixed TDengine.Query() memory leak. |
|
| 1.0.7 | Fixed TDengine.Query() memory leak. |
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
---
|
---
|
||||||
sidebar_label: PHP
|
|
||||||
title: PHP Connector
|
title: PHP Connector
|
||||||
|
sidebar_label: PHP
|
||||||
|
description: This document describes the TDengine PHP connector.
|
||||||
---
|
---
|
||||||
|
|
||||||
`php-tdengine` is the TDengine PHP connector provided by TDengine community. In particular, it supports Swoole coroutine.
|
`php-tdengine` is the TDengine PHP connector provided by TDengine community. In particular, it supports Swoole coroutine.
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
---
|
---
|
||||||
title: Connector
|
title: Connector
|
||||||
|
description: This document describes the connectors that TDengine provides to interface with various programming languages.
|
||||||
---
|
---
|
||||||
|
|
||||||
TDengine provides a rich set of APIs (application development interface). To facilitate users to develop their applications quickly, TDengine supports connectors for multiple programming languages, including official connectors for C/C++, Java, Python, Go, Node.js, C#, and Rust. These connectors support connecting to TDengine clusters using both native interfaces (taosc) and REST interfaces (not supported in a few languages yet). Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.
|
TDengine provides a rich set of APIs (application development interface). To facilitate users to develop their applications quickly, TDengine supports connectors for multiple programming languages, including official connectors for C/C++, Java, Python, Go, Node.js, C#, and Rust. These connectors support connecting to TDengine clusters using both native interfaces (taosc) and REST interfaces (not supported in a few languages yet). Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.
|
||||||
|
@ -59,11 +60,11 @@ The different database framework specifications for various programming language
|
||||||
| -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- |
|
| -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- |
|
||||||
| **Connection Management** | Support | Support | Support | Support | Support | Support |
|
| **Connection Management** | Support | Support | Support | Support | Support | Support |
|
||||||
| **Regular Query** | Support | Support | Support | Support | Support | Support |
|
| **Regular Query** | Support | Support | Support | Support | Support | Support |
|
||||||
| **Parameter Binding** | Not supported | Not supported | support | Support | Not supported | Support |
|
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
|
||||||
| **Subscription (TMQ) ** | Not supported | Not supported | support | Not supported | Not supported | Support |
|
| **Subscription (TMQ) ** | Not Supported | Support | Support | Not Supported | Not Supported | Support |
|
||||||
| **Schemaless** | Not supported | Not supported | Not supported | Not supported | Not supported | Not supported |
|
| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||||
| **Bulk Pulling (based on WebSocket) ** | Support | Support | Support | support | Support | Support |
|
| **Bulk Pulling (based on WebSocket) ** | Support | Support | Support | Support | Support | Support |
|
||||||
| **DataFrame** | Not supported | Support | Not supported | Not supported | Not supported | Not supported |
|
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||||
|
|
||||||
:::warning
|
:::warning
|
||||||
|
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
---
|
---
|
||||||
title: "taosAdapter"
|
title: taosAdapter
|
||||||
description: "taosAdapter is a TDengine companion tool that acts as a bridge and adapter between TDengine clusters and applications. It provides an easy-to-use and efficient way to ingest data directly from data collection agent software such as Telegraf, StatsD, collectd, etc. It also provides an InfluxDB/OpenTSDB compatible data ingestion interface, allowing InfluxDB/OpenTSDB applications to be seamlessly ported to TDengine."
|
sidebar_label: taosAdapter
|
||||||
sidebar_label: "taosAdapter"
|
description: This document describes how to use taosAdapter, a TDengine companion tool that acts as a bridge and adapter between TDengine clusters and applications.
|
||||||
---
|
---
|
||||||
|
|
||||||
import Prometheus from "./_prometheus.mdx"
|
import Prometheus from "./_prometheus.mdx"
|
||||||
|
@ -21,6 +21,7 @@ taosAdapter provides the following features.
|
||||||
- Seamless connection to collectd
|
- Seamless connection to collectd
|
||||||
- Seamless connection to StatsD
|
- Seamless connection to StatsD
|
||||||
- Supports Prometheus remote_read and remote_write
|
- Supports Prometheus remote_read and remote_write
|
||||||
|
- Get table's VGroup ID
|
||||||
|
|
||||||
## taosAdapter architecture diagram
|
## taosAdapter architecture diagram
|
||||||
|
|
||||||
|
@ -59,6 +60,7 @@ Usage of taosAdapter:
|
||||||
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
|
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
|
||||||
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
|
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
|
||||||
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
|
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
|
||||||
|
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL" (default 0, means no ttl)
|
||||||
-c, --config string config path default /etc/taos/taosadapter.toml
|
-c, --config string config path default /etc/taos/taosadapter.toml
|
||||||
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
|
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
|
||||||
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
|
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
|
||||||
|
@ -100,6 +102,7 @@ Usage of taosAdapter:
|
||||||
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
|
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
|
||||||
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
|
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
|
||||||
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
|
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
|
||||||
|
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"(default 0, means no ttl)
|
||||||
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
|
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
|
||||||
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
|
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
|
||||||
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
|
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
|
||||||
|
@ -110,6 +113,7 @@ Usage of taosAdapter:
|
||||||
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
|
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
|
||||||
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
|
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
|
||||||
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
|
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
|
||||||
|
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"(default 0, means no ttl)
|
||||||
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" (default 1h0m0s)
|
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" (default 1h0m0s)
|
||||||
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" (default 4000)
|
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" (default 4000)
|
||||||
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" (default 4000)
|
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" (default 4000)
|
||||||
|
@ -131,6 +135,7 @@ Usage of taosAdapter:
|
||||||
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
|
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
|
||||||
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
|
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
|
||||||
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
|
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
|
||||||
|
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL" (default 0, means no ttl)
|
||||||
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
|
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
|
||||||
--version Print the version and exit
|
--version Print the version and exit
|
||||||
```
|
```
|
||||||
|
@ -174,6 +179,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl
|
||||||
node_export is an exporter for machine metrics. Please visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
|
node_export is an exporter for machine metrics. Please visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
|
||||||
- Support for Prometheus remote_read and remote_write
|
- Support for Prometheus remote_read and remote_write
|
||||||
remote_read and remote_write are interfaces for Prometheus data read and write from/to other data storage solutions. Please visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
|
remote_read and remote_write are interfaces for Prometheus data read and write from/to other data storage solutions. Please visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
|
||||||
|
- Get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
|
||||||
|
|
||||||
## Interfaces
|
## Interfaces
|
||||||
|
|
||||||
|
@ -195,6 +201,7 @@ Support InfluxDB query parameters as follows.
|
||||||
- `precision` The time precision used by TDengine
|
- `precision` The time precision used by TDengine
|
||||||
- `u` TDengine user name
|
- `u` TDengine user name
|
||||||
- `p` TDengine password
|
- `p` TDengine password
|
||||||
|
- `ttl` The time to live of automatically created sub-tables. This value cannot be updated. TDengine uses the ttl value carried with the first record of a sub-table when creating that sub-table. For more information, please refer to [Create Table](/taos-sql/table/#create-table)
|
||||||
|
|
||||||
Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
|
Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
|
||||||
Example: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
|
Example: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
|
||||||
|
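A variant of the example above that also sets the new `ttl` parameter; the value is illustrative, and it applies only when the sub-table is auto-created:

```bash
curl --request POST \
  "http://127.0.0.1:6041/influxdb/v1/write?db=test&ttl=100" \
  --user "root:taosdata" \
  --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
```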
@ -236,6 +243,10 @@ node_export is an exporter of hardware and OS metrics exposed by the \*NIX kerne
|
||||||
|
|
||||||
<Prometheus />
|
<Prometheus />
|
||||||
|
|
||||||
|
### Get table's VGroup ID
|
||||||
|
|
||||||
|
You can call `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` to get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
|
||||||
|
|
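For example, with the default credentials and hypothetical database and table names (the response carries the VGroup ID):

```bash
curl --user root:taosdata "http://localhost:6041/rest/vgid?db=power&table=d1001"
```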
||||||
## Memory usage optimization methods
|
## Memory usage optimization methods
|
||||||
|
|
||||||
taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values are integers between 1 and 100, representing a percentage of the system's physical memory.
|
taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values are integers between 1 and 100, representing a percentage of the system's physical memory.
|
||||||
|
|
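The two thresholds correspond to taosadapter configuration items; the key names below are assumed from the upstream defaults and should be checked against your version:

```toml
# Illustrative fragment of taosadapter.toml (key names assumed, verify per version)
[monitor]
pauseQueryMemoryThreshold = 70  # pause query handling at 70% of physical memory
pauseAllMemoryThreshold = 80    # pause all processing at 80%
```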
|
@ -1,8 +1,8 @@
|
||||||
---
|
---
|
||||||
title: taosBenchmark
|
title: taosBenchmark
|
||||||
sidebar_label: taosBenchmark
|
sidebar_label: taosBenchmark
|
||||||
|
description: This document describes how to use taosBenchmark, a tool for testing the performance of TDengine.
|
||||||
toc_max_heading_level: 4
|
toc_max_heading_level: 4
|
||||||
description: "taosBenchmark (once called taosdemo ) is a tool for testing the performance of TDengine."
|
|
||||||
---
|
---
|
||||||
|
|
||||||
# Introduction
|
# Introduction
|
||||||
|
@ -92,7 +92,7 @@ taosBenchmark -f <json file>
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
## Command-line argument in detailed
|
## Command-line argument in detail
|
||||||
|
|
||||||
- **-f/--file <json file\>** :
|
- **-f/--file <json file\>** :
|
||||||
specify the configuration file to use. This file includes all parameters. Users should not use this parameter together with other command-line parameters. There is no default value.
|
specify the configuration file to use. This file includes all parameters. Users should not use this parameter together with other command-line parameters. There is no default value.
|
||||||
|
@ -198,19 +198,25 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
|
||||||
- **-R/--disorder-range <timeRange\>** :
|
- **-R/--disorder-range <timeRange\>** :
|
||||||
Specify the timestamp range for the disordered data. The resulting disordered timestamp is the ordered timestamp minus a random value in this range. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0.
|
Specify the timestamp range for the disordered data. The resulting disordered timestamp is the ordered timestamp minus a random value in this range. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0.
|
||||||
|
|
||||||
- **-F/--prepare_rand <Num\>** :
|
- **-F/--prepared_rand <Num\>** :
|
||||||
Specify the number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
|
Specify the number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
|
||||||
|
|
||||||
- **-a/--replica <replicaNum\>** :
|
- **-a/--replica <replicaNum\>** :
|
||||||
Specify the number of replicas when creating the database. The default value is 1.
|
Specify the number of replicas when creating the database. The default value is 1.
|
||||||
|
|
||||||
|
- **-k/--keep-trying <NUMBER\>** :
|
||||||
|
Keep trying if an insert fails; disabled by default. Available with v3.0.9+. (See the example after this list.)
|
||||||
|
|
||||||
|
- **-z/--trying-interval <NUMBER\>** :
|
||||||
|
Specify the interval between insert retries. Valid value is a positive number. Only valid when keep trying is enabled. Available with v3.0.9+.
|
||||||
|
|
||||||
- **-V/--version** :
|
- **-V/--version** :
|
||||||
Show version information only. Users should not use it with other parameters.
|
Show version information only. Users should not use it with other parameters.
|
||||||
|
|
||||||
- **-? /--help** :
|
- **-? /--help** :
|
||||||
Show help information and exit. Users should not use it with other parameters.
|
Show help information and exit. Users should not use it with other parameters.
|
||||||
|
|
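A sketch of how the two retry flags above combine on the command line; the values are illustrative, and both flags require v3.0.9+:

```bash
# Insert into 100 tables, 10000 rows each; on failure retry each batch
# up to 10 times, waiting 100 between attempts (-k/--keep-trying, -z/--trying-interval).
taosBenchmark -t 100 -n 10000 -k 10 -z 100
```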
||||||
## Configuration file parameters in detailed
|
## Configuration file parameters in detail
|
||||||
|
|
||||||
### General configuration parameters
|
### General configuration parameters
|
||||||
|
|
||||||
|
@ -231,6 +237,10 @@ The parameters listed in this section apply to all function modes.
|
||||||
|
|
||||||
`filetype` must be set to `insert` in the insertion scenario. See [General Configuration Parameters](#General Configuration Parameters)
|
`filetype` must be set to `insert` in the insertion scenario. See [General Configuration Parameters](#General Configuration Parameters)
|
||||||
|
|
||||||
|
- ** keep_trying ** : Keep trying if an insert fails; disabled by default. Available with v3.0.9+.
|
||||||
|
|
||||||
|
- ** trying_interval ** : Specify the interval between insert retries. Valid value is a positive number. Only valid when keep_trying is enabled. Available with v3.0.9+. (See the fragment below.)
|
||||||
|
|
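In a JSON configuration file the same retry behavior might be expressed as the fragment below (retry-related keys only; a real file also needs the `databases` and super table sections):

```json
{
  "filetype": "insert",
  "keep_trying": 10,
  "trying_interval": 100
}
```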
||||||
#### Database related configuration parameters
|
#### Database related configuration parameters
|
||||||
|
|
||||||
The parameters related to database creation are configured in `dbinfo` in the json configuration file, as follows. The other parameters correspond to the database parameters specified when executing `create database` in [../../taos-sql/database].
|
The parameters related to database creation are configured in `dbinfo` in the json configuration file, as follows. The other parameters correspond to the database parameters specified when executing `create database` in [../../taos-sql/database].
|
||||||
|
@ -370,7 +380,7 @@ The configuration parameters for specifying super table tag columns and data col
|
||||||
- **num_of_records_per_req** :
|
- **num_of_records_per_req** :
|
||||||
The number of rows written per request to TDengine; the default value is 30000. When it is set too large, the TDengine client driver returns an error message, and you need to lower this parameter to meet the writing requirements.
|
The number of rows written per request to TDengine; the default value is 30000. When it is set too large, the TDengine client driver returns an error message, and you need to lower this parameter to meet the writing requirements.
|
||||||
|
|
||||||
- **prepare_rand**: The number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
|
- **prepared_rand**: The number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
|
||||||
|
|
||||||
### Query scenario configuration parameters
|
### Query scenario configuration parameters
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
title: taosdump
|
title: taosdump
|
||||||
description: "taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed up data to the same, or another running TDengine cluster."
|
description: This document describes how to use taosdump, a tool for backing up and restoring the data in a TDengine cluster.
|
||||||
---
|
---
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
@ -19,7 +19,7 @@ Users should not use taosdump to back up raw data, environment settings, hardwar
|
||||||
|
|
||||||
There are two ways to install taosdump:
|
There are two ways to install taosdump:
|
||||||
|
|
||||||
- Install the taosTools official installer. Please find taosTools from [All download links](https://www.tdengine.com/all-downloads) page and download and install it.
|
- Install the taosTools official installer. Please find taosTools on the [Release History](https://docs.taosdata.com/releases/tools/) page, then download and install it.
|
||||||
|
|
||||||
- Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
|
- Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
|
||||||
|
|
||||||
|
|
Binary file not shown.
Before Width: | Height: | Size: 10 KiB After Width: | Height: | Size: 78 KiB |
|
@ -1,6 +1,7 @@
|
||||||
---
|
---
|
||||||
title: TDinsight - Grafana-based Zero-Dependency Monitoring Solution for TDengine
|
title: TDinsight - Grafana-based Zero-Dependency Monitoring Solution for TDengine
|
||||||
sidebar_label: TDinsight
|
sidebar_label: TDinsight
|
||||||
|
description: This document describes TDinsight, a monitoring solution for TDengine.
|
||||||
---
|
---
|
||||||
|
|
||||||
TDinsight is a solution for monitoring TDengine using the builtin native monitoring database and [Grafana].
|
TDinsight is a solution for monitoring TDengine using the builtin native monitoring database and [Grafana].
|
||||||
|
@ -325,11 +326,12 @@ Currently, only the number of logins per minute is reported.
|
||||||
|
|
||||||
Support monitoring taosAdapter request statistics and status details, including:
|
Support monitoring taosAdapter request statistics and status details, including:
|
||||||
|
|
||||||
1. **http_request_inflight**: number of real-time requests.
|
1. **Http Request Total**: number of total requests.
|
||||||
2. **http_request_total**: number of total requests.
|
2. **Http Request Fail**: number of failed requests.
|
||||||
3. **http_request_fail**: number of failed requets.
|
3. **CPU Used**: CPU usage of taosAdapter.
|
||||||
4. **CPU Used**: CPU usage of taosAdapter.
|
4. **Memory Used**: Memory usage of taosAdapter.
|
||||||
5. **Memory Used**: Memory usage of taosAdapter.
|
5. **Http Request Inflight**: number of real-time requests.
|
||||||
|
6. **Http Status Code**: taosAdapter http status code.
|
||||||
|
|
||||||
## Upgrade
|
## Upgrade
|
||||||
|
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
---
|
---
|
||||||
title: TDengine Command Line Interface (CLI)
|
title: TDengine Command Line Interface (CLI)
|
||||||
sidebar_label: Command Line Interface
|
sidebar_label: Command Line Interface
|
||||||
description: Instructions and tips for using the TDengine CLI
|
description: This document describes how to use the TDengine CLI.
|
||||||
---
|
---
|
||||||
|
|
||||||
The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is the simplest way for users to manipulate and interact with TDengine instances.
|
The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is the simplest way for users to manipulate and interact with TDengine instances.
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
title: List of supported platforms
|
title: List of supported platforms
|
||||||
description: "List of platforms supported by TDengine server, client, and connector"
|
description: This document describes the supported platforms for the TDengine server, client, and connectors.
|
||||||
---
|
---
|
||||||
|
|
||||||
## List of supported platforms for TDengine server
|
## List of supported platforms for TDengine server
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
title: Deploying TDengine with Docker
|
title: Deploying TDengine with Docker
|
||||||
description: "This chapter focuses on starting the TDengine service in a container and accessing it."
|
description: This chapter describes how to start and access TDengine in a Docker container.
|
||||||
---
|
---
|
||||||
|
|
||||||
This chapter describes how to start the TDengine service in a container and access it. Users can control the behavior of the service in the container by using environment variables on the docker run command-line or in the docker-compose file.
|
This chapter describes how to start the TDengine service in a container and access it. Users can control the behavior of the service in the container by using environment variables on the docker run command-line or in the docker-compose file.
|
||||||
|
@ -273,49 +273,48 @@ password: taosdata
|
||||||
|
|
||||||
## Start the TDengine cluster with docker-compose
|
## Start the TDengine cluster with docker-compose
|
||||||
|
|
||||||
1. The following docker-compose file starts a TDengine cluster with two replicas, two management nodes, two data nodes, and one arbitrator.
|
1. The following docker-compose file starts a TDengine cluster with three nodes.
|
||||||
|
|
||||||
```docker
|
```yml
|
||||||
version: "3"
|
version: "3"
|
||||||
services:
|
services:
|
||||||
arbitrator:
|
td-1:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
command: tarbitrator
|
environment:
|
||||||
td-1:
|
TAOS_FQDN: "td-1"
|
||||||
image: tdengine/tdengine:$VERSION
|
TAOS_FIRST_EP: "td-1"
|
||||||
environment:
|
volumes:
|
||||||
TAOS_FQDN: "td-1"
|
- taosdata-td1:/var/lib/taos/
|
||||||
TAOS_FIRST_EP: "td-1"
|
- taoslog-td1:/var/log/taos/
|
||||||
TAOS_NUM_OF_MNODES: "2"
|
td-2:
|
||||||
TAOS_REPLICA: "2"
|
image: tdengine/tdengine:$VERSION
|
||||||
TAOS_ARBITRATOR: arbitrator:6042
|
environment:
|
||||||
volumes:
|
TAOS_FQDN: "td-2"
|
||||||
- taosdata-td1:/var/lib/taos/
|
TAOS_FIRST_EP: "td-1"
|
||||||
- taoslog-td1:/var/log/taos/
|
volumes:
|
||||||
td-2:
|
- taosdata-td2:/var/lib/taos/
|
||||||
image: tdengine/tdengine:$VERSION
|
- taoslog-td2:/var/log/taos/
|
||||||
environment:
|
td-3:
|
||||||
TAOS_FQDN: "td-2"
|
image: tdengine/tdengine:$VERSION
|
||||||
TAOS_FIRST_EP: "td-1"
|
environment:
|
||||||
TAOS_NUM_OF_MNODES: "2"
|
TAOS_FQDN: "td-3"
|
||||||
TAOS_REPLICA: "2"
|
TAOS_FIRST_EP: "td-1"
|
||||||
TAOS_ARBITRATOR: arbitrator:6042
|
volumes:
|
||||||
volumes:
|
- taosdata-td3:/var/lib/taos/
|
||||||
- taosdata-td2:/var/lib/taos/
|
- taoslog-td3:/var/log/taos/
|
||||||
- taoslog-td2:/var/log/taos/
|
volumes:
|
||||||
volumes:
|
taosdata-td1:
|
||||||
taosdata-td1:
|
taoslog-td1:
|
||||||
taoslog-td1:
|
taosdata-td2:
|
||||||
taosdata-td2:
|
taoslog-td2:
|
||||||
taoslog-td2:
|
taosdata-td3:
|
||||||
```
|
taoslog-td3:
|
||||||
|
```
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
|
|
||||||
- The `VERSION` environment variable is used to set the tdengine image tag
|
- The `VERSION` environment variable is used to set the tdengine image tag
|
||||||
- `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time
|
- `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time
|
||||||
- `TAOS_REPLICA` is used to set the default number of database replicas. Its value range is [1,3]
|
|
||||||
We recommend setting it with `TAOS_ARBITRATOR` to use arbitrator in a two-nodes environment.
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
2. Start the cluster
|
2. Start the cluster
|
||||||
|
@ -345,17 +344,18 @@ password: taosdata
|
||||||
|
|
||||||
4. Show dnodes via TDengine CLI
|
4. Show dnodes via TDengine CLI
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ docker-compose exec td-1 taos -s "show dnodes"
|
$ docker-compose exec td-1 taos -s "show dnodes"
|
||||||
|
|
||||||
taos> show dnodes
|
taos> show dnodes
|
||||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
id | endpoint | vnodes | support_vnodes | status | create_time | note |
|
||||||
======================================================================================================================================
|
======================================================================================================================================
|
||||||
1 | td-1:6030 | 1 | 8 | ready | any | 2022-01-18 02:47:42.871 | |
|
1 | td-1:6030 | 0 | 32 | ready | 2022-08-19 07:57:29.971 | |
|
||||||
2 | td-2:6030 | 0 | 8 | ready | any | 2022-01-18 02:47:43.518 | |
|
2 | td-2:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.415 | |
|
||||||
0 | arbitrator:6042 | 0 | 0 | ready | arb | 2022-01-18 02:47:43.633 | - |
|
3 | td-3:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.417 | |
|
||||||
Query OK, 3 row(s) in set (0.000811s)
|
Query OK, 3 rows in database (0.021262s)
|
||||||
```
|
|
||||||
|
```
|
||||||
|
|
||||||
## taosAdapter
|
## taosAdapter
|
||||||
|
|
||||||
|
@ -373,83 +373,70 @@ password: taosdata
|
||||||
|
|
||||||
Suppose you want to deploy multiple taosAdapters to improve throughput and provide high availability. In that case, the recommended configuration method uses a reverse proxy such as Nginx to offer a unified access entry. For specific configuration methods, please refer to the official documentation of Nginx. Here is an example:
|
Suppose you want to deploy multiple taosAdapters to improve throughput and provide high availability. In that case, the recommended configuration method uses a reverse proxy such as Nginx to offer a unified access entry. For specific configuration methods, please refer to the official documentation of Nginx. Here is an example:
|
||||||
|
|
||||||
```docker
|
```yml
|
||||||
version: "3"
|
version: "3"
|
||||||
|
|
||||||
networks:
|
networks:
|
||||||
inter:
|
inter:
|
||||||
api:
|
|
||||||
|
|
||||||
services:
|
services:
|
||||||
arbitrator:
|
td-1:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
command: tarbitrator
|
networks:
|
||||||
networks:
|
- inter
|
||||||
- inter
|
environment:
|
||||||
td-1:
|
TAOS_FQDN: "td-1"
|
||||||
image: tdengine/tdengine:$VERSION
|
TAOS_FIRST_EP: "td-1"
|
||||||
networks:
|
volumes:
|
||||||
- inter
|
- taosdata-td1:/var/lib/taos/
|
||||||
environment:
|
- taoslog-td1:/var/log/taos/
|
||||||
TAOS_FQDN: "td-1"
|
td-2:
|
||||||
TAOS_FIRST_EP: "td-1"
|
image: tdengine/tdengine:$VERSION
|
||||||
TAOS_NUM_OF_MNODES: "2"
|
networks:
|
||||||
TAOS_REPLICA: "2"
|
- inter
|
||||||
TAOS_ARBITRATOR: arbitrator:6042
|
environment:
|
||||||
volumes:
|
TAOS_FQDN: "td-2"
|
||||||
- taosdata-td1:/var/lib/taos/
|
TAOS_FIRST_EP: "td-1"
|
||||||
- taoslog-td1:/var/log/taos/
|
volumes:
|
||||||
td-2:
|
- taosdata-td2:/var/lib/taos/
|
||||||
image: tdengine/tdengine:$VERSION
|
- taoslog-td2:/var/log/taos/
|
||||||
networks:
|
adapter:
|
||||||
- inter
|
image: tdengine/tdengine:$VERSION
|
||||||
environment:
|
entrypoint: "taosadapter"
|
||||||
TAOS_FQDN: "td-2"
|
networks:
|
||||||
TAOS_FIRST_EP: "td-1"
|
- inter
|
||||||
TAOS_NUM_OF_MNODES: "2"
|
environment:
|
||||||
TAOS_REPLICA: "2"
|
TAOS_FIRST_EP: "td-1"
|
||||||
TAOS_ARBITRATOR: arbitrator:6042
|
TAOS_SECOND_EP: "td-2"
|
||||||
volumes:
|
deploy:
|
||||||
- taosdata-td2:/var/lib/taos/
|
replicas: 4
|
||||||
- taoslog-td2:/var/log/taos/
|
nginx:
|
||||||
adapter:
|
image: nginx
|
||||||
image: tdengine/tdengine:$VERSION
|
depends_on:
|
||||||
command: taosadapter
|
- adapter
|
||||||
networks:
|
networks:
|
||||||
- inter
|
- inter
|
||||||
environment:
|
ports:
|
||||||
TAOS_FIRST_EP: "td-1"
|
- 6041:6041
|
||||||
TAOS_SECOND_EP: "td-2"
|
- 6044:6044/udp
|
||||||
deploy:
|
command: [
|
||||||
replicas: 4
|
"sh",
|
||||||
nginx:
|
"-c",
|
||||||
image: nginx
|
"while true;
|
||||||
depends_on:
|
do curl -s http://adapter:6041/-/ping >/dev/null && break;
|
||||||
- adapter
|
done;
|
||||||
networks:
|
printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
|
||||||
- inter
|
> /etc/nginx/conf.d/rest.conf;
|
||||||
- api
|
printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
|
||||||
ports:
|
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
|
||||||
- 6041:6041
|
nginx -g 'daemon off;'",
|
||||||
- 6044:6044/udp
|
]
|
||||||
command: [
|
volumes:
|
||||||
"sh",
|
taosdata-td1:
|
||||||
"-c",
|
taoslog-td1:
|
||||||
"while true;
|
taosdata-td2:
|
||||||
do curl -s http://adapter:6041/-/ping >/dev/null && break;
|
taoslog-td2:
|
||||||
done;
|
```
|
||||||
printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
|
|
||||||
> /etc/nginx/conf.d/rest.conf;
|
|
||||||
printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
|
|
||||||
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
|
|
||||||
nginx -g 'daemon off;'",
|
|
||||||
]
|
|
||||||
volumes:
|
|
||||||
taosdata-td1:
|
|
||||||
taoslog-td1:
|
|
||||||
taosdata-td2:
|
|
||||||
taoslog-td2:
|
|
||||||
```
|
|
||||||
|
|
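Once the stack is up, the proxied REST endpoint can be sanity-checked from the host; this sketch assumes the default `root:taosdata` credentials and the port mapping shown above:

```shell
# send a SQL statement through nginx to one of the taosAdapter replicas
$ curl -u root:taosdata -d "show databases" http://localhost:6041/rest/sql
```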
||||||
## Deploy with docker swarm
|
## Deploy with docker swarm
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
title: Configuration Parameters
|
title: Configuration Parameters
|
||||||
description: "Configuration parameters for client and server in TDengine"
|
description: This document describes the configuration parameters for the TDengine server and client.
|
||||||
---
|
---
|
||||||
|
|
||||||
## Configuration File on Server Side
|
## Configuration File on Server Side
|
||||||
|
@ -106,7 +106,7 @@ The parameters described in this document by the effect that they have on the sy
|
||||||
| Applicable | Server only |
|
| Applicable | Server only |
|
||||||
| Meaning | The switch for monitoring inside the server. The main object of monitoring is to collect information about load on physical nodes, including CPU usage, memory usage, disk usage, and network bandwidth. Monitoring information is sent over HTTP to the taosKeeper service specified by `monitorFqdn` and `monitorPort`. |
|
| Meaning | The switch for monitoring inside the server. The main object of monitoring is to collect information about load on physical nodes, including CPU usage, memory usage, disk usage, and network bandwidth. Monitoring information is sent over HTTP to the taosKeeper service specified by `monitorFqdn` and `monitorPort`. |
|
||||||
| Value Range | 0: monitoring disabled, 1: monitoring enabled |
|
| Value Range | 0: monitoring disabled, 1: monitoring enabled |
|
||||||
| Default | 1 |
|
| Default | 0 |
|
||||||
|
|
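For the monitoring pipeline to work end to end, this switch is typically enabled together with the taosKeeper endpoint parameters; a minimal `taos.cfg` sketch, where the FQDN and port of taosKeeper are assumptions for a local setup:

```shell
monitor 1
monitorFqdn localhost   # host running taosKeeper (assumption)
monitorPort 6043        # default taosKeeper port (assumption)
```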
||||||
### monitorFqdn
|
### monitorFqdn
|
||||||
|
|
||||||
|
@ -142,6 +142,15 @@ The parameters described in this document by the effect that they have on the sy
|
||||||
| Meaning | Switch for allowing TDengine to collect and report service usage information |
|
| Meaning | Switch for allowing TDengine to collect and report service usage information |
|
||||||
| Value Range | 0: Not allowed; 1: Allowed |
|
| Value Range | 0: Not allowed; 1: Allowed |
|
||||||
| Default Value | 1 |
|
| Default Value | 1 |
|
||||||
|
### crashReporting
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| -------- | -------------------------------------------- |
|
||||||
|
| Applicable | Server Only |
|
||||||
|
| Meaning |Switch for allowing TDengine to collect and report crash related information |
|
||||||
|
| Value Range | 0,1 0: Not allowed;1:allowed |
|
||||||
|
| Default Value | 1 |
|
||||||
|
|
||||||
|
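Both reporting switches can be turned off in `taos.cfg` if no usage or crash information should leave the server; a minimal sketch:

```shell
telemetryReporting 0   # do not report service usage information
crashReporting 0       # do not report crash-related information
```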
|
||||||
## Query Parameters
|
## Query Parameters
|
||||||
|
|
||||||
|
@ -153,11 +162,7 @@ The parameters described in this document by the effect that they have on the sy
|
||||||
| Meaning | Execution policy for query statements |
|
| Meaning | Execution policy for query statements |
|
||||||
| Unit | None |
|
| Unit | None |
|
||||||
| Default | 1 |
|
| Default | 1 |
|
||||||
| Notes | 1: Run queries on vnodes and not on qnodes |
|
| Value Range | 1: Run queries on vnodes and not on qnodes; 2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes; 3: Only run scan operators on vnodes, and run all other operators on qnodes. |
|
||||||
|
|
||||||
2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes.
|
|
||||||
|
|
||||||
3: Only run scan operators on vnodes; run all other operators on qnodes.
|
|
||||||
|
|
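For example, to push subtasks without scan operators to qnodes, the policy can be raised in `taos.cfg`; a sketch that assumes qnodes have already been created in the cluster:

```shell
queryPolicy 2   # run subtasks without scan operators on qnodes
```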
||||||
### querySmaOptimize
|
### querySmaOptimize
|
||||||
|
|
||||||
|
@ -167,12 +172,17 @@ The parameters described in this document by the effect that they have on the sy
|
||||||
| Meaning | SMA index optimization policy |
|
| Meaning | SMA index optimization policy |
|
||||||
| Unit | None |
|
| Unit | None |
|
||||||
| Default Value | 0 |
|
| Default Value | 0 |
|
||||||
| Notes |
|
| Notes | 0: Disable SMA indexing and perform all queries on non-indexed data; 1: Enable SMA indexing and perform queries from suitable statements on precomputation results. |
|
||||||
|
|
||||||
0: Disable SMA indexing and perform all queries on non-indexed data.
|
### countAlwaysReturnValue
|
||||||
|
|
||||||
1: Enable SMA indexing and perform queries from suitable statements on precomputation results.|
|
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| -------- | -------------------------------- |
|
||||||
|
| Applicable | Server only |
|
||||||
|
| Meaning | Whether count()/hyperloglog() returns a value when the input data is empty or NULL |
|
||||||
|
| Value Range | 0: Return an empty row; 1: Return 0 |
|
||||||
|
| Default | 1 |
|
||||||
|
| Notes | When this parameter is set to 1, for queries containing GROUP BY, PARTITION BY, or INTERVAL clauses, if the input data of certain groups or windows is empty or NULL, the corresponding groups or windows return no values |
|
||||||
|
|
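For instance, with the default value of 1, an aggregate over an empty time range still produces a row; this sketch assumes a hypothetical table `test.t1`:

```shell
$ taos -s "select count(*) from test.t1 where ts > now"
# with countAlwaysReturnValue = 1 the query returns a single row containing 0;
# with 0 it returns an empty result set
```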
||||||
### maxNumOfDistinctRes
|
### maxNumOfDistinctRes
|
||||||
|
|
||||||
|
@ -306,6 +316,15 @@ The charset that takes effect is UTF-8.
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | All data files are stored in this directory |
|
| Meaning | All data files are stored in this directory |
|
||||||
| Default Value | /var/lib/taos |
|
| Default Value | /var/lib/taos |
|
||||||
|
| Note | The [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function needs to be used in conjunction with the [KEEP](https://docs.tdengine.com/taos-sql/database/#parameters) parameter |
|
||||||
|
|
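With tiered storage, multiple `dataDir` entries can be configured, each followed by a storage level and a primary flag; a sketch in which the mount paths are illustrative:

```shell
dataDir /mnt/ssd/taos 0 1   # level 0, primary mount, hottest data
dataDir /mnt/hdd/taos 1 0   # level 1, older data as governed by KEEP
```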
||||||
|
### tempDir
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| -------- | ------------------------------------------ |
|
||||||
|
| Applicable | Server only |
|
||||||
|
| Meaning | The directory for temporary files generated while the system is running |
|
||||||
|
| Default | /tmp |
|
||||||
|
|
||||||
### minimalTmpDirGB
|
### minimalTmpDirGB
|
||||||
|
|
||||||
|
@ -336,89 +355,6 @@ The charset that takes effect is UTF-8.
|
||||||
| Value Range | 0-4096 |
|
| Value Range | 0-4096 |
|
||||||
| Default Value | 2x the CPU cores |
|
| Default Value | 2x the CPU cores |
|
||||||
|
|
||||||
## Time Parameters
|
|
||||||
|
|
||||||
### statusInterval
|
|
||||||
|
|
||||||
| Attribute | Description |
|
|
||||||
| -------- | --------------------------- |
|
|
||||||
| Applicable | Server Only |
|
|
||||||
| Meaning | the interval of dnode reporting status to mnode |
|
|
||||||
| Unit | second |
|
|
||||||
| Value Range | 1-10 |
|
|
||||||
| Default Value | 1 |
|
|
||||||
|
|
||||||
### shellActivityTimer
|
|
||||||
|
|
||||||
| Attribute | Description |
|
|
||||||
| -------- | --------------------------------- |
|
|
||||||
| Applicable | Server and Client |
|
|
||||||
| Meaning | The interval for TDengine CLI to send heartbeat to mnode |
|
|
||||||
| Unit | second |
|
|
||||||
| Value Range | 1-120 |
|
|
||||||
| Default Value | 3 |
|
|
||||||
|
|
||||||
## Performance Optimization Parameters
|
|
||||||
|
|
||||||
### numOfCommitThreads
|
|
||||||
|
|
||||||
| Attribute | Description |
|
|
||||||
| -------- | ---------------------- |
|
|
||||||
| Applicable | Server Only |
|
|
||||||
| Meaning | Maximum of threads for committing to disk |
|
|
||||||
| Default Value | |
|
|
||||||
|
|
||||||
## Compression Parameters
|
|
||||||
|
|
||||||
### compressMsgSize
|
|
||||||
|
|
||||||
| Attribute | Description |
|
|
||||||
| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| Applicable | Server Only |
|
|
||||||
| Meaning | The threshold for message size to compress the message. Set the value to 64330 bytes for good message compression. |
|
|
||||||
| Unit | bytes |
|
|
||||||
| Value Range | 0: already compress; >0: compress when message exceeds it; -1: always uncompress |
|
|
||||||
| Default Value | -1 |
|
|
||||||
|
|
||||||
### compressColData
|
|
||||||
|
|
||||||
| Attribute | Description |
|
|
||||||
| -------- | --------------------------------------------------------------------------------------- |
|
|
||||||
| Applicable | Server Only |
|
|
||||||
| Meaning | The threshold for size of column data to trigger compression for the query result |
|
|
||||||
| Unit | bytes |
|
|
||||||
| Value Range | 0: always compress; >0: only compress when the size of any column data exceeds the threshold; -1: always uncompress |
|
|
||||||
| Default Value | -1 |
|
|
||||||
| Default Value | -1 |
|
|
||||||
| Note | available from version 2.3.0.0 | |
|
|
||||||
|
|
||||||
## Continuous Query Parameters
|
|
||||||
|
|
||||||
### minSlidingTime
|
|
||||||
|
|
||||||
| Attribute | Description |
|
|
||||||
| ------------- | -------------------------------------------------------- |
|
|
||||||
| Applicable | Server Only |
|
|
||||||
| Meaning | Minimum sliding time of time window |
|
|
||||||
| Unit | millisecond or microsecond , depending on time precision |
|
|
||||||
| Value Range | 10-1000000 |
|
|
||||||
| Default Value | 10 |
|
|
||||||
|
|
||||||
### minIntervalTime
|
|
||||||
|
|
||||||
| Attribute | Description |
|
|
||||||
| ------------- | --------------------------- |
|
|
||||||
| Applicable | Server Only |
|
|
||||||
| Meaning | Minimum size of time window |
|
|
||||||
| Unit | millisecond |
|
|
||||||
| Value Range | 1-1000000 |
|
|
||||||
| Default Value | 10 |
|
|
||||||
|
|
||||||
:::info
|
|
||||||
To prevent system resource from being exhausted by multiple concurrent streams, a random delay is applied on each stream automatically. `maxFirstStreamCompDelay` is the maximum delay time before a continuous query is started the first time. `streamCompDelayRatio` is the ratio for calculating delay time, with the size of the time window as base. `maxStreamCompDelay` is the maximum delay time. The actual delay time is a random time not bigger than `maxStreamCompDelay`. If a continuous query fails, `retryStreamComDelay` is the delay time before retrying it, also not bigger than `maxStreamCompDelay`.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
## Log Parameters
|
## Log Parameters
|
||||||
|
|
||||||
### logDir
|
### logDir
|
||||||
|
@ -661,10 +597,22 @@ To prevent system resource from being exhausted by multiple concurrent streams,
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------------------- |
|
| -------- | ----------------------------- |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| Meaning | Whether schemaless columns are consistently ordered |
|
| Meaning | Whether schemaless columns are consistently ordered; deprecated, discarded since 3.0.3.0 |
|
||||||
| Value Range | 0: not consistent; 1: consistent. |
|
| Value Range | 0: not consistent; 1: consistent. |
|
||||||
| Default | 1 |
|
| Default | 1 |
|
||||||
|
|
||||||
|
## Compress Parameters
|
||||||
|
|
||||||
|
### compressMsgSize
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| -------- | ----------------------------- |
|
||||||
|
| Applicable | Both Client and Server side |
|
||||||
|
| Meaning | Whether RPC message is compressed |
|
||||||
|
| Value Range | -1: none message is compressed; 0: all messages are compressed; N (N>0): messages exceeding N bytes are compressed |
|
||||||
|
| Default | -1 |
|
||||||
|
|
||||||
|
|
||||||
## Other Parameters
|
## Other Parameters
|
||||||
|
|
||||||
### enableCoreFile
|
### enableCoreFile
|
||||||
|
@ -686,172 +634,60 @@ To prevent system resource from being exhausted by multiple concurrent streams,
|
||||||
| Value Range | 0: disable UDF; 1: enabled UDF |
|
| Value Range | 0: disable UDF; 1: enabled UDF |
|
||||||
| Default Value | 1 |
|
| Default Value | 1 |
|
||||||
|
|
||||||
## Parameter Comparison of TDengine 2.x and 3.0
|
|
||||||
| # | **Parameter** | **In 2.x** | **In 3.0** |
|
## 3.0 Parameters
|
||||||
| --- | :-----------------: | --------------- | --------------- |
|
|
||||||
| 1 | firstEp | Yes | Yes |
|
| # | **Parameter** | **Applicable to 2.x** | **Applicable to 3.0** | Current behavior in 3.0 |
|
||||||
| 2 | secondEp | Yes | Yes |
|
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
|
||||||
| 3 | fqdn | Yes | Yes |
|
| 1 | firstEp | Yes | Yes | |
|
||||||
| 4 | serverPort | Yes | Yes |
|
| 2 | secondEp | Yes | Yes | |
|
||||||
| 5 | maxShellConns | Yes | Yes |
|
| 3 | fqdn | Yes | Yes | |
|
||||||
| 6 | monitor | Yes | Yes |
|
| 4 | serverPort | Yes | Yes | |
|
||||||
| 7 | monitorFqdn | No | Yes |
|
| 5 | maxShellConns | Yes | Yes | |
|
||||||
| 8 | monitorPort | No | Yes |
|
| 6 | monitor | Yes | Yes | |
|
||||||
| 9 | monitorInterval | Yes | Yes |
|
| 7 | monitorFqdn | No | Yes | |
|
||||||
| 10 | monitorMaxLogs | No | Yes |
|
| 8 | monitorPort | No | Yes | |
|
||||||
| 11 | monitorComp | No | Yes |
|
| 9 | monitorInterval | Yes | Yes | |
|
||||||
| 12 | telemetryReporting | Yes | Yes |
|
| 10 | queryPolicy | No | Yes | |
|
||||||
| 13 | telemetryInterval | No | Yes |
|
| 11 | querySmaOptimize | No | Yes | |
|
||||||
| 14 | telemetryServer | No | Yes |
|
| 12 | maxNumOfDistinctRes | Yes | Yes | |
|
||||||
| 15 | telemetryPort | No | Yes |
|
| 15 | countAlwaysReturnValue | Yes | Yes | |
|
||||||
| 16 | queryPolicy | No | Yes |
|
| 16 | dataDir | Yes | Yes | |
|
||||||
| 17 | querySmaOptimize | No | Yes |
|
| 17 | minimalDataDirGB | Yes | Yes | |
|
||||||
| 18 | queryRsmaTolerance | No | Yes |
|
| 18 | supportVnodes | No | Yes | |
|
||||||
| 19 | queryBufferSize | Yes | Yes |
|
| 19 | tempDir | Yes | Yes | |
|
||||||
| 20 | maxNumOfDistinctRes | Yes | Yes |
|
| 20 | minimalTmpDirGB | Yes | Yes | |
|
||||||
| 21 | minSlidingTime | Yes | Yes |
|
| 21 | smlChildTableName | Yes | Yes | |
|
||||||
| 22 | minIntervalTime | Yes | Yes |
|
| 22 | smlTagName | Yes | Yes | |
|
||||||
| 23 | countAlwaysReturnValue | Yes | Yes |
|
| 23 | smlDataFormat | No | Yes (discarded since 3.0.3.0) | |
|
||||||
| 24 | dataDir | Yes | Yes |
|
| 24 | statusInterval | Yes | Yes | |
|
||||||
| 25 | minimalDataDirGB | Yes | Yes |
|
| 25 | logDir | Yes | Yes | |
|
||||||
| 26 | supportVnodes | No | Yes |
|
| 26 | minimalLogDirGB | Yes | Yes | |
|
||||||
| 27 | tempDir | Yes | Yes |
|
| 27 | numOfLogLines | Yes | Yes | |
|
||||||
| 28 | minimalTmpDirGB | Yes | Yes |
|
| 28 | asyncLog | Yes | Yes | |
|
||||||
| 29 | compressMsgSize | Yes | Yes |
|
| 29 | logKeepDays | Yes | Yes | |
|
||||||
| 30 | compressColData | Yes | Yes |
|
| 30 | debugFlag | Yes | Yes | |
|
||||||
| 31 | smlChildTableName | Yes | Yes |
|
| 31 | tmrDebugFlag | Yes | Yes | |
|
||||||
| 32 | smlTagName | Yes | Yes |
|
| 32 | uDebugFlag | Yes | Yes | |
|
||||||
| 33 | smlDataFormat | No | Yes |
|
| 33 | rpcDebugFlag | Yes | Yes | |
|
||||||
| 34 | statusInterval | Yes | Yes |
|
| 34 | jniDebugFlag | Yes | Yes | |
|
||||||
| 35 | shellActivityTimer | Yes | Yes |
|
| 35 | qDebugFlag | Yes | Yes | |
|
||||||
| 36 | transPullupInterval | No | Yes |
|
| 36 | cDebugFlag | Yes | Yes | |
|
||||||
| 37 | mqRebalanceInterval | No | Yes |
|
| 37 | dDebugFlag | Yes | Yes | |
|
||||||
| 38 | ttlUnit | No | Yes |
|
| 38 | vDebugFlag | Yes | Yes | |
|
||||||
| 39 | ttlPushInterval | No | Yes |
|
| 39 | mDebugFlag | Yes | Yes | |
|
||||||
| 40 | numOfTaskQueueThreads | No | Yes |
|
| 40 | wDebugFlag | Yes | Yes | |
|
||||||
| 41 | numOfRpcThreads | No | Yes |
|
| 41 | sDebugFlag | Yes | Yes | |
|
||||||
| 42 | numOfCommitThreads | Yes | Yes |
|
| 42 | tsdbDebugFlag | Yes | Yes | |
|
||||||
| 43 | numOfMnodeReadThreads | No | Yes |
|
| 43 | tqDebugFlag | No | Yes | |
|
||||||
| 44 | numOfVnodeQueryThreads | No | Yes |
|
| 44 | fsDebugFlag | Yes | Yes | |
|
||||||
| 45 | ratioOfVnodeStreamThreads | No | Yes |
|
| 45 | udfDebugFlag | No | Yes | |
|
||||||
| 46 | numOfVnodeFetchThreads | No | Yes |
|
| 46 | smaDebugFlag | No | Yes | |
|
||||||
| 47 | numOfVnodeRsmaThreads | No | Yes |
|
| 47 | idxDebugFlag | No | Yes | |
|
||||||
| 48 | numOfQnodeQueryThreads | No | Yes |
|
| 48 | tdbDebugFlag | No | Yes | |
|
||||||
| 49 | numOfQnodeFetchThreads | No | Yes |
|
| 49 | metaDebugFlag | No | Yes | |
|
||||||
| 50 | numOfSnodeSharedThreads | No | Yes |
|
| 50 | timezone | Yes | Yes | |
|
||||||
| 51 | numOfSnodeUniqueThreads | No | Yes |
|
| 51 | locale | Yes | Yes | |
|
||||||
| 52 | rpcQueueMemoryAllowed | No | Yes |
|
| 52 | charset | Yes | Yes | |
|
||||||
| 53 | logDir | Yes | Yes |
|
| 53 | udf | Yes | Yes | |
|
||||||
| 54 | minimalLogDirGB | Yes | Yes |
|
| 54 | enableCoreFile | Yes | Yes | |
|
||||||
| 55 | numOfLogLines | Yes | Yes |
|
|
||||||
| 56 | asyncLog | Yes | Yes |
|
|
||||||
| 57 | logKeepDays | Yes | Yes |
|
|
||||||
| 60 | debugFlag | Yes | Yes |
|
|
||||||
| 61 | tmrDebugFlag | Yes | Yes |
|
|
||||||
| 62 | uDebugFlag | Yes | Yes |
|
|
||||||
| 63 | rpcDebugFlag | Yes | Yes |
|
|
||||||
| 64 | jniDebugFlag | Yes | Yes |
|
|
||||||
| 65 | qDebugFlag | Yes | Yes |
|
|
||||||
| 66 | cDebugFlag | Yes | Yes |
|
|
||||||
| 67 | dDebugFlag | Yes | Yes |
|
|
||||||
| 68 | vDebugFlag | Yes | Yes |
|
|
||||||
| 69 | mDebugFlag | Yes | Yes |
|
|
||||||
| 70 | wDebugFlag | Yes | Yes |
|
|
||||||
| 71 | sDebugFlag | Yes | Yes |
|
|
||||||
| 72 | tsdbDebugFlag | Yes | Yes |
|
|
||||||
| 73 | tqDebugFlag | No | Yes |
|
|
||||||
| 74 | fsDebugFlag | Yes | Yes |
|
|
||||||
| 75 | udfDebugFlag | No | Yes |
|
|
||||||
| 76 | smaDebugFlag | No | Yes |
|
|
||||||
| 77 | idxDebugFlag | No | Yes |
|
|
||||||
| 78 | tdbDebugFlag | No | Yes |
|
|
||||||
| 79 | metaDebugFlag | No | Yes |
|
|
||||||
| 80 | timezone | Yes | Yes |
|
|
||||||
| 81 | locale | Yes | Yes |
|
|
||||||
| 82 | charset | Yes | Yes |
|
|
||||||
| 83 | udf | Yes | Yes |
|
|
||||||
| 84 | enableCoreFile | Yes | Yes |
|
|
||||||
| 85 | arbitrator | Yes | No |
|
|
||||||
| 86 | numOfThreadsPerCore | Yes | No |
|
|
||||||
| 87 | numOfMnodes | Yes | No |
|
|
||||||
| 88 | vnodeBak | Yes | No |
|
|
||||||
| 89 | balance | Yes | No |
|
|
||||||
| 90 | balanceInterval | Yes | No |
|
|
||||||
| 91 | offlineThreshold | Yes | No |
|
|
||||||
| 92 | role | Yes | No |
|
|
||||||
| 93 | dnodeNopLoop | Yes | No |
|
|
||||||
| 94 | keepTimeOffset | Yes | No |
|
|
||||||
| 95 | rpcTimer | Yes | No |
|
|
||||||
| 96 | rpcMaxTime | Yes | No |
|
|
||||||
| 97 | rpcForceTcp | Yes | No |
|
|
||||||
| 98 | tcpConnTimeout | Yes | No |
|
|
||||||
| 99 | syncCheckInterval | Yes | No |
|
|
||||||
| 100 | maxTmrCtrl | Yes | No |
|
|
||||||
| 101 | monitorReplica | Yes | No |
|
|
||||||
| 102 | smlTagNullName | Yes | No |
|
|
||||||
| 103 | keepColumnName | Yes | No |
|
|
||||||
| 104 | ratioOfQueryCores | Yes | No |
|
|
||||||
| 105 | maxStreamCompDelay | Yes | No |
|
|
||||||
| 106 | maxFirstStreamCompDelay | Yes | No |
|
|
||||||
| 107 | retryStreamCompDelay | Yes | No |
|
|
||||||
| 108 | streamCompDelayRatio | Yes | No |
|
|
||||||
| 109 | maxVgroupsPerDb | Yes | No |
|
|
||||||
| 110 | maxTablesPerVnode | Yes | No |
|
|
||||||
| 111 | minTablesPerVnode | Yes | No |
|
|
||||||
| 112 | tableIncStepPerVnode | Yes | No |
|
|
||||||
| 113 | cache | Yes | No |
|
|
||||||
| 114 | blocks | Yes | No |
|
|
||||||
| 115 | days | Yes | No |
|
|
||||||
| 116 | keep | Yes | No |
|
|
||||||
| 117 | minRows | Yes | No |
|
|
||||||
| 118 | maxRows | Yes | No |
|
|
||||||
| 119 | quorum | Yes | No |
|
|
||||||
| 120 | comp | Yes | No |
|
|
||||||
| 121 | walLevel | Yes | No |
|
|
||||||
| 122 | fsync | Yes | No |
|
|
||||||
| 123 | replica | Yes | No |
|
|
||||||
| 124 | partitions | Yes | No |
|
|
||||||
| 125 | quorum | Yes | No |
|
|
||||||
| 126 | update | Yes | No |
|
|
||||||
| 127 | cachelast | Yes | No |
|
|
||||||
| 128 | maxSQLLength | Yes | No |
|
|
||||||
| 129 | maxWildCardsLength | Yes | No |
|
|
||||||
| 130 | maxRegexStringLen | Yes | No |
|
|
||||||
| 131 | maxNumOfOrderedRes | Yes | No |
|
|
||||||
| 132 | maxConnections | Yes | No |
|
|
||||||
| 133 | mnodeEqualVnodeNum | Yes | No |
|
|
||||||
| 134 | http | Yes | No |
|
|
||||||
| 135 | httpEnableRecordSql | Yes | No |
|
|
||||||
| 136 | httpMaxThreads | Yes | No |
|
|
||||||
| 137 | restfulRowLimit | Yes | No |
|
|
||||||
| 138 | httpDbNameMandatory | Yes | No |
|
|
||||||
| 139 | httpKeepAlive | Yes | No |
|
|
||||||
| 140 | enableRecordSql | Yes | No |
|
|
||||||
| 141 | maxBinaryDisplayWidth | Yes | No |
|
|
||||||
| 142 | stream | Yes | No |
|
|
||||||
| 143 | retrieveBlockingModel | Yes | No |
|
|
||||||
| 144 | tsdbMetaCompactRatio | Yes | No |
|
|
||||||
| 145 | defaultJSONStrType | Yes | No |
|
|
||||||
| 146 | walFlushSize | Yes | No |
|
|
||||||
| 147 | keepTimeOffset | Yes | No |
|
|
||||||
| 148 | flowctrl | Yes | No |
|
|
||||||
| 149 | slaveQuery | Yes | No |
|
|
||||||
| 150 | adjustMaster | Yes | No |
|
|
||||||
| 151 | topicBinaryLen | Yes | No |
|
|
||||||
| 152 | telegrafUseFieldNum | Yes | No |
|
|
||||||
| 153 | deadLockKillQuery | Yes | No |
|
|
||||||
| 154 | clientMerge | Yes | No |
|
|
||||||
| 155 | sdbDebugFlag | Yes | No |
|
|
||||||
| 156 | odbcDebugFlag | Yes | No |
|
|
||||||
| 157 | httpDebugFlag | Yes | No |
|
|
||||||
| 158 | monDebugFlag | Yes | No |
|
|
||||||
| 159 | cqDebugFlag | Yes | No |
|
|
||||||
| 160 | shortcutFlag | Yes | No |
|
|
||||||
| 161 | probeSeconds | Yes | No |
|
|
||||||
| 162 | probeKillSeconds | Yes | No |
|
|
||||||
| 163 | probeInterval | Yes | No |
|
|
||||||
| 164 | lossyColumns | Yes | No |
|
|
||||||
| 165 | fPrecision | Yes | No |
|
|
||||||
| 166 | dPrecision | Yes | No |
|
|
||||||
| 167 | maxRange | Yes | No |
|
|
||||||
| 168 | range | Yes | No |
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
title: File directory structure
|
title: File directory structure
|
||||||
description: "TDengine installation directory description"
|
description: This document describes the structure of the TDengine directory after installation.
|
||||||
---
|
---
|
||||||
|
|
||||||
After TDengine is installed, the following directories or files will be created in the system by default.
|
After TDengine is installed, the following directories or files will be created in the system by default.
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
---
|
---
|
||||||
title: Schemaless Writing
|
title: Schemaless Writing
|
||||||
description: 'The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface.'
|
description: This document describes how to use the schemaless write component of TDengine.
|
||||||
---
|
---
|
||||||
|
|
||||||
In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. Schemaless writing automatically creates storage structures for your data as it is being written to TDengine, so that you do not need to create supertables in advance. When necessary, schemaless writing
|
In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. Schemaless writing automatically creates storage structures for your data as it is being written to TDengine, so that you do not need to create supertables in advance. When necessary, schemaless writing
|
||||||
|
@ -80,7 +80,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam
|
||||||
NULL.
|
NULL.
|
||||||
6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
|
6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
|
||||||
7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
|
7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
|
||||||
8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat in taos.cfg defaults to false since version 3.0.1.3)
|
8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat in taos.cfg defaults to false since version 3.0.1.3 and is discarded since 3.0.3.0)
|
||||||
|
|
||||||
:::tip
|
:::tip
|
||||||
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed
|
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
---
|
---
|
||||||
sidebar_label: taosKeeper
|
sidebar_label: taosKeeper
|
||||||
title: taosKeeper
|
title: taosKeeper
|
||||||
description: exports TDengine monitoring metrics.
|
description: This document describes how to use taosKeeper, a tool for exporting TDengine monitoring metrics.
|
||||||
---
|
---
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
@ -24,7 +24,14 @@ You can compile taosKeeper separately and install it. Please refer to the [taosK
|
||||||
|
|
||||||
taosKeeper needs to be executed on the terminal of the operating system. It supports three configuration methods: [Command-line arguments](#command-line-arguments-in-detail), [environment variable](#environment-variable-in-detail) and [configuration file](#configuration-file-parameters-in-detail). Command-line arguments take precedence over environment variables, which in turn take precedence over the configuration file.
|
taosKeeper needs to be executed on the terminal of the operating system. It supports three configuration methods: [Command-line arguments](#command-line-arguments-in-detail), [environment variable](#environment-variable-in-detail) and [configuration file](#configuration-file-parameters-in-detail). Command-line arguments take precedence over environment variables, which in turn take precedence over the configuration file.
|
||||||
|
|
||||||
**Make sure that the TDengine cluster is running correctly before running taosKeeper. ** Ensure that the monitoring service in TDengine has been started. For more information, see [TDengine Monitoring Configuration](../config/#monitoring).
|
**Make sure that the TDengine cluster is running correctly before running taosKeeper.** Ensure that the monitoring service in TDengine has been started. At least the values of `monitor` and `monitorFqdn` need to be set in `taos.cfg`.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
monitor 1
|
||||||
|
monitorFqdn localhost # taoskeeper's FQDN
|
||||||
|
```
|
||||||
|
|
||||||
|
For more information, see [TDengine Monitoring Configuration](../config/#monitoring).
|
||||||
|
|
||||||
### Command-Line Parameters
|
### Command-Line Parameters
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
---
|
---
|
||||||
title: Reference
|
title: Reference
|
||||||
|
description: This document describes TDengine connectors and utilities.
|
||||||
---
|
---
|
||||||
|
|
||||||
This section describes the TDengine connectors and utilities.
|
This section describes the TDengine connectors and utilities.
|
||||||
|
|
|
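For example, to compress only large RPC messages on both client and server, set a byte threshold; the value below is an arbitrary illustration:

```shell
compressMsgSize 524288   # compress RPC messages larger than 512 KB
```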
@ -1,6 +1,7 @@
|
||||||
---
|
---
|
||||||
sidebar_label: Grafana
|
|
||||||
title: Grafana
|
title: Grafana
|
||||||
|
sidebar_label: Grafana
|
||||||
|
description: This document describes how to integrate TDengine with Grafana.
|
||||||
---
|
---
|
||||||
|
|
||||||
import Tabs from "@theme/Tabs";
|
import Tabs from "@theme/Tabs";
|
||||||
|
@ -155,13 +156,13 @@ You can setup a zero-configuration stack for TDengine + Grafana by [docker-compo
|
||||||
|
|
||||||
services:
|
services:
|
||||||
tdengine:
|
tdengine:
|
||||||
image: tdengine/tdengine:2.6.0.2
|
image: tdengine/tdengine:3.0.2.4
|
||||||
environment:
|
environment:
|
||||||
TAOS_FQDN: tdengine
|
TAOS_FQDN: tdengine
|
||||||
volumes:
|
volumes:
|
||||||
- tdengine-data:/var/lib/taos/
|
- tdengine-data:/var/lib/taos/
|
||||||
grafana:
|
grafana:
|
||||||
image: grafana/grafana:8.5.6
|
image: grafana/grafana:9.3.6
|
||||||
volumes:
|
volumes:
|
||||||
- ./tdengine.yml/:/etc/grafana/provisioning/tdengine.yml
|
- ./tdengine.yml/:/etc/grafana/provisioning/tdengine.yml
|
||||||
- grafana-data:/var/lib/grafana
|
- grafana-data:/var/lib/grafana
|
||||||
|
@ -196,11 +197,18 @@ As shown above, select the `TDengine` data source in the `Query` and enter the c
|
||||||
- INPUT SQL: Enter the desired query (the results being two columns and multiple rows), such as `select _wstart, avg(mem_system) from log.dnodes_info where ts >= $from and ts < $to interval($interval)`. In this statement, $from, $to, and $interval are variables that Grafana replaces with the query time range and interval. In addition to the built-in variables, custom template variables are also supported.
|
- INPUT SQL: Enter the desired query (the results being two columns and multiple rows), such as `select _wstart, avg(mem_system) from log.dnodes_info where ts >= $from and ts < $to interval($interval)`. In this statement, $from, $to, and $interval are variables that Grafana replaces with the query time range and interval. In addition to the built-in variables, custom template variables are also supported.
|
||||||
- ALIAS BY: This allows you to set the current query alias.
|
- ALIAS BY: This allows you to set the current query alias.
|
||||||
- GENERATE SQL: Clicking this button will automatically replace the corresponding variables and generate the final executed statement.
|
- GENERATE SQL: Clicking this button will automatically replace the corresponding variables and generate the final executed statement.
|
||||||
|
- Group by column name(s): comma-separated list of `group by` or `partition by` column names. When set, the panel can show multi-dimensional data if the SQL statement contains a `group by` or `partition by` clause. For example, it can show data per `dnode_ep` if the SQL is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)` and `Group by column name(s)` is set to `dnode_ep`.
|
||||||
|
- Format to: legend format for `group by` or `partition by` results. For example, the panel can display a series per `dnode_ep` if the SQL is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)`, `Group by column name(s)` is `dnode_ep`, and `Format to` is `mem_system_{{dnode_ep}}`.
|
||||||
|
|
||||||
Follow the default prompt to query the average system memory usage for the specified interval on the server where the current TDengine deployment is located as follows.
|
Follow the default prompt to query the average system memory usage for the specified interval on the server where the current TDengine deployment is located as follows.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
||||||
|
The following example queries the average system memory usage for the specified interval on each server.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
> For more information on how to use Grafana to create the appropriate monitoring interface and for more details on using Grafana, refer to the official Grafana [documentation](https://grafana.com/docs/).
|
> For more information on how to use Grafana to create the appropriate monitoring interface and for more details on using Grafana, refer to the official Grafana [documentation](https://grafana.com/docs/).
|
||||||
|
|
||||||
### Importing the Dashboard
|
### Importing the Dashboard
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
---
|
---
|
||||||
sidebar_label: Prometheus
|
|
||||||
title: Prometheus writing and reading
|
title: Prometheus writing and reading
|
||||||
|
sidebar_label: Prometheus
|
||||||
|
description: This document describes how to integrate TDengine with Prometheus.
|
||||||
---
|
---
|
||||||
|
|
||||||
import Prometheus from "../14-reference/_prometheus.mdx"
|
import Prometheus from "../14-reference/_prometheus.mdx"
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
---
|
---
|
||||||
sidebar_label: Telegraf
|
|
||||||
title: Telegraf writing
|
title: Telegraf writing
|
||||||
|
sidebar_label: Telegraf
|
||||||
|
description: This document describes how to integrate TDengine with Telegraf.
|
||||||
---
|
---
|
||||||
|
|
||||||
import Telegraf from "../14-reference/_telegraf.mdx"
|
import Telegraf from "../14-reference/_telegraf.mdx"
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
---
|
---
|
||||||
sidebar_label: collectd
|
|
||||||
title: collectd writing
|
title: collectd writing
|
||||||
|
sidebar_label: collectd
|
||||||
|
description: This document describes how to integrate TDengine with collectd.
|
||||||
---
|
---
|
||||||
|
|
||||||
import CollectD from "../14-reference/_collectd.mdx"
|
import CollectD from "../14-reference/_collectd.mdx"
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
---
|
---
|
||||||
sidebar_label: StatsD
|
|
||||||
title: StatsD Writing
|
title: StatsD Writing
|
||||||
|
sidebar_label: StatsD
|
||||||
|
description: This document describes how to integrate TDengine with StatsD.
|
||||||
---
|
---
|
||||||
|
|
||||||
import StatsD from "../14-reference/_statsd.mdx"
|
import StatsD from "../14-reference/_statsd.mdx"
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
---
|
---
|
||||||
sidebar_label: icinga2
|
|
||||||
title: icinga2 writing
|
title: icinga2 writing
|
||||||
|
sidebar_label: icinga2
|
||||||
|
description: This document describes how to integrate TDengine with icinga2.
|
||||||
---
|
---
|
||||||
|
|
||||||
import Icinga2 from "../14-reference/_icinga2.mdx"
|
import Icinga2 from "../14-reference/_icinga2.mdx"
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
---
|
---
|
||||||
sidebar_label: TCollector
|
|
||||||
title: TCollector writing
|
title: TCollector writing
|
||||||
|
sidebar_label: TCollector
|
||||||
|
description: This document describes how to integrate TDengine with TCollector.
|
||||||
---
|
---
|
||||||
|
|
||||||
import TCollector from "../14-reference/_tcollector.mdx"
|
import TCollector from "../14-reference/_tcollector.mdx"
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
---
|
---
|
||||||
sidebar_label: EMQX Broker
|
|
||||||
title: EMQX Broker writing
|
title: EMQX Broker writing
|
||||||
|
sidebar_label: EMQX Broker
|
||||||
|
description: This document describes how to integrate TDengine with the EMQX broker.
|
||||||
---
|
---
|
||||||
|
|
||||||
MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emqx) is an open-source MQTT broker. You can write MQTT data directly to TDengine without any code. You only need to set up "rules" in the EMQX Dashboard to create a simple configuration. EMQX supports saving data to TDengine by sending data to a web service and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it.
|
MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emqx) is an open-source MQTT broker. You can write MQTT data directly to TDengine without any code. You only need to set up "rules" in the EMQX Dashboard to create a simple configuration. EMQX supports saving data to TDengine by sending data to a web service and provides a native TDengine driver for direct saving in the Enterprise Edition. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it.
|
||||||
|
|