other: merge 3.0
This commit is contained in:
commit
9da42b7eb8
|
@ -163,4 +163,12 @@ geos_c.h
|
|||
source/libs/parser/src/sql.c
|
||||
include/common/ttokenauto.h
|
||||
!packaging/smokeTest/pytest_require.txt
|
||||
tdengine-test-dir
|
||||
tdengine-test-dir/
|
||||
localtime.c
|
||||
private.h
|
||||
strftime.c
|
||||
tzdir.h
|
||||
tzfile.h
|
||||
coverage.info
|
||||
taos
|
||||
taosd
|
||||
|
|
|
@ -7,6 +7,9 @@ file_zh_changed = ''
|
|||
file_en_changed = ''
|
||||
file_no_doc_changed = '1'
|
||||
file_only_tdgpt_change_except = '1'
|
||||
tdgpt_file = "forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics"
|
||||
|
||||
|
||||
def abortPreviousBuilds() {
|
||||
def currentJobName = env.JOB_NAME
|
||||
def currentBuildNumber = env.BUILD_NUMBER.toInteger()
|
||||
|
@ -67,7 +70,7 @@ def check_docs(){
|
|||
returnStdout: true
|
||||
)
|
||||
|
||||
file_no_doc_changed = sh (
|
||||
def file_no_doc_changed = sh (
|
||||
script: '''
|
||||
cd ${WKC}
|
||||
git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" || :
|
||||
|
@ -78,7 +81,7 @@ def check_docs(){
|
|||
file_only_tdgpt_change_except = sh (
|
||||
script: '''
|
||||
cd ${WKC}
|
||||
git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v "forecastoperator.c\\|anomalywindowoperator.c" |grep -v "tsim/analytics" |grep -v "tdgpt_cases.task" || :
|
||||
git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -Ev "forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics" ||:
|
||||
''',
|
||||
returnStdout: true
|
||||
).trim()
|
||||
|
@ -570,7 +573,7 @@ pipeline {
|
|||
cd ${WKC}/tests/parallel_test
|
||||
./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + '''
|
||||
'''
|
||||
if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tsim\/analytics|tdgpt_cases.task/ ) {
|
||||
if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics/ ) {
|
||||
sh '''
|
||||
cd ${WKC}/tests/parallel_test
|
||||
export DEFAULT_RETRY_TIME=2
|
||||
|
|
|
@ -10,8 +10,6 @@
|
|||
</p>
|
||||
<p>
|
||||
|
||||
[](https://cloud.drone.io/taosdata/TDengine)
|
||||
[](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
|
||||
[](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
|
||||
[](https://bestpractices.coreinfrastructure.org/projects/4201)
|
||||
<br />
|
||||
|
|
|
@ -131,7 +131,7 @@ IF(TD_WINDOWS)
|
|||
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
|
||||
ENDIF()
|
||||
|
||||
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
|
||||
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO /FORCE:MULTIPLE")
|
||||
|
||||
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
|
||||
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
IF (DEFINED VERNUMBER)
|
||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||
ELSE ()
|
||||
SET(TD_VER_NUMBER "3.3.4.8.alpha")
|
||||
SET(TD_VER_NUMBER "3.3.5.0.alpha")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED VERCOMPATIBLE)
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# libuv
|
||||
ExternalProject_Add(libuv
|
||||
GIT_REPOSITORY https://github.com/libuv/libuv.git
|
||||
GIT_TAG v1.48.0
|
||||
GIT_TAG v1.49.2
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/libuv"
|
||||
BINARY_DIR "${TD_CONTRIB_DIR}/libuv"
|
||||
CONFIGURE_COMMAND ""
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taosadapter
|
||||
ExternalProject_Add(taosadapter
|
||||
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
||||
GIT_TAG main
|
||||
GIT_TAG 3.0
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taos-tools
|
||||
ExternalProject_Add(taos-tools
|
||||
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
|
||||
GIT_TAG main
|
||||
GIT_TAG 3.0
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taosws-rs
|
||||
ExternalProject_Add(taosws-rs
|
||||
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
|
||||
GIT_TAG main
|
||||
GIT_TAG 3.0
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -7,19 +7,19 @@ import Tabs from "@theme/Tabs";
|
|||
import TabItem from "@theme/TabItem";
|
||||
import Image from '@theme/IdealImage';
|
||||
import imgConnect from '../assets/connecting-to-tdengine-01.png';
|
||||
import ConnJava from "./_connect_java.mdx";
|
||||
import ConnGo from "./_connect_go.mdx";
|
||||
import ConnRust from "./_connect_rust.mdx";
|
||||
import ConnNode from "./_connect_node.mdx";
|
||||
import ConnPythonNative from "./_connect_python.mdx";
|
||||
import ConnCSNative from "./_connect_cs.mdx";
|
||||
import ConnC from "./_connect_c.mdx";
|
||||
import InstallOnLinux from "../14-reference/05-connector/_linux_install.mdx";
|
||||
import InstallOnWindows from "../14-reference/05-connector/_windows_install.mdx";
|
||||
import InstallOnMacOS from "../14-reference/05-connector/_macos_install.mdx";
|
||||
import VerifyLinux from "../14-reference/05-connector/_verify_linux.mdx";
|
||||
import VerifyMacOS from "../14-reference/05-connector/_verify_macos.mdx";
|
||||
import VerifyWindows from "../14-reference/05-connector/_verify_windows.mdx";
|
||||
import ConnJava from "../assets/resources/_connect_java.mdx";
|
||||
import ConnGo from "../assets/resources/_connect_go.mdx";
|
||||
import ConnRust from "../assets/resources/_connect_rust.mdx";
|
||||
import ConnNode from "../assets/resources/_connect_node.mdx";
|
||||
import ConnPythonNative from "../assets/resources/_connect_python.mdx";
|
||||
import ConnCSNative from "../assets/resources/_connect_cs.mdx";
|
||||
import ConnC from "../assets/resources/_connect_c.mdx";
|
||||
import InstallOnLinux from "../assets/resources/_linux_install.mdx";
|
||||
import InstallOnWindows from "../assets/resources/_windows_install.mdx";
|
||||
import InstallOnMacOS from "../assets/resources/_macos_install.mdx";
|
||||
import VerifyLinux from "../assets/resources/_verify_linux.mdx";
|
||||
import VerifyMacOS from "../assets/resources/_verify_macos.mdx";
|
||||
import VerifyWindows from "../assets/resources/_verify_windows.mdx";
|
||||
|
||||
TDengine provides a rich set of application development interfaces. To facilitate users in quickly developing their applications, TDengine supports connectors for multiple programming languages. The official connectors include support for C/C++, Java, Python, Go, Node.js, C#, Rust, Lua (community contribution), and PHP (community contribution). These connectors support connecting to the TDengine cluster using the native interface (taosc) and REST interface (not supported in some languages yet). Community developers have also contributed several unofficial connectors, such as ADO.NET connector, Lua connector, and PHP connector. Additionally, TDengine can directly call the REST API provided by taosadapter for data writing and querying operations.
|
||||
|
||||
|
|
|
@ -18,14 +18,14 @@ create user user_name pass'password' [sysinfo {1|0}] [createdb {1|0}]
|
|||
The parameters are explained as follows.
|
||||
|
||||
- user_name: Up to 23 B long.
|
||||
- password: Up to 128 B long, valid characters include letters and numbers as well as special characters other than single and double quotes, apostrophes, backslashes, and spaces, and it cannot be empty.
|
||||
- password: The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. Special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`.
|
||||
- sysinfo: Whether the user can view system information. 1 means they can view it, 0 means they cannot. System information includes server configuration information, various node information such as dnode, query node (qnode), etc., as well as storage-related information, etc. The default is to view system information.
|
||||
- createdb: Whether the user can create databases. 1 means they can create databases, 0 means they cannot. The default value is 0. // Supported starting from TDengine Enterprise version 3.3.2.0
|
||||
|
||||
The following SQL can create a user named test with the password 123456 who can view system information.
|
||||
The following SQL can create a user named test with the password abc123!@# who can view system information.
|
||||
|
||||
```sql
|
||||
create user test pass '123456' sysinfo 1
|
||||
create user test pass 'abc123!@#' sysinfo 1
|
||||
```
|
||||
|
||||
### Viewing Users
|
||||
|
|
|
@ -3,7 +3,7 @@ title: Prometheus
|
|||
slug: /third-party-tools/data-collection/prometheus
|
||||
---
|
||||
|
||||
import Prometheus from "./_prometheus.mdx"
|
||||
import Prometheus from "../../assets/resources/_prometheus.mdx"
|
||||
|
||||
Prometheus is a popular open-source monitoring and alerting system. In 2016, Prometheus joined the Cloud Native Computing Foundation (CNCF), becoming the second hosted project after Kubernetes. The project has a very active developer and user community.
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@ title: Telegraf
|
|||
slug: /third-party-tools/data-collection/telegraf
|
||||
---
|
||||
|
||||
import Telegraf from "./_telegraf.mdx"
|
||||
import Telegraf from "../../assets/resources/_telegraf.mdx"
|
||||
|
||||
Telegraf is a very popular open-source metric collection software. In data collection and platform monitoring systems, Telegraf can collect operational information from various components without the need to manually write scripts for periodic collection, reducing the difficulty of data acquisition.
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@ title: collectd
|
|||
slug: /third-party-tools/data-collection/collectd
|
||||
---
|
||||
|
||||
import CollectD from "./_collectd.mdx"
|
||||
import CollectD from "../../assets/resources/_collectd.mdx"
|
||||
|
||||
collectd is a daemon for collecting system performance. collectd provides various storage mechanisms to store different values. It periodically collects relevant statistical information about the system while it is running and storing information. Utilizing this information helps identify current system performance bottlenecks and predict future system loads.
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@ title: StatsD
|
|||
slug: /third-party-tools/data-collection/statsd
|
||||
---
|
||||
|
||||
import StatsD from "./_statsd.mdx"
|
||||
import StatsD from "../../assets/resources/_statsd.mdx"
|
||||
|
||||
StatsD is a simple daemon for aggregating and summarizing application metrics that has rapidly evolved in recent years into a unified protocol for collecting application performance metrics.
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@ title: Icinga2
|
|||
slug: /third-party-tools/data-collection/icinga2
|
||||
---
|
||||
|
||||
import Icinga2 from "./_icinga2.mdx"
|
||||
import Icinga2 from "../../assets/resources/_icinga2.mdx"
|
||||
|
||||
icinga2 is an open-source host and network monitoring software, originally developed from the Nagios network monitoring application. Currently, icinga2 is released under the GNU GPL v2 license.
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@ title: TCollector
|
|||
slug: /third-party-tools/data-collection/tcollector
|
||||
---
|
||||
|
||||
import TCollector from "./_tcollector.mdx"
|
||||
import TCollector from "../../assets/resources/_tcollector.mdx"
|
||||
|
||||
TCollector is part of openTSDB, used for collecting client logs and sending them to the database.
|
||||
|
||||
|
|
|
@ -22,15 +22,11 @@ import imgStep11 from '../../assets/grafana-11.png';
|
|||
|
||||
This document describes how to integrate the TDengine data source with the open-source data visualization system [Grafana](https://www.grafana.com/) to achieve data visualization and build a monitoring and alert system. With the TDengine plugin, you can easily display data from TDengine tables on Grafana dashboards without the need for complex development work.
|
||||
|
||||
## Grafana Version Requirements
|
||||
|
||||
TDengine currently supports Grafana version 7.5 and above. It is recommended to use the latest version. Please download and install the corresponding version of Grafana according to your system environment.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
To add the TDengine data source to Grafana normally, the following preparations are needed.
|
||||
|
||||
- Grafana service has been deployed and is running normally.
|
||||
- Grafana service has been deployed and is running normally. TDengine currently supports Grafana version 7.5 and above. It is recommended to use the latest version.
|
||||
**Note**: Ensure that the account starting Grafana has write permissions to its installation directory, otherwise you may not be able to install plugins later.
|
||||
- TDengine cluster has been deployed and is running normally.
|
||||
- taosAdapter has been installed and is running normally. For details, please refer to the [taosAdapter user manual](../../../tdengine-reference/components/taosadapter/)
|
||||
|
|
|
@ -13,13 +13,11 @@ Seeq is advanced analytics software for the manufacturing and Industrial Interne
|
|||
|
||||
Through the TDengine Java connector, Seeq can easily support querying time-series data provided by TDengine and offer data presentation, analysis, prediction, and other functions.
|
||||
|
||||
## Seeq Installation Method
|
||||
## Prerequisites
|
||||
|
||||
Download the relevant software from [Seeq's official website](https://www.seeq.com/customer-download), such as Seeq Server and Seeq Data Lab, etc. Seeq Data Lab needs to be installed on a different server from Seeq Server and interconnected through configuration. For detailed installation and configuration instructions, refer to the [Seeq Knowledge Base](https://support.seeq.com/kb/latest/cloud/).
|
||||
- Seeq has been installed. Download the relevant software from [Seeq's official website](https://www.seeq.com/customer-download), such as Seeq Server and Seeq Data Lab, etc. Seeq Data Lab needs to be installed on a different server from Seeq Server and interconnected through configuration. For detailed installation and configuration instructions, refer to the [Seeq Knowledge Base](https://support.seeq.com/kb/latest/cloud/).
|
||||
|
||||
### TDengine Local Instance Installation Method
|
||||
|
||||
Please refer to the [official documentation](../../../get-started).
|
||||
- TDengine local instance has been installed. Please refer to the [official documentation](../../../get-started). If using TDengine Cloud, please go to https://cloud.taosdata.com apply for an account and log in to see how to access TDengine Cloud.
|
||||
|
||||
## Configuring Seeq to Access TDengine
|
||||
|
||||
|
|
|
@ -0,0 +1,75 @@
|
|||
---
|
||||
sidebar_label: Superset
|
||||
title: Integration With Superset
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
Apache Superset is a modern enterprise level business intelligence (BI) web application primarily used for data exploration and visualization.
|
||||
It is supported by the Apache Software Foundation and is an open source project with an active community and rich ecosystem.
|
||||
Apache Superset provides an intuitive user interface that makes creating, sharing, and visualizing data simple, while supporting multiple data sources and rich visualization options.
|
||||
|
||||
Through the Python connector of TDengine, Superset can support TDengine data sources and provide functions such as data presentation and analysis
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Prepare the following environment:
|
||||
- TDengine is installed and running normally (both Enterprise and Community versions are available)
|
||||
- taosAdapter is running normally, refer to [taosAdapter](../../../reference/components/taosAdapter)
|
||||
- Apache Superset version 2.1.0 or above is already installed, refre to [Apache Superset](https://superset.apache.org/)
|
||||
|
||||
## Install TDengine Python Connector
|
||||
|
||||
The Python connector of TDengine comes with a connection driver that supports Superset in versions 2.1.18 and later, which will be automatically installed in the Superset directory and provide data source services.
|
||||
The connection uses the WebSocket protocol, so it is necessary to install the `taos-ws-py` component of TDengine separately. The complete installation script is as follows:
|
||||
```bash
|
||||
pip3 install taospy
|
||||
pip3 install taos-ws-py
|
||||
```
|
||||
|
||||
## Configure TDengine Connection In Superset
|
||||
|
||||
**Step 1**, enter the new database connection page, "Superset" → "Setting" → "Database Connections" → "+DATABASE"
|
||||
**Step 2**, select TDengine database connection, select the "TDengine" option from the drop-down list of "SUPPORTED DATABASES".
|
||||
:::tip
|
||||
If there is no TDengine option in the drop-down list, please confirm that the steps of installing, `Superset` is first and `Python Connector` is second.
|
||||
:::
|
||||
**Step 3**, write a name of connection in "DISPLAY NAME"
|
||||
**Step 4**, The "SQLALCHEMY URL" field is a key connection information string, and it must be filled in correctly
|
||||
```bash
|
||||
taosws://user:password@host:port
|
||||
```
|
||||
| Parameter | <center>Parameter Description</center> |
|
||||
|:---------- |:--------------------------------------------------------- |
|
||||
|user | Username for logging into TDengine database |
|
||||
|password | Password for logging into TDengine database |
|
||||
|host | Name of the host where the TDengine database is located |
|
||||
|port | The port that provides WebSocket services, default is 6041 |
|
||||
|
||||
Example:
|
||||
The TDengine database installed on this machine provides WebSocket service port 6041, using the default username and password, "SQLALCHEMY URL" is:
|
||||
```bash
|
||||
taosws://root:taosdata@localhost:6041
|
||||
```
|
||||
**Step 5**, configure the connection string, click "TEST CONNECTION" to test if the connection can be successful. After passing the test, click the "CONNECT" button to complete the connection
|
||||
|
||||
|
||||
## Start
|
||||
|
||||
There is no difference in the use of TDengine data source compared to other data sources. Here is a brief introduction to basic data queries:
|
||||
1. Click the "+" button in the upper right corner of the Superset interface, select "SQL query", and enter the query interface
|
||||
2. Select the "TDengine" data source that has been created earlier from the dropdown list of "DATABASES" in the upper left corner
|
||||
3. Select the name of the database to be operated on from the drop-down list of "SCHEMA" (system libraries are not displayed)
|
||||
4. "SEE TABLE SCHEMA" select the name of the super table or regular table to be operated on (sub tables are not displayed)
|
||||
5. Subsequently, the schema information of the selected table will be displayed in the following area
|
||||
6. In the SQL editor area, any SQL statement that conforms to TDengine syntax can be entered for execution
|
||||
|
||||
## Example
|
||||
|
||||
We chose two popular templates from the Superset Chart template to showcase their effects, using smart meter data as an example:
|
||||
|
||||
1. "Aggregate" Type, which displays the maximum voltage value collected per minute during the specified time period in Group 4
|
||||
|
||||

|
||||
|
||||
2. "RAW RECORDS" Type, which displays the collected values of current and voltage during the specified time period in Group 4
|
||||
|
||||

|
Binary file not shown.
After Width: | Height: | Size: 650 KiB |
Binary file not shown.
After Width: | Height: | Size: 784 KiB |
|
@ -28,68 +28,70 @@ After modifying configuration file parameters, you need to restart the *taosd* s
|
|||
|
||||
### Connection Related
|
||||
|
||||
|Parameter Name |Supported Version |Description|
|
||||
|-----------------------|-------------------------|------------|
|
||||
|firstEp | |Endpoint of the first dnode in the cluster that taosd actively connects to at startup, default value localhost:6030|
|
||||
|secondEp | |Endpoint of the second dnode in the cluster that taosd tries to connect to if the firstEp is unreachable, no default value|
|
||||
|fqdn | |The service address that taosd listens on, default is the first hostname configured on the server|
|
||||
|serverPort | |The port that taosd listens on, default value 6030|
|
||||
|compressMsgSize | |Whether to compress RPC messages; -1: do not compress any messages; 0: compress all messages; N (N>0): only compress messages larger than N bytes; default value -1|
|
||||
|shellActivityTimer | |Duration in seconds for the client to send heartbeat to mnode, range 1-120, default value 3 |
|
||||
|numOfRpcSessions | |Maximum number of connections supported by RPC, range 100-100000, default value 30000|
|
||||
|numOfRpcThreads | |Number of threads for receiving and sending RPC data, range 1-50, default value is half of the CPU cores|
|
||||
|numOfTaskQueueThreads | |Number of threads for client to process RPC messages, range 4-16, default value is half of the CPU cores|
|
||||
|rpcQueueMemoryAllowed | |Maximum memory allowed for received RPC messages in dnode, in bytes, range 104857600-INT64_MAX, default value is 1/10 of server memory |
|
||||
|resolveFQDNRetryTime | Cancelled after 3.x |Number of retries when FQDN resolution fails|
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
|firstEp | |Not supported |Endpoint of the first dnode in the cluster that taosd actively connects to at startup, default value localhost:6030|
|
||||
|secondEp | |Not supported |Endpoint of the second dnode in the cluster that taosd tries to connect to if the firstEp is unreachable, no default value|
|
||||
|fqdn | |Not supported |The service address that taosd listens on, default is the first hostname configured on the server|
|
||||
|serverPort | |Not supported |The port that taosd listens on, default value 6030|
|
||||
|compressMsgSize | |Supported, effective after restart|Whether to compress RPC messages; -1: do not compress any messages; 0: compress all messages; N (N>0): only compress messages larger than N bytes; default value -1|
|
||||
|shellActivityTimer | |Supported, effective immediately |Duration in seconds for the client to send heartbeat to mnode, range 1-120, default value 3 |
|
||||
|numOfRpcSessions | |Supported, effective after restart|Maximum number of connections supported by RPC, range 100-100000, default value 30000|
|
||||
|numOfRpcThreads | |Supported, effective after restart|Number of threads for receiving and sending RPC data, range 1-50, default value is half of the CPU cores|
|
||||
|numOfTaskQueueThreads | |Supported, effective after restart|Number of threads for client to process RPC messages, range 4-16, default value is half of the CPU cores|
|
||||
|rpcQueueMemoryAllowed | |Supported, effective immediately |Maximum memory allowed for received RPC messages in dnode, in bytes, range 104857600-INT64_MAX, default value is 1/10 of server memory |
|
||||
|resolveFQDNRetryTime | Cancelled after 3.x |Not supported |Number of retries when FQDN resolution fails|
|
||||
|timeToGetAvailableConn | Cancelled after 3.3.4.x |Maximum waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|
||||
|maxShellConns | Cancelled after 3.x |Maximum number of connections allowed|
|
||||
|maxRetryWaitTime | |Maximum timeout for reconnection, default value is 10s|
|
||||
|shareConnLimit |Added in 3.3.4.0 |Number of requests a connection can share, range 1-512, default value 10|
|
||||
|readTimeout |Added in 3.3.4.0 |Minimum timeout for a single request, range 64-604800, in seconds, default value 900|
|
||||
|maxShellConns | Cancelled after 3.x |Supported, effective after restart|Maximum number of connections allowed|
|
||||
|maxRetryWaitTime | |Supported, effective after restart|Maximum timeout for reconnection, default value is 10s|
|
||||
|shareConnLimit |Added in 3.3.4.0 |Supported, effective after restart|Number of requests a connection can share, range 1-512, default value 10|
|
||||
|readTimeout |Added in 3.3.4.0 |Supported, effective after restart|Minimum timeout for a single request, range 64-604800, in seconds, default value 900|
|
||||
|
||||
### Monitoring Related
|
||||
|
||||
|Parameter Name|Supported Version|Description|
|
||||
|-----------------------|----------|-|
|
||||
|monitor | |Whether to collect and report monitoring data, 0: off; 1: on; default value 0|
|
||||
|monitorFqdn | |The FQDN of the server where the taosKeeper service is located, default value none|
|
||||
|monitorPort | |The port number listened to by the taosKeeper service, default value 6043|
|
||||
|monitorInterval | |The time interval for recording system parameters (CPU/memory) in the monitoring database, in seconds, range 1-200000, default value 30|
|
||||
|monitorMaxLogs | |Number of cached logs pending report|
|
||||
|monitorComp | |Whether to use compression when reporting monitoring logs|
|
||||
|monitorLogProtocol | |Whether to print monitoring logs|
|
||||
|monitorForceV2 | |Whether to use V2 protocol for reporting|
|
||||
|telemetryReporting | |Whether to upload telemetry, 0: do not upload, 1: upload, default value 1|
|
||||
|telemetryServer | |Telemetry server address|
|
||||
|telemetryPort | |Telemetry server port number|
|
||||
|telemetryInterval | |Telemetry upload interval, in seconds, default 43200|
|
||||
|crashReporting | |Whether to upload crash information; 0: do not upload, 1: upload; default value 1|
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
|monitor | |Supported, effective immediately |Whether to collect and report monitoring data, 0: off; 1: on; default value 0|
|
||||
|monitorFqdn | |Supported, effective after restart|The FQDN of the server where the taosKeeper service is located, default value none|
|
||||
|monitorPort | |Supported, effective after restart|The port number listened to by the taosKeeper service, default value 6043|
|
||||
|monitorInterval | |Supported, effective immediately |The time interval for recording system parameters (CPU/memory) in the monitoring database, in seconds, range 1-200000, default value 30|
|
||||
|monitorMaxLogs | |Supported, effective immediately |Number of cached logs pending report|
|
||||
|monitorComp | |Supported, effective after restart|Whether to use compression when reporting monitoring logs|
|
||||
|monitorLogProtocol | |Supported, effective immediately |Whether to print monitoring logs|
|
||||
|monitorForceV2 | |Supported, effective immediately |Whether to use V2 protocol for reporting|
|
||||
|telemetryReporting | |Supported, effective immediately |Whether to upload telemetry, 0: do not upload, 1: upload, default value 1|
|
||||
|telemetryServer | |Not supported |Telemetry server address|
|
||||
|telemetryPort | |Not supported |Telemetry server port number|
|
||||
|telemetryInterval | |Supported, effective immediately |Telemetry upload interval, in seconds, default 86400|
|
||||
|crashReporting | |Supported, effective immediately |Whether to upload crash information; 0: do not upload, 1: upload; default value 1|
|
||||
|
||||
### Query Related
|
||||
|
||||
|Parameter Name|Supported Version|Description|
|
||||
|------------------------|----------|-|
|
||||
|countAlwaysReturnValue | |Whether count/hyperloglog functions return a value when input data is empty or NULL; 0: return empty row, 1: return; default value 1; When this parameter is set to 1, if the query contains an INTERVAL clause or the query uses TSMA, and the corresponding group or window has empty or NULL data, the corresponding group or window will not return a query result; Note that this parameter should be consistent between client and server|
|
||||
|tagFilterCache | |Whether to cache tag filter results|
|
||||
|maxNumOfDistinctRes | |Maximum number of distinct results allowed to return, default value 100,000, maximum allowed value 100 million|
|
||||
|queryBufferSize | |Not effective yet|
|
||||
|queryRspPolicy | |Query response strategy|
|
||||
|filterScalarMode | |Force scalar filter mode, 0: off; 1: on, default value 0|
|
||||
|queryPlannerTrace | |Internal parameter, whether the query plan outputs detailed logs|
|
||||
|queryNodeChunkSize | |Internal parameter, chunk size of the query plan|
|
||||
|queryUseNodeAllocator | |Internal parameter, allocation method of the query plan|
|
||||
|queryMaxConcurrentTables| |Internal parameter, concurrency number of the query plan|
|
||||
|queryRsmaTolerance | |Internal parameter, tolerance time for determining which level of rsma data to query, in milliseconds|
|
||||
|enableQueryHb | |Internal parameter, whether to send query heartbeat messages|
|
||||
|pqSortMemThreshold | |Internal parameter, memory threshold for sorting|
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
|countAlwaysReturnValue | |Supported, effective immediately |Whether count/hyperloglog functions return a value when input data is empty or NULL; 0: return empty row, 1: return; default value 1; When this parameter is set to 1, if the query contains an INTERVAL clause or the query uses TSMA, and the corresponding group or window has empty or NULL data, the corresponding group or window will not return a query result; Note that this parameter should be consistent between client and server|
|
||||
|tagFilterCache | |Not supported |Whether to cache tag filter results|
|
||||
|queryBufferSize | |Supported, effective after restart|Not effective yet|
|
||||
|queryRspPolicy | |Supported, effective immediately |Query response strategy|
|
||||
|queryUseMemoryPool | |Not supported |Whether query will use memory pool to manage memory, default value: 1 (on); 0: off, 1: on|
|
||||
|minReservedMemorySize | |Not supported |The minimum reserved system available memory size, all memory except reserved can be used for queries, unit: MB, default reserved size is 20% of system physical memory, value range 1024-1000000000|
|
||||
|singleQueryMaxMemorySize| |Not supported |The memory limit that a single query can use on a single node (dnode), exceeding this limit will return an error, unit: MB, default value: 0 (no limit), value range 0-1000000000|
|
||||
|filterScalarMode | |Not supported |Force scalar filter mode, 0: off; 1: on, default value 0|
|
||||
|queryPlannerTrace | |Supported, effective immediately |Internal parameter, whether the query plan outputs detailed logs|
|
||||
|queryNodeChunkSize | |Supported, effective immediately |Internal parameter, chunk size of the query plan|
|
||||
|queryUseNodeAllocator | |Supported, effective immediately |Internal parameter, allocation method of the query plan|
|
||||
|queryMaxConcurrentTables| |Not supported |Internal parameter, concurrency number of the query plan|
|
||||
|queryRsmaTolerance | |Not supported |Internal parameter, tolerance time for determining which level of rsma data to query, in milliseconds|
|
||||
|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages|
|
||||
|pqSortMemThreshold | |Not supported |Internal parameter, memory threshold for sorting|
|
||||
|
||||
### Region Related
|
||||
|
||||
|Parameter Name|Supported Version|Description|
|
||||
|-----------------|----------|-|
|
||||
|timezone | |Time zone; defaults to dynamically obtaining the current time zone setting from the system|
|
||||
|locale | |System locale information and encoding format, defaults to obtaining from the system|
|
||||
|charset | |Character set encoding, defaults to obtaining from the system|
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
|timezone | |Not supported |Time zone; defaults to dynamically obtaining the current time zone setting from the system|
|
||||
|locale | |Not supported |System locale information and encoding format, defaults to obtaining from the system|
|
||||
|charset | |Not supported |Character set encoding, defaults to obtaining from the system|
|
||||
|
||||
:::info
|
||||
|
||||
|
The effective value of charset is UTF-8.
|
|||
|
||||
### Storage Related
|
||||
|
||||
|Parameter Name|Supported Version|Description|
|
||||
|--------------------|----------|-|
|
||||
|dataDir | |Directory for data files, all data files are written to this directory, default value /var/lib/taos|
|
||||
|tempDir | |Specifies the directory for generating temporary files during system operation, default value /tmp|
|
||||
|minimalDataDirGB | |Minimum space to be reserved in the time-series data storage directory specified by dataDir, in GB, default value 2|
|
||||
|minimalTmpDirGB | |Minimum space to be reserved in the temporary file directory specified by tempDir, in GB, default value 1|
|
||||
|minDiskFreeSize |After 3.1.1.0|When the available space on a disk is less than or equal to this threshold, the disk will no longer be selected for generating new data files, unit is bytes, range 52428800-1073741824, default value 52428800; Enterprise parameter|
|
||||
|s3MigrateIntervalSec|After 3.3.4.3|Trigger cycle for automatic upload of local data files to S3, in seconds. Minimum: 600; Maximum: 100000. Default value 3600; Enterprise parameter|
|
||||
|s3MigrateEnabled |After 3.3.4.3|Whether to automatically perform S3 migration, default value is 0, which means auto S3 migration is off, can be set to 1; Enterprise parameter|
|
||||
|s3Accesskey |After 3.3.4.3|Colon-separated user SecretId:SecretKey, for example AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E; Enterprise parameter|
|
||||
|s3Endpoint |After 3.3.4.3|COS service domain name in the user's region, supports http and https, the region of the bucket must match the endpoint, otherwise it cannot be accessed; Enterprise parameter|
|
||||
|s3BucketName |After 3.3.4.3|Bucket name, followed by a hyphen and the AppId of the user registered COS service, where AppId is unique to COS, not present in AWS and Alibaba Cloud, needs to be part of the bucket name, separated by a hyphen; parameter values are string type, but do not need quotes; for example test0711-1309024725; Enterprise parameter|
|
||||
|s3PageCacheSize |After 3.3.4.3|Number of S3 page cache pages, range 4-1048576, unit is pages, default value 4096; Enterprise parameter|
|
||||
|s3UploadDelaySec |After 3.3.4.3|How long a data file remains unchanged before being uploaded to S3, range 1-2592000 (30 days), in seconds, default value 60; Enterprise parameter|
|
||||
|cacheLazyLoadThreshold| |Internal parameter, cache loading strategy|
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
|dataDir | |Not supported |Directory for data files, all data files are written to this directory, default value /var/lib/taos|
|
||||
|tempDir | |Not supported |Specifies the directory for generating temporary files during system operation, default value /tmp|
|
||||
|minimalDataDirGB | |Not supported |Minimum space to be reserved in the time-series data storage directory specified by dataDir, in GB, default value 2|
|
||||
|minimalTmpDirGB | |Not supported |Minimum space to be reserved in the temporary file directory specified by tempDir, in GB, default value 1|
|
||||
|minDiskFreeSize |After 3.1.1.0|Supported, effective immediately |When the available space on a disk is less than or equal to this threshold, the disk will no longer be selected for generating new data files, unit is bytes, range 52428800-1073741824, default value 52428800; Enterprise parameter|
|
||||
|s3MigrateIntervalSec|After 3.3.4.3|Supported, effective immediately |Trigger cycle for automatic upload of local data files to S3, in seconds. Minimum: 600; Maximum: 100000. Default value 3600; Enterprise parameter|
|
||||
|s3MigrateEnabled |After 3.3.4.3|Supported, effective immediately |Whether to automatically perform S3 migration, default value is 0, which means auto S3 migration is off, can be set to 1; Enterprise parameter|
|
||||
|s3Accesskey |After 3.3.4.3|Supported, effective after restart|Colon-separated user SecretId:SecretKey, for example AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E; Enterprise parameter|
|
||||
|s3Endpoint |After 3.3.4.3|Supported, effective after restart|COS service domain name in the user's region, supports http and https, the region of the bucket must match the endpoint, otherwise it cannot be accessed; Enterprise parameter|
|
||||
|s3BucketName |After 3.3.4.3|Supported, effective after restart|Bucket name, followed by a hyphen and the AppId of the user registered COS service, where AppId is unique to COS, not present in AWS and Alibaba Cloud, needs to be part of the bucket name, separated by a hyphen; parameter values are string type, but do not need quotes; for example test0711-1309024725; Enterprise parameter|
|
||||
|s3PageCacheSize |After 3.3.4.3|Supported, effective after restart|Number of S3 page cache pages, range 4-1048576, unit is pages, default value 4096; Enterprise parameter|
|
||||
|s3UploadDelaySec |After 3.3.4.3|Supported, effective immediately |How long a data file remains unchanged before being uploaded to S3, range 1-2592000 (30 days), in seconds, default value 60; Enterprise parameter|
|
||||
|cacheLazyLoadThreshold| |Supported, effective immediately |Internal parameter, cache loading strategy|
|
||||
|
||||
### Cluster Related
|
||||
|
||||
|Parameter Name|Supported Version|Description|
|
||||
|--------------------------|----------|-|
|
||||
|supportVnodes | |Maximum number of vnodes supported by a dnode, range 0-4096, default value is twice the number of CPU cores + 5|
|
||||
|numOfCommitThreads | |Maximum number of commit threads, range 0-1024, default value 4|
|
||||
|numOfMnodeReadThreads | |Number of Read threads for mnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||
|numOfVnodeQueryThreads | |Number of Query threads for vnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|
||||
|numOfVnodeFetchThreads | |Number of Fetch threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||
|numOfVnodeRsmaThreads | |Number of Rsma threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||
|numOfQnodeQueryThreads | |Number of Query threads for qnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|
||||
|numOfSnodeSharedThreads | |Number of shared threads for snode, range 0-1024, default value is one quarter of the CPU cores (not less than 2, not exceeding 4)|
|
||||
|numOfSnodeUniqueThreads | |Number of exclusive threads for snode, range 0-1024, default value is one quarter of the CPU cores (not less than 2, not exceeding 4)|
|
||||
|ratioOfVnodeStreamThreads | |Ratio of stream computing using vnode threads, range 0.01-4, default value 4|
|
||||
|ttlUnit | |Unit for ttl parameter, range 1-31572500, in seconds, default value 86400|
|
||||
|ttlPushInterval | |Frequency of ttl timeout checks, range 1-100000, in seconds, default value 10|
|
||||
|ttlChangeOnWrite | |Whether ttl expiration time changes with table modification; 0: no change, 1: change; default value 0|
|
||||
|ttlBatchDropNum | |Number of subtables deleted in a batch for ttl, minimum value 0, default value 10000|
|
||||
|retentionSpeedLimitMB | |Speed limit for data migration across different levels of disks, range 0-1024, in MB, default value 0, which means no limit|
|
||||
|maxTsmaNum | |Maximum number of TSMAs that can be created in the cluster; range 0-3; default value 3|
|
||||
|tmqMaxTopicNum | |Maximum number of topics that can be established for subscription; range 1-10000; default value 20|
|
||||
|tmqRowSize | |Maximum number of records in a subscription data block, range 1-1000000, default value 4096|
|
||||
|audit | |Audit feature switch; Enterprise parameter|
|
||||
|auditInterval | |Time interval for reporting audit data; Enterprise parameter|
|
||||
|auditCreateTable | |Whether to enable audit feature for creating subtables; Enterprise parameter|
|
||||
|encryptAlgorithm | |Data encryption algorithm; Enterprise parameter|
|
||||
|encryptScope | |Encryption scope; Enterprise parameter|
|
||||
|enableWhiteList | |Switch for whitelist feature; Enterprise parameter|
|
||||
|syncLogBufferMemoryAllowed| |Maximum memory allowed for sync log cache messages for a dnode, in bytes, range 104857600-INT64_MAX, default value is 1/10 of server memory, effective from versions 3.1.3.2/3.3.2.13|
|
||||
|syncElectInterval | |Internal parameter, for debugging synchronization module|
|
||||
|syncHeartbeatInterval | |Internal parameter, for debugging synchronization module|
|
||||
|syncHeartbeatTimeout | |Internal parameter, for debugging synchronization module|
|
||||
|syncSnapReplMaxWaitN     | |Internal parameter, for debugging synchronization module|
|arbHeartBeatIntervalSec | |Internal parameter, for debugging synchronization module|
|
||||
|arbCheckSyncIntervalSec | |Internal parameter, for debugging synchronization module|
|
||||
|arbSetAssignedTimeoutSec | |Internal parameter, for debugging synchronization module|
|
||||
|mndSdbWriteDelta | |Internal parameter, for debugging mnode module|
|
||||
|mndLogRetention | |Internal parameter, for debugging mnode module|
|
||||
|skipGrant | |Internal parameter, for authorization checks|
|
||||
|trimVDbIntervalSec | |Internal parameter, for deleting expired data|
|
||||
|ttlFlushThreshold | |Internal parameter, frequency of ttl timer|
|
||||
|compactPullupInterval | |Internal parameter, frequency of data reorganization timer|
|
||||
|walFsyncDataSizeLimit | |Internal parameter, threshold for WAL to perform FSYNC|
|
||||
|transPullupInterval | |Internal parameter, retry interval for mnode to execute transactions|
|
||||
|mqRebalanceInterval | |Internal parameter, interval for consumer rebalancing|
|
||||
|uptimeInterval | |Internal parameter, for recording system uptime|
|
||||
|timeseriesThreshold | |Internal parameter, for usage statistics|
|
||||
|udf | |Whether to start UDF service; 0: do not start, 1: start; default value 0 |
|
||||
|udfdResFuncs | |Internal parameter, for setting UDF result sets|
|
||||
|udfdLdLibPath | |Internal parameter, indicates the library path for loading UDF|
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
|supportVnodes | |Supported, effective immediately |Maximum number of vnodes supported by a dnode, range 0-4096, default value is twice the number of CPU cores + 5|
|
||||
|numOfCommitThreads | |Supported, effective after restart|Maximum number of commit threads, range 0-1024, default value 4|
|
||||
|numOfMnodeReadThreads | |Supported, effective after restart|Number of Read threads for mnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||
|numOfVnodeQueryThreads | |Supported, effective after restart|Number of Query threads for vnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|
||||
|numOfVnodeFetchThreads | |Supported, effective after restart|Number of Fetch threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||
|numOfVnodeRsmaThreads | |Supported, effective after restart|Number of Rsma threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||
|numOfQnodeQueryThreads | |Supported, effective after restart|Number of Query threads for qnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|
||||
|numOfSnodeSharedThreads | |Supported, effective after restart|Number of shared threads for snode, range 0-1024, default value is one quarter of the CPU cores (not less than 2, not exceeding 4)|
|
||||
|numOfSnodeUniqueThreads | |Supported, effective after restart|Number of exclusive threads for snode, range 0-1024, default value is one quarter of the CPU cores (not less than 2, not exceeding 4)|
|
||||
|ratioOfVnodeStreamThreads | |Supported, effective after restart|Ratio of stream computing using vnode threads, range 0.01-4, default value 4|
|
||||
|ttlUnit | |Not supported |Unit for ttl parameter, range 1-31572500, in seconds, default value 86400|
|
||||
|ttlPushInterval | |Supported, effective immediately |Frequency of ttl timeout checks, range 1-100000, in seconds, default value 10|
|
||||
|ttlChangeOnWrite | |Supported, effective immediately |Whether ttl expiration time changes with table modification; 0: no change, 1: change; default value 0|
|
||||
|ttlBatchDropNum | |Supported, effective immediately |Number of subtables deleted in a batch for ttl, minimum value 0, default value 10000|
|
||||
|retentionSpeedLimitMB | |Supported, effective immediately |Speed limit for data migration across different levels of disks, range 0-1024, in MB, default value 0, which means no limit|
|
||||
|maxTsmaNum | |Supported, effective immediately |Maximum number of TSMAs that can be created in the cluster; range 0-3; default value 3|
|
||||
|tmqMaxTopicNum | |Supported, effective immediately |Maximum number of topics that can be established for subscription; range 1-10000; default value 20|
|
||||
|tmqRowSize | |Supported, effective immediately |Maximum number of records in a subscription data block, range 1-1000000, default value 4096|
|
||||
|audit | |Supported, effective immediately |Audit feature switch; Enterprise parameter|
|
||||
|auditInterval | |Supported, effective immediately |Time interval for reporting audit data; Enterprise parameter|
|
||||
|auditCreateTable | |Supported, effective immediately |Whether to enable audit feature for creating subtables; Enterprise parameter|
|
||||
|encryptAlgorithm | |Not supported |Data encryption algorithm; Enterprise parameter|
|
||||
|encryptScope | |Not supported |Encryption scope; Enterprise parameter|
|
||||
|enableWhiteList | |Supported, effective immediately |Switch for whitelist feature; Enterprise parameter|
|
||||
|syncLogBufferMemoryAllowed| |Supported, effective immediately |Maximum memory allowed for sync log cache messages for a dnode, in bytes, range 104857600-INT64_MAX, default value is 1/10 of server memory, effective from versions 3.1.3.2/3.3.2.13|
|
||||
|syncElectInterval | |Not supported |Internal parameter, for debugging synchronization module|
|
||||
|syncHeartbeatInterval | |Not supported |Internal parameter, for debugging synchronization module|
|
||||
|syncHeartbeatTimeout | |Not supported |Internal parameter, for debugging synchronization module|
|
||||
|syncSnapReplMaxWaitN | |Supported, effective immediately |Internal parameter, for debugging synchronization module|
|
||||
|arbHeartBeatIntervalSec | |Supported, effective immediately |Internal parameter, for debugging synchronization module|
|
||||
|arbCheckSyncIntervalSec | |Supported, effective immediately |Internal parameter, for debugging synchronization module|
|
||||
|arbSetAssignedTimeoutSec | |Supported, effective immediately |Internal parameter, for debugging synchronization module|
|
||||
|mndSdbWriteDelta | |Supported, effective immediately |Internal parameter, for debugging mnode module|
|
||||
|mndLogRetention | |Supported, effective immediately |Internal parameter, for debugging mnode module|
|
||||
|skipGrant | |Not supported |Internal parameter, for authorization checks|
|
||||
|trimVDbIntervalSec | |Supported, effective immediately |Internal parameter, for deleting expired data|
|
||||
|ttlFlushThreshold | |Supported, effective immediately |Internal parameter, frequency of ttl timer|
|
||||
|compactPullupInterval | |Supported, effective immediately |Internal parameter, frequency of data reorganization timer|
|
||||
|walFsyncDataSizeLimit | |Supported, effective immediately |Internal parameter, threshold for WAL to perform FSYNC|
|
||||
|transPullupInterval | |Supported, effective immediately |Internal parameter, retry interval for mnode to execute transactions|
|
||||
|mqRebalanceInterval | |Supported, effective immediately |Internal parameter, interval for consumer rebalancing|
|
||||
|uptimeInterval | |Supported, effective immediately |Internal parameter, for recording system uptime|
|
||||
|timeseriesThreshold | |Supported, effective immediately |Internal parameter, for usage statistics|
|
||||
|udf | |Supported, effective after restart|Whether to start UDF service; 0: do not start, 1: start; default value 0 |
|
||||
|udfdResFuncs | |Supported, effective after restart|Internal parameter, for setting UDF result sets|
|
||||
|udfdLdLibPath | |Supported, effective after restart|Internal parameter, indicates the library path for loading UDF|
|
||||
|
||||
### Stream Computing Parameters
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|-----------------------|----------|-|
|
||||
| disableStream | | Switch to enable or disable stream computing |
|
||||
| streamBufferSize | | Controls the size of the window state cache in memory, default value is 128MB |
|
||||
| streamAggCnt | | Internal parameter, number of concurrent aggregation computations |
|
||||
| checkpointInterval | | Internal parameter, checkpoint synchronization interval |
|
||||
| concurrentCheckpoint | | Internal parameter, whether to check checkpoints concurrently |
|
||||
| maxStreamBackendCache | | Internal parameter, maximum cache used by stream computing |
|
||||
| streamSinkDataRate | | Internal parameter, used to control the write speed of stream computing results |
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
| disableStream | |Supported, effective immediately | Switch to enable or disable stream computing |
|
||||
| streamBufferSize | |Supported, effective immediately | Controls the size of the window state cache in memory, default value is 128MB |
|
||||
| streamAggCnt | |Not supported | Internal parameter, number of concurrent aggregation computations |
|
||||
| checkpointInterval | |Supported, effective after restart| Internal parameter, checkpoint synchronization interval |
|
||||
| concurrentCheckpoint | |Supported, effective immediately | Internal parameter, whether to check checkpoints concurrently |
|
||||
| maxStreamBackendCache | |Supported, effective immediately | Internal parameter, maximum cache used by stream computing |
|
||||
| streamSinkDataRate | |Supported, effective after restart| Internal parameter, used to control the write speed of stream computing results |
|
||||
|
||||
### Log Related
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|----------------|----------|-|
|
||||
| logDir | | Log file directory, operational logs will be written to this directory, default value /var/log/taos |
|
||||
| minimalLogDirGB | | Stops writing logs when the available space on the disk where the log folder is located is less than this value, unit GB, default value 1 |
|
||||
| numOfLogLines | | Maximum number of lines allowed in a single log file, default value 10,000,000 |
|
||||
| asyncLog | | Log writing mode, 0: synchronous, 1: asynchronous, default value 1 |
|
||||
| logKeepDays | | Maximum retention time for log files, unit: days, default value 0, which means unlimited retention, log files will not be renamed, nor will new log files be rolled out, but the content of the log files may continue to roll depending on the log file size setting; when set to a value greater than 0, when the log file size reaches the set limit, it will be renamed to taosdlog.yyy, where yyy is the timestamp of the last modification of the log file, and a new log file will be rolled out |
|
||||
| slowLogThreshold| 3.3.3.0 onwards | Slow query threshold, queries taking longer than or equal to this threshold are considered slow, unit seconds, default value 3 |
|
||||
| slowLogMaxLen | 3.3.3.0 onwards | Maximum length of slow query logs, range 1-16384, default value 4096 |
|
||||
| slowLogScope | 3.3.3.0 onwards | Type of slow query records, range ALL/QUERY/INSERT/OTHERS/NONE, default value QUERY |
|
||||
| slowLogExceptDb | 3.3.3.0 onwards | Specifies the database that does not report slow queries, only supports configuring one database |
|
||||
| debugFlag | | Log switch for running logs, 131 (outputs error and warning logs), 135 (outputs error, warning, and debug logs), 143 (outputs error, warning, debug, and trace logs); default value 131 or 135 (depending on the module) |
|
||||
| tmrDebugFlag | | Log switch for the timer module, range as above |
|
||||
| uDebugFlag | | Log switch for the utility module, range as above |
|
||||
| rpcDebugFlag | | Log switch for the rpc module, range as above |
|
||||
| qDebugFlag | | Log switch for the query module, range as above |
|
||||
| dDebugFlag | | Log switch for the dnode module, range as above |
|
||||
| vDebugFlag | | Log switch for the vnode module, range as above |
|
||||
| mDebugFlag | | Log switch for the mnode module, range as above |
|
||||
| azDebugFlag | 3.3.4.3 onwards | Log switch for the S3 module, range as above |
|
||||
| sDebugFlag | | Log switch for the sync module, range as above |
|
||||
| tsdbDebugFlag | | Log switch for the tsdb module, range as above |
|
||||
| tqDebugFlag | | Log switch for the tq module, range as above |
|
||||
| fsDebugFlag | | Log switch for the fs module, range as above |
|
||||
| udfDebugFlag | | Log switch for the udf module, range as above |
|
||||
| smaDebugFlag | | Log switch for the sma module, range as above |
|
||||
| idxDebugFlag | | Log switch for the index module, range as above |
|
||||
| tdbDebugFlag | | Log switch for the tdb module, range as above |
|
||||
| metaDebugFlag | | Log switch for the meta module, range as above |
|
||||
| stDebugFlag | | Log switch for the stream module, range as above |
|
||||
| sndDebugFlag | | Log switch for the snode module, range as above |
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
| logDir | |Not supported | Log file directory, operational logs will be written to this directory, default value /var/log/taos |
|
||||
| minimalLogDirGB | |Not supported | Stops writing logs when the available space on the disk where the log folder is located is less than this value, unit GB, default value 1 |
|
||||
| numOfLogLines | |Supported, effective immediately | Maximum number of lines allowed in a single log file, default value 10,000,000 |
|
||||
| asyncLog | |Supported, effective immediately | Log writing mode, 0: synchronous, 1: asynchronous, default value 1 |
|
||||
| logKeepDays | |Supported, effective immediately | Maximum retention time for log files, unit: days, default value 0, which means unlimited retention, log files will not be renamed, nor will new log files be rolled out, but the content of the log files may continue to roll depending on the log file size setting; when set to a value greater than 0, when the log file size reaches the set limit, it will be renamed to taosdlog.yyy, where yyy is the timestamp of the last modification of the log file, and a new log file will be rolled out |
|
||||
| slowLogThreshold| 3.3.3.0 onwards |Supported, effective immediately | Slow query threshold, queries taking longer than or equal to this threshold are considered slow, unit seconds, default value 3 |
|
||||
| slowLogMaxLen | 3.3.3.0 onwards |Supported, effective immediately | Maximum length of slow query logs, range 1-16384, default value 4096 |
|
||||
| slowLogScope | 3.3.3.0 onwards |Supported, effective immediately | Type of slow query records, range ALL/QUERY/INSERT/OTHERS/NONE, default value QUERY |
|
||||
| slowLogExceptDb | 3.3.3.0 onwards |Supported, effective immediately | Specifies the database that does not report slow queries, only supports configuring one database |
|
||||
| debugFlag | |Supported, effective immediately | Log switch for running logs, 131 (outputs error and warning logs), 135 (outputs error, warning, and debug logs), 143 (outputs error, warning, debug, and trace logs); default value 131 or 135 (depending on the module) |
|
||||
| tmrDebugFlag | |Supported, effective immediately | Log switch for the timer module, range as above |
|
||||
| uDebugFlag | |Supported, effective immediately | Log switch for the utility module, range as above |
|
||||
| rpcDebugFlag | |Supported, effective immediately | Log switch for the rpc module, range as above |
|
||||
| qDebugFlag | |Supported, effective immediately | Log switch for the query module, range as above |
|
||||
| dDebugFlag | |Supported, effective immediately | Log switch for the dnode module, range as above |
|
||||
| vDebugFlag | |Supported, effective immediately | Log switch for the vnode module, range as above |
|
||||
| mDebugFlag | |Supported, effective immediately | Log switch for the mnode module, range as above |
|
||||
| azDebugFlag | 3.3.4.3 onwards |Supported, effective immediately | Log switch for the S3 module, range as above |
|
||||
| sDebugFlag | |Supported, effective immediately | Log switch for the sync module, range as above |
|
||||
| tsdbDebugFlag | |Supported, effective immediately | Log switch for the tsdb module, range as above |
|
||||
| tqDebugFlag | |Supported, effective immediately | Log switch for the tq module, range as above |
|
||||
| fsDebugFlag | |Supported, effective immediately | Log switch for the fs module, range as above |
|
||||
| udfDebugFlag | |Supported, effective immediately | Log switch for the udf module, range as above |
|
||||
| smaDebugFlag | |Supported, effective immediately | Log switch for the sma module, range as above |
|
||||
| idxDebugFlag | |Supported, effective immediately | Log switch for the index module, range as above |
|
||||
| tdbDebugFlag | |Supported, effective immediately | Log switch for the tdb module, range as above |
|
||||
| metaDebugFlag | |Supported, effective immediately | Log switch for the meta module, range as above |
|
||||
| stDebugFlag | |Supported, effective immediately | Log switch for the stream module, range as above |
|
||||
| sndDebugFlag | |Supported, effective immediately | Log switch for the snode module, range as above |
|
||||
|
||||
### Debugging Related
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|----------------------|-------------------|-------------|
|
||||
| enableCoreFile | | Whether to generate a core file when crashing, 0: do not generate, 1: generate; default value is 1 |
|
||||
| configDir | | Directory where the configuration files are located |
|
||||
| scriptDir | | Directory for internal test tool scripts |
|
||||
| assert | | Assertion control switch, default value is 0 |
|
||||
| randErrorChance | | Internal parameter, used for random failure testing |
|
||||
| randErrorDivisor | | Internal parameter, used for random failure testing |
|
||||
| randErrorScope | | Internal parameter, used for random failure testing |
|
||||
| safetyCheckLevel | | Internal parameter, used for random failure testing |
|
||||
| experimental | | Internal parameter, used for some experimental features |
|
||||
| simdEnable | After 3.3.4.3 | Internal parameter, used for testing SIMD acceleration |
|
||||
| AVX512Enable | After 3.3.4.3 | Internal parameter, used for testing AVX512 acceleration |
|
||||
| rsyncPort | | Internal parameter, used for debugging stream computing |
|
||||
| snodeAddress | | Internal parameter, used for debugging stream computing |
|
||||
| checkpointBackupDir | | Internal parameter, used for restoring snode data |
|
||||
| enableAuditDelete | | Internal parameter, used for testing audit functions |
|
||||
| slowLogThresholdTest | | Internal parameter, used for testing slow logs |
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
| enableCoreFile | |Supported, effective immediately | Whether to generate a core file when crashing, 0: do not generate, 1: generate; default value is 1 |
|
||||
| configDir | |Not supported | Directory where the configuration files are located |
|
||||
|forceReadConfig | |Not supported |Force the use of parameters from the configuration file, default value: 0|
|
||||
| scriptDir | |Not supported | Directory for internal test tool scripts |
|
||||
| assert | |Not supported | Assertion control switch, default value is 0 |
|
||||
| randErrorChance | |Supported, effective immediately | Internal parameter, used for random failure testing |
|
||||
| randErrorDivisor | |Supported, effective immediately | Internal parameter, used for random failure testing |
|
||||
| randErrorScope | |Supported, effective immediately | Internal parameter, used for random failure testing |
|
||||
| safetyCheckLevel | |Supported, effective immediately | Internal parameter, used for random failure testing |
|
||||
| experimental | |Supported, effective immediately | Internal parameter, used for some experimental features |
|
||||
| simdEnable | After 3.3.4.3 |Not supported | Internal parameter, used for testing SIMD acceleration |
|
||||
| AVX512Enable | After 3.3.4.3 |Not supported | Internal parameter, used for testing AVX512 acceleration |
|
||||
| rsyncPort | |Not supported | Internal parameter, used for debugging stream computing |
|
||||
| snodeAddress | |Supported, effective immediately | Internal parameter, used for debugging stream computing |
|
||||
| checkpointBackupDir | |Supported, effective immediately | Internal parameter, used for restoring snode data |
|
||||
| enableAuditDelete | |Not supported | Internal parameter, used for testing audit functions |
|
||||
| slowLogThresholdTest | |Not supported | Internal parameter, used for testing slow logs |
|
||||
| bypassFlag |After 3.3.4.5 |Supported, effective immediately | Internal parameter, used for short-circuit testing|
|
||||
|
||||
### Compression Parameters
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|----------------|-------------------|-------------|
|
||||
| fPrecision | | Sets the compression precision for float type floating numbers, range 0.1 ~ 0.00000001, default value 0.00000001, floating numbers smaller than this value will have their mantissa truncated |
|
||||
| dPrecision | | Sets the compression precision for double type floating numbers, range 0.1 ~ 0.0000000000000001, default value 0.0000000000000001, floating numbers smaller than this value will have their mantissa truncated |
|
||||
| lossyColumn | Before 3.3.0.0 | Enables TSZ lossy compression for float and/or double types; range float/double/none; default value none, indicating lossless compression is off |
|
||||
| ifAdtFse | | When TSZ lossy compression is enabled, use the FSE algorithm instead of the HUFFMAN algorithm, FSE algorithm is faster in compression but slightly slower in decompression, choose this for faster compression speed; 0: off, 1: on; default value is 0 |
|
||||
| maxRange | | Internal parameter, used for setting lossy compression |
|
||||
| curRange | | Internal parameter, used for setting lossy compression |
|
||||
| compressor | | Internal parameter, used for setting lossy compression |
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
| fPrecision | |Supported, effective immediately | Sets the compression precision for float type floating numbers, range 0.1 ~ 0.00000001, default value 0.00000001, floating numbers smaller than this value will have their mantissa truncated |
|
||||
| dPrecision | |Supported, effective immediately | Sets the compression precision for double type floating numbers, range 0.1 ~ 0.0000000000000001, default value 0.0000000000000001, floating numbers smaller than this value will have their mantissa truncated |
|
||||
| lossyColumn | Before 3.3.0.0 |Not supported | Enables TSZ lossy compression for float and/or double types; range float/double/none; default value none, indicating lossless compression is off |
|
||||
| ifAdtFse | |Supported, effective after restart| When TSZ lossy compression is enabled, use the FSE algorithm instead of the HUFFMAN algorithm, FSE algorithm is faster in compression but slightly slower in decompression, choose this for faster compression speed; 0: off, 1: on; default value is 0 |
|
||||
| maxRange | |Supported, effective after restart| Internal parameter, used for setting lossy compression |
|
||||
| curRange | |Supported, effective after restart| Internal parameter, used for setting lossy compression |
|
||||
| compressor | |Supported, effective after restart| Internal parameter, used for setting lossy compression |
|
||||
|
||||
**Additional Notes**
|
||||
|
||||
|
|
|
@ -10,107 +10,109 @@ The TDengine client driver provides all the APIs needed for application programm
|
|||
|
||||
### Connection Related
|
||||
|
||||
|Parameter Name|Supported Version|Description|
|
||||
|----------------------|----------|-------------|
|
||||
|firstEp | |At startup, the endpoint of the first dnode in the cluster to actively connect to, default value: hostname:6030, if the server's hostname cannot be obtained, it is assigned to localhost|
|
||||
|secondEp | |At startup, if the firstEp cannot be connected, try to connect to the endpoint of the second dnode in the cluster, no default value|
|
||||
|compressMsgSize | |Whether to compress RPC messages; -1: no messages are compressed; 0: all messages are compressed; N (N>0): only messages larger than N bytes are compressed; default value -1|
|
||||
|shellActivityTimer | |The duration in seconds for the client to send heartbeats to mnode, range 1-120, default value 3|
|
||||
|numOfRpcSessions | |Maximum number of connections supported by RPC, range 100-100000, default value 30000|
|
||||
|numOfRpcThreads | |Number of threads for RPC to send and receive data, range 1-50, default value is half of the CPU cores|
|
||||
|numOfTaskQueueThreads | |Number of threads for the client to handle RPC messages, range 4-16, default value is half of the CPU cores|
|
||||
|timeToGetAvailableConn| Cancelled after 3.3.4.* |The longest waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|
||||
|useAdapter | |Internal parameter, whether to use taosadapter, affects CSV file import|
|
||||
|shareConnLimit |Added in 3.3.4.0|Internal parameter, the number of queries a connection can share, range 1-256, default value 10|
|
||||
|readTimeout |Added in 3.3.4.0|Internal parameter, minimum timeout, range 64-604800, in seconds, default value 900|
|
||||
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||
|----------------------|----------|--------------------|-------------|
|
||||
|firstEp | |Supported, effective immediately |At startup, the endpoint of the first dnode in the cluster to actively connect to, default value: hostname:6030, if the server's hostname cannot be obtained, it is assigned to localhost|
|
||||
|secondEp | |Supported, effective immediately |At startup, if the firstEp cannot be connected, try to connect to the endpoint of the second dnode in the cluster, no default value|
|
||||
|compressMsgSize | |Supported, effective immediately |Whether to compress RPC messages; -1: no messages are compressed; 0: all messages are compressed; N (N>0): only messages larger than N bytes are compressed; default value -1|
|
||||
|shellActivityTimer | |Not supported |The duration in seconds for the client to send heartbeats to mnode, range 1-120, default value 3|
|
||||
|numOfRpcSessions | |Supported, effective immediately |Maximum number of connections supported by RPC, range 100-100000, default value 30000|
|
||||
|numOfRpcThreads | |Not supported |Number of threads for RPC to send and receive data, range 1-50, default value is half of the CPU cores|
|
||||
|numOfTaskQueueThreads | |Not supported |Number of threads for the client to handle RPC messages, range 4-16, default value is half of the CPU cores|
|
||||
|timeToGetAvailableConn| Cancelled after 3.3.4.* |Not supported |The longest waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|
||||
|useAdapter | |Supported, effective immediately |Internal parameter, whether to use taosadapter, affects CSV file import|
|
||||
|shareConnLimit |Added in 3.3.4.0|Not supported |Internal parameter, the number of queries a connection can share, range 1-256, default value 10|
|
||||
|readTimeout |Added in 3.3.4.0|Not supported |Internal parameter, minimum timeout, range 64-604800, in seconds, default value 900|
|
||||
|
||||
### Query Related
|
||||
|
||||
|Parameter Name|Supported Version|Description|
|
||||
|---------------------------------|---------|-|
|
||||
|countAlwaysReturnValue | |Whether the count/hyperloglog function returns a value when the input data is empty or NULL; 0: returns an empty row, 1: returns; default value 1; when this parameter is set to 1, if the query contains an INTERVAL clause or the query uses TSMA, and the corresponding group or window has empty or NULL data, the corresponding group or window will not return a query result; note that this parameter should be consistent between client and server|
|
||||
|keepColumnName | |Automatically sets the alias to the column name (excluding the function name) when querying with Last, First, LastRow functions without specifying an alias, thus the order by clause will automatically refer to the column corresponding to the function; 1: automatically sets the alias to the column name (excluding the function name), 0: does not automatically set an alias; default value: 0|
|
||||
|multiResultFunctionStarReturnTags|After 3.3.3.0|When querying a supertable, whether last(\*)/last_row(\*)/first(\*) returns tag columns; when querying basic tables, subtables, it is not affected by this parameter; 0: does not return tag columns, 1: returns tag columns; default value: 0; when this parameter is set to 0, last(\*)/last_row(\*)/first(\*) only returns the ordinary columns of the supertable; when set to 1, it returns both the ordinary columns and tag columns of the supertable|
|
||||
|metaCacheMaxSize | |Specifies the maximum size of metadata cache for a single client, in MB; default value -1, meaning unlimited|
|
||||
|maxTsmaCalcDelay | |The allowable delay for tsma calculation by the client during query, range 600s - 86400s, i.e., 10 minutes - 1 day; default value: 600 seconds|
|
||||
|tsmaDataDeleteMark | |The retention time for intermediate results of historical data calculated by TSMA, in milliseconds; range >= 3600000, i.e., at least 1h; default value: 86400000, i.e., 1d |
|
||||
|queryPolicy | |Execution strategy for query statements, 1: only use vnode, do not use qnode; 2: subtasks without scan operators are executed on qnode, subtasks with scan operators are executed on vnode; 3: vnode only runs scan operators, all other operators are executed on qnode; default value: 1|
|
||||
|queryTableNotExistAsEmpty | |Whether to return an empty result set when the queried table does not exist; false: returns an error; true: returns an empty result set; default value false|
|
||||
|querySmaOptimize | |Optimization strategy for sma index, 0: do not use sma index, always query from original data; 1: use sma index, directly query from pre-calculated results for eligible statements; default value: 0|
|
||||
|queryPlannerTrace | |Internal parameter, whether the query plan outputs detailed logs|
|
||||
|queryNodeChunkSize | |Internal parameter, chunk size of the query plan|
|
||||
|queryUseNodeAllocator | |Internal parameter, allocation method of the query plan|
|
||||
|queryMaxConcurrentTables | |Internal parameter, concurrency number of the query plan|
|
||||
|enableQueryHb | |Internal parameter, whether to send query heartbeat messages|
|
||||
|minSlidingTime | |Internal parameter, minimum allowable value for sliding|
|
||||
|minIntervalTime | |Internal parameter, minimum allowable value for interval|
|
||||
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||
|----------------------|----------|--------------------|-------------|
|
||||
|countAlwaysReturnValue | |Supported, effective immediately |Whether the count/hyperloglog function returns a value when the input data is empty or NULL; 0: returns an empty row, 1: returns; default value 1; when this parameter is set to 1, if the query contains an INTERVAL clause or the query uses TSMA, and the corresponding group or window has empty or NULL data, the corresponding group or window will not return a query result; note that this parameter should be consistent between client and server|
|
||||
|keepColumnName | |Supported, effective immediately |Automatically sets the alias to the column name (excluding the function name) when querying with Last, First, LastRow functions without specifying an alias, thus the order by clause will automatically refer to the column corresponding to the function; 1: automatically sets the alias to the column name (excluding the function name), 0: does not automatically set an alias; default value: 0|
|
||||
|multiResultFunctionStarReturnTags|After 3.3.3.0|Supported, effective immediately |When querying a supertable, whether last(\*)/last_row(\*)/first(\*) returns tag columns; when querying basic tables, subtables, it is not affected by this parameter; 0: does not return tag columns, 1: returns tag columns; default value: 0; when this parameter is set to 0, last(\*)/last_row(\*)/first(\*) only returns the ordinary columns of the supertable; when set to 1, it returns both the ordinary columns and tag columns of the supertable|
|
||||
|metaCacheMaxSize | |Supported, effective immediately |Specifies the maximum size of metadata cache for a single client, in MB; default value -1, meaning unlimited|
|
||||
|maxTsmaCalcDelay | |Supported, effective immediately |The allowable delay for tsma calculation by the client during query, range 600s - 86400s, i.e., 10 minutes - 1 day; default value: 600 seconds|
|
||||
|tsmaDataDeleteMark | |Supported, effective immediately |The retention time for intermediate results of historical data calculated by TSMA, in milliseconds; range >= 3600000, i.e., at least 1h; default value: 86400000, i.e., 1d |
|
||||
|queryPolicy | |Supported, effective immediately |Execution strategy for query statements, 1: only use vnode, do not use qnode; 2: subtasks without scan operators are executed on qnode, subtasks with scan operators are executed on vnode; 3: vnode only runs scan operators, all other operators are executed on qnode; default value: 1|
|
||||
|queryTableNotExistAsEmpty | |Supported, effective immediately |Whether to return an empty result set when the queried table does not exist; false: returns an error; true: returns an empty result set; default value false|
|
||||
|querySmaOptimize | |Supported, effective immediately |Optimization strategy for sma index, 0: do not use sma index, always query from original data; 1: use sma index, directly query from pre-calculated results for eligible statements; default value: 0|
|
||||
|queryPlannerTrace | |Supported, effective immediately |Internal parameter, whether the query plan outputs detailed logs|
|
||||
|queryNodeChunkSize | |Supported, effective immediately |Internal parameter, chunk size of the query plan|
|
||||
|queryUseNodeAllocator | |Supported, effective immediately |Internal parameter, allocation method of the query plan|
|
||||
|queryMaxConcurrentTables | |Not supported |Internal parameter, concurrency number of the query plan|
|
||||
|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages|
|
||||
|minSlidingTime | |Supported, effective immediately |Internal parameter, minimum allowable value for sliding|
|
||||
|minIntervalTime | |Supported, effective immediately |Internal parameter, minimum allowable value for interval|
|
||||
|
||||
### Writing Related
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|---------------------------------|-------------------|-------------|
|
||||
| smlChildTableName | | Key for custom child table name in schemaless, no default value |
|
||||
| smlAutoChildTableNameDelimiter | | Delimiter between schemaless tags, concatenated as the child table name, no default value |
|
||||
| smlTagName | | Default tag name when schemaless tag is empty, default value "_tag_null" |
|
||||
| smlTsDefaultName | | Configuration for setting the time column name in schemaless auto table creation, default value "_ts" |
|
||||
| smlDot2Underline | | Converts dots in supertable names to underscores in schemaless |
|
||||
| maxInsertBatchRows | | Internal parameter, maximum number of rows per batch insert |
|
||||
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||
|----------------------|----------|--------------------|-------------|
|
||||
| smlChildTableName | |Supported, effective immediately | Key for custom child table name in schemaless, no default value |
|
||||
| smlAutoChildTableNameDelimiter | |Supported, effective immediately | Delimiter between schemaless tags, concatenated as the child table name, no default value |
|
||||
| smlTagName | |Supported, effective immediately | Default tag name when schemaless tag is empty, default value "_tag_null" |
|
||||
| smlTsDefaultName | |Supported, effective immediately | Configuration for setting the time column name in schemaless auto table creation, default value "_ts" |
|
||||
| smlDot2Underline | |Supported, effective immediately | Converts dots in supertable names to underscores in schemaless |
|
||||
| maxInsertBatchRows | |Supported, effective immediately | Internal parameter, maximum number of rows per batch insert |
|
||||
|
||||
### Region Related
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|----------------|-------------------|-------------|
|
||||
| timezone | | Time zone; defaults to dynamically obtaining the current system time zone setting |
|
||||
| locale | | System locale and encoding format, defaults to system settings |
|
||||
| charset | | Character set encoding, defaults to system settings |
|
||||
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||
|----------------------|----------|--------------------|-------------|
|
||||
| timezone | |Supported, effective immediately | Time zone; defaults to dynamically obtaining the current system time zone setting |
|
||||
| locale | |Supported, effective immediately | System locale and encoding format, defaults to system settings |
|
||||
| charset | |Supported, effective immediately | Character set encoding, defaults to system settings |
|
||||
|
||||
### Storage Related
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|-----------------|-------------------|-------------|
|
||||
| tempDir | | Specifies the directory for generating temporary files during operation, default on Linux platform is /tmp |
|
||||
| minimalTmpDirGB | | Minimum space required to be reserved in the directory specified by tempDir, in GB, default value: 1 |
|
||||
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||
|----------------------|----------|--------------------|-------------|
|
||||
| tempDir | |Supported, effective immediately | Specifies the directory for generating temporary files during operation, default on Linux platform is /tmp |
|
||||
| minimalTmpDirGB | |Supported, effective immediately | Minimum space required to be reserved in the directory specified by tempDir, in GB, default value: 1 |
|
||||
|
||||
### Log Related
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|------------------|-------------------|-------------|
|
||||
| logDir | | Log file directory, operational logs will be written to this directory, default value: /var/log/taos |
|
||||
| minimalLogDirGB | | Stops writing logs when the disk space available in the log directory is less than this value, in GB, default value: 1 |
|
||||
| numOfLogLines | | Maximum number of lines allowed in a single log file, default value: 10,000,000 |
|
||||
| asyncLog | | Log writing mode, 0: synchronous, 1: asynchronous, default value: 1 |
|
||||
| logKeepDays | | Maximum retention time for log files, in days, default value: 0, meaning unlimited retention. Log files will not be renamed, nor will new log files be rolled out, but the content of the log files may continue to roll depending on the log file size setting; when set to a value greater than 0, the log file will be renamed to taoslogx.yyy, where yyy is the timestamp of the last modification of the log file, and a new log file will be rolled out |
|
||||
| debugFlag | | Log switch for running logs, 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs); default value 131 or 135 (depending on the module) |
|
||||
| tmrDebugFlag | | Log switch for the timer module, value range as above |
|
||||
| uDebugFlag | | Log switch for the utility module, value range as above |
|
||||
| rpcDebugFlag | | Log switch for the rpc module, value range as above |
|
||||
| jniDebugFlag | | Log switch for the jni module, value range as above |
|
||||
| qDebugFlag | | Log switch for the query module, value range as above |
|
||||
| cDebugFlag | | Log switch for the client module, value range as above |
|
||||
| simDebugFlag | | Internal parameter, log switch for the test tool, value range as above |
|
||||
| tqClientDebugFlag| After 3.3.4.3 | Log switch for the client module, value range as above |
|
||||
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||
|----------------------|----------|--------------------|-------------|
|
||||
| logDir | |Not supported | Log file directory, operational logs will be written to this directory, default value: /var/log/taos |
|
||||
| minimalLogDirGB | |Supported, effective immediately | Stops writing logs when the disk space available in the log directory is less than this value, in GB, default value: 1 |
|
||||
| numOfLogLines | |Supported, effective immediately | Maximum number of lines allowed in a single log file, default value: 10,000,000 |
|
||||
| asyncLog | |Supported, effective immediately | Log writing mode, 0: synchronous, 1: asynchronous, default value: 1 |
|
||||
| logKeepDays | |Supported, effective immediately | Maximum retention time for log files, in days, default value: 0, meaning unlimited retention. Log files will not be renamed, nor will new log files be rolled out, but the content of the log files may continue to roll depending on the log file size setting; when set to a value greater than 0, the log file will be renamed to taoslogx.yyy, where yyy is the timestamp of the last modification of the log file, and a new log file will be rolled out |
|
||||
| debugFlag | |Supported, effective immediately | Log switch for running logs, 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs); default value 131 or 135 (depending on the module) |
|
||||
| tmrDebugFlag | |Supported, effective immediately | Log switch for the timer module, value range as above |
|
||||
| uDebugFlag | |Supported, effective immediately | Log switch for the utility module, value range as above |
|
||||
| rpcDebugFlag | |Supported, effective immediately | Log switch for the rpc module, value range as above |
|
||||
| jniDebugFlag | |Supported, effective immediately | Log switch for the jni module, value range as above |
|
||||
| qDebugFlag | |Supported, effective immediately | Log switch for the query module, value range as above |
|
||||
| cDebugFlag | |Supported, effective immediately | Log switch for the client module, value range as above |
|
||||
| simDebugFlag | |Supported, effective immediately | Internal parameter, log switch for the test tool, value range as above |
|
||||
| tqClientDebugFlag| After 3.3.4.3 |Supported, effective immediately | Log switch for the client module, value range as above |
|
||||
|
||||
### Debugging Related
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|------------------|-------------------|-------------|
|
||||
| crashReporting | | Whether to upload crash to telemetry, 0: do not upload, 1: upload; default value: 1 |
|
||||
| enableCoreFile | | Whether to generate a core file when crashing, 0: do not generate, 1: generate; default value: 1 |
|
||||
| assert | | Assertion control switch, default value: 0 |
|
||||
| configDir | | Directory for configuration files |
|
||||
| scriptDir | | Internal parameter, directory for test cases |
|
||||
| randErrorChance | After 3.3.3.0 | Internal parameter, used for random failure testing |
|
||||
| randErrorDivisor | After 3.3.3.0 | Internal parameter, used for random failure testing |
|
||||
| randErrorScope | After 3.3.3.0 | Internal parameter, used for random failure testing |
|
||||
| safetyCheckLevel | After 3.3.3.0 | Internal parameter, used for random failure testing |
|
||||
| simdEnable | After 3.3.4.3 | Internal parameter, used for testing SIMD acceleration |
|
||||
| AVX512Enable | After 3.3.4.3 | Internal parameter, used for testing AVX512 acceleration |
|
||||
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||
|----------------------|----------|--------------------|-------------|
|
||||
| crashReporting | |Supported, effective immediately | Whether to upload crash to telemetry, 0: do not upload, 1: upload; default value: 1 |
|
||||
| enableCoreFile | |Supported, effective immediately | Whether to generate a core file when crashing, 0: do not generate, 1: generate; default value: 1 |
|
||||
| assert | |Not supported | Assertion control switch, default value: 0 |
|
||||
| configDir | |Not supported | Directory for configuration files |
|
||||
| scriptDir | |Not supported | Internal parameter, directory for test cases |
|
||||
| randErrorChance | After 3.3.3.0 |Not supported | Internal parameter, used for random failure testing |
|
||||
| randErrorDivisor | After 3.3.3.0 |Not supported | Internal parameter, used for random failure testing |
|
||||
| randErrorScope | After 3.3.3.0 |Not supported | Internal parameter, used for random failure testing |
|
||||
| safetyCheckLevel | After 3.3.3.0 |Not supported | Internal parameter, used for random failure testing |
|
||||
| simdEnable | After 3.3.4.3 |Not supported | Internal parameter, used for testing SIMD acceleration |
|
||||
| AVX512Enable | After 3.3.4.3 |Not supported | Internal parameter, used for testing AVX512 acceleration |
|
||||
| bypassFlag |After 3.3.4.5 |Supported, effective immediately | Internal parameter, used for short-circuit testing|
|
||||
|
||||
|
||||
### SHELL Related
|
||||
|
||||
|Parameter Name|Supported Version|Description|
|
||||
|-----------------|----------|-|
|
||||
|enableScience | |Whether to enable scientific notation for displaying floating numbers; 0: do not enable, 1: enable; default value: 1|
|
||||
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||
|----------------------|----------|--------------------|-------------|
|
||||
|enableScience | |Not supported |Whether to enable scientific notation for displaying floating numbers; 0: do not enable, 1: enable; default value: 1|
|
||||
|
||||
## API
|
||||
|
||||
|
|
|
@ -6,11 +6,11 @@ slug: /tdengine-reference/components/taosadapter
|
|||
|
||||
import Image from '@theme/IdealImage';
|
||||
import imgAdapter from '../../assets/taosadapter-01.png';
|
||||
import Prometheus from "../../10-third-party/01-collection/_prometheus.mdx"
|
||||
import CollectD from "../../10-third-party/01-collection/_collectd.mdx"
|
||||
import StatsD from "../../10-third-party/01-collection/_statsd.mdx"
|
||||
import Icinga2 from "../../10-third-party/01-collection/_icinga2.mdx"
|
||||
import TCollector from "../../10-third-party/01-collection/_tcollector.mdx"
|
||||
import Prometheus from "../../assets/resources/_prometheus.mdx"
|
||||
import CollectD from "../../assets/resources/_collectd.mdx"
|
||||
import StatsD from "../../assets/resources/_statsd.mdx"
|
||||
import Icinga2 from "../../assets/resources/_icinga2.mdx"
|
||||
import TCollector from "../../assets/resources/_tcollector.mdx"
|
||||
|
||||
taosAdapter is a companion tool for TDengine, serving as a bridge and adapter between the TDengine cluster and applications. It provides an easy and efficient way to ingest data directly from data collection agents (such as Telegraf, StatsD, collectd, etc.). It also offers InfluxDB/OpenTSDB compatible data ingestion interfaces, allowing InfluxDB/OpenTSDB applications to be seamlessly ported to TDengine.
|
||||
|
||||
|
|
|
@ -14,6 +14,7 @@ The default configuration file for `Agent` is located at `/etc/taos/agent.toml`,
|
|||
- `token`: Required, the Token generated when creating `Agent` in `Explorer`.
|
||||
- `instanceId`: The instance ID of the current taosx-agent service. If multiple taosx-agent instances are started on the same machine, it is necessary to ensure that the instance IDs of each instance are unique.
|
||||
- `compression`: Optional, can be configured as `true` or `false`, default is `false`. If set to `true`, it enables data compression in communication between `Agent` and `taosX`.
|
||||
- `in_memory_cache_capacity`: Optional, signifies the maximum number of message batches that can be cached in memory and can be configured as a positive integer greater than zero. The default value is set at 64.
|
||||
- `log_level`: Optional, log level, default is `info`. Like `taosX`, it supports five levels: `error`, `warn`, `info`, `debug`, `trace`. Deprecated, please use `log.level` instead.
|
||||
- `log_keep_days`: Optional, the number of days to keep logs, default is `30` days. Deprecated, please use `log.keepDays` instead.
|
||||
- `log.path`: The directory where log files are stored.
|
||||
|
@ -45,6 +46,10 @@ As shown below:
|
|||
#
|
||||
#compression = true
|
||||
|
||||
# In-memory cache capacity
|
||||
#
|
||||
#in_memory_cache_capacity = 64
|
||||
|
||||
# log configuration
|
||||
[log]
|
||||
# All log files are stored in this directory
|
||||
|
|
|
@ -171,7 +171,37 @@ Metric details:
|
|||
5. **Writes**: Total number of writes
|
||||
6. **Other**: Total number of other requests
|
||||
|
||||
There are also line charts for the above categories.
|
||||
There are also line charts for the above categories.
|
||||
|
||||
### Automatic import of preconfigured alert rules
|
||||
|
||||
After summarizing user experience, 14 commonly used alert rules are sorted out. These alert rules can monitor key indicators of the TDengine cluster and report alerts, such as abnormal and exceeded indicators.
|
||||
Starting from TDengine-Server 3.3.4.3 (TDengine-datasource 3.6.3), TDengine Datasource supports automatic import of preconfigured alert rules. You can import 14 alert rules to Grafana (version 11 or later) with one click.
|
||||
In the TDengine-datasource setting interface, turn on the "Load TDengine Alert" switch, click the "Save & test" button, the plugin will automatically load the mentioned 14 alert rules. The rules will be placed in the Grafana alerts directory. If not required, turn off the "Load TDengine Alert" switch, and click the button next to "Clear TDengine Alert" to clear all the alert rules imported into this data source.
|
||||
|
||||
After importing, click on "Alert rules" on the left side of the Grafana interface to view all current alert rules. By configuring contact points, users can receive alert notifications.
|
||||
|
||||
The specific configuration of the 14 alert rules is as follows:
|
||||
|
||||
| alert rule| Rule threshold| Behavior when no data | Data scanning interval |Duration | SQL |
|
||||
| ------ | --------- | ---------------- | ----------- |------- |----------------------|
|
||||
|CPU load of dnode node|average > 80%|Trigger alert|5 minutes|5 minutes |`select now(), dnode_id, last(cpu_system) as cup_use from log.taosd_dnodes_info where _ts >= (now- 5m) and _ts < now partition by dnode_id having first(_ts) > 0 `|
|
||||
|Memory of dnode node |average > 60%|Trigger alert|5 minutes|5 minutes|`select now(), dnode_id, last(mem_engine) / last(mem_total) * 100 as taosd from log.taosd_dnodes_info where _ts >= (now- 5m) and _ts <now partition by dnode_id`|
|
||||
|Disk capacity occupancy of dnode nodes | > 80%|Trigger alert|5 minutes|5 minutes|`select now(), dnode_id, data_dir_level, data_dir_name, last(used) / last(total) * 100 as used from log.taosd_dnodes_data_dirs where _ts >= (now - 5m) and _ts < now partition by dnode_id, data_dir_level, data_dir_name`|
|
||||
|Authorization expires |< 60 days|Trigger alert|1 day|0 seconds|`select now(), cluster_id, last(grants_expire_time) / 86400 as expire_time from log.taosd_cluster_info where _ts >= (now - 24h) and _ts < now partition by cluster_id having first(_ts) > 0 `|
|
||||
|The number of used measurement points has reached the authorized number|>= 90%|Trigger alert|1 day|0 seconds|`select now(), cluster_id, CASE WHEN max(grants_timeseries_total) > 0.0 THEN max(grants_timeseries_used) /max(grants_timeseries_total) * 100.0 ELSE 0.0 END AS result from log.taosd_cluster_info where _ts >= (now - 30s) and _ts < now partition by cluster_id having timetruncate(first(_ts), 1m) > 0`|
|
||||
|Number of concurrent query requests | > 100|Do not trigger alert|1 minute|0 seconds|`select now() as ts, count(*) as slow_count from performance_schema.perf_queries`|
|
||||
|Maximum time for slow query execution (no time window) |> 300 seconds|Do not trigger alert|1 minute|0 seconds|`select now() as ts, count(*) as slow_count from performance_schema.perf_queries where exec_usec>300000000`|
|
||||
|dnode offline |total != alive|Trigger alert|30 seconds|0 seconds|`select now(), cluster_id, last(dnodes_total) - last(dnodes_alive) as dnode_offline from log.taosd_cluster_info where _ts >= (now -30s) and _ts < now partition by cluster_id having first(_ts) > 0`|
|
||||
|vnode offline |total != alive|Trigger alert|30 seconds|0 seconds|`select now(), cluster_id, last(vnodes_total) - last(vnodes_alive) as vnode_offline from log.taosd_cluster_info where _ts >= (now - 30s) and _ts < now partition by cluster_id having first(_ts) > 0 `|
|
||||
|Number of data deletion requests |> 0|Do not trigger alert|30 seconds|0 seconds|``select now(), count(`count`) as `delete_count` from log.taos_sql_req where sql_type = 'delete' and _ts >= (now -30s) and _ts < now``|
|
||||
|Adapter RESTful request fail |> 5|Do not trigger alert|30 seconds|0 seconds|``select now(), sum(`fail`) as `Failed` from log.adapter_requests where req_type=0 and ts >= (now -30s) and ts < now``|
|
||||
|Adapter WebSocket request fail |> 5|Do not trigger alert|30 seconds|0 seconds|``select now(), sum(`fail`) as `Failed` from log.adapter_requests where req_type=1 and ts >= (now -30s) and ts < now``|
|
||||
|Dnode data reporting is missing |< 3|Trigger alert|180 seconds|0 seconds|`select now(), cluster_id, count(*) as dnode_report from log.taosd_cluster_info where _ts >= (now -180s) and _ts < now partition by cluster_id having timetruncate(first(_ts), 1h) > 0`|
|
||||
|Restart dnode |max(update_time) > last(update_time)|Trigger alert|90 seconds|0 seconds|`select now(), dnode_id, max(uptime) - last(uptime) as dnode_restart from log.taosd_dnodes_info where _ts >= (now - 90s) and _ts < now partition by dnode_id`|
|
||||
|
||||
TDengine users can modify and improve these alert rules according to their own business needs. In Grafana 7.5 and below versions, the Dashboard and Alert rules functions are combined, while in subsequent new versions, the two functions are separated. To be compatible with Grafana7.5 and below versions, an Alert Used Only panel has been added to the TDinsight panel, which is only required for Grafana7.5 and below versions.
|
||||
|
||||
|
||||
## Upgrade
|
||||
|
||||
|
|
|
@ -4,22 +4,17 @@ sidebar_label: taosdump
|
|||
slug: /tdengine-reference/tools/taosdump
|
||||
---
|
||||
|
||||
taosdump is a tool application that supports backing up data from a running TDengine cluster and restoring the backed-up data to the same or another running TDengine cluster.
|
||||
|
||||
taosdump can back up data using databases, supertables, or basic tables as logical data units, and can also back up data records within a specified time period from databases, supertables, and basic tables. You can specify the directory path for data backup; if not specified, taosdump defaults to backing up data to the current directory.
|
||||
|
||||
If the specified location already has data files, taosdump will prompt the user and exit immediately to avoid data being overwritten. This means the same path can only be used for one backup.
|
||||
If you see related prompts, please operate carefully.
|
||||
|
||||
taosdump is a logical backup tool, it should not be used to back up any raw data, environment settings, hardware information, server configuration, or cluster topology. taosdump uses [Apache AVRO](https://avro.apache.org/) as the data file format to store backup data.
|
||||
`taosdump` is a TDengine data backup/recovery tool provided for open source users, and the backed up data files adopt the standard [Apache AVRO](https://avro.apache.org/)
|
||||
format, making it convenient to exchange data with the external ecosystem.
|
||||
Taosdump provides multiple data backup and recovery options to meet different data needs, and all supported options can be viewed through --help.
|
||||
|
||||
## Installation
|
||||
|
||||
There are two ways to install taosdump:
|
||||
Taosdump provides two installation methods:
|
||||
|
||||
- Install the official taosTools package, please find taosTools on the [release history page](../../../release-history/taostools/) and download it for installation.
|
||||
- Taosdump is the default installation component in the TDengine installation package, which can be used after installing TDengine. For how to install TDengine, please refer to [TDengine Installation](../../../get-started/)
|
||||
|
||||
- Compile taos-tools separately and install, please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
|
||||
- Compile and install taos-tools separately; refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
|
||||
|
||||
## Common Use Cases
|
||||
|
||||
|
@ -30,6 +25,9 @@ There are two ways to install taosdump:
|
|||
3. Backup certain supertables or basic tables in a specified database: use the `dbname stbname1 stbname2 tbname1 tbname2 ...` parameter, note that this input sequence starts with the database name, supports only one database, and the second and subsequent parameters are the names of the supertables or basic tables in that database, separated by spaces;
|
||||
4. Backup the system log database: TDengine clusters usually include a system database named `log`, which contains data for TDengine's own operation, taosdump does not back up the log database by default. If there is a specific need to back up the log database, you can use the `-a` or `--allow-sys` command line parameter.
|
||||
5. "Tolerant" mode backup: Versions after taosdump 1.4.1 provide the `-n` and `-L` parameters, used for backing up data without using escape characters and in "tolerant" mode, which can reduce backup data time and space occupied when table names, column names, and label names do not use escape characters. If unsure whether to use `-n` and `-L`, use the default parameters for "strict" mode backup. For an explanation of escape characters, please refer to the [official documentation](../../sql-manual/escape-characters/).
|
||||
6. If a backup file already exists in the directory specified by the `-o` parameter, to prevent data from being overwritten, taosdump will report an error and exit. Please replace it with another empty directory or clear the original data before backing up.
|
||||
7. Currently, taosdump does not support data breakpoint backup function. Once the data backup is interrupted, it needs to be started from scratch.
|
||||
If the backup takes a long time, it is recommended to use the `-S` and `-E` options to specify the start and end times for segmented backup.
|
||||
|
||||
:::tip
|
||||
|
||||
|
@ -42,7 +40,8 @@ There are two ways to install taosdump:
|
|||
|
||||
### taosdump Restore Data
|
||||
|
||||
Restore data files from a specified path: use the `-i` parameter along with the data file path. As mentioned earlier, the same directory should not be used to back up different data sets, nor should the same path be used to back up the same data set multiple times, otherwise, the backup data will cause overwriting or multiple backups.
|
||||
- Restore data files from a specified path: use the `-i` parameter along with the data file path. As mentioned earlier, the same directory should not be used to back up different data sets, nor should the same path be used to back up the same data set multiple times, otherwise, the backup data will cause overwriting or multiple backups.
|
||||
- taosdump supports data recovery to a new database name with the parameter `-W`, please refer to the command line parameter description for details.
|
||||
|
||||
:::tip
|
||||
taosdump internally uses the TDengine stmt binding API to write restored data, currently using 16384 as a batch for writing. If there are many columns in the backup data, it may cause a "WAL size exceeds limit" error, in which case you can try adjusting the `-B` parameter to a smaller value.
|
||||
|
@ -105,6 +104,13 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
|
|||
the table name. (Version 2.5.3)
|
||||
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
|
||||
8.
|
||||
-W, --rename=RENAME-LIST Rename database name with new name during
|
||||
importing data. RENAME-LIST:
|
||||
"db1=newDB1|db2=newDB2" means rename db1 to newDB1
|
||||
and rename db2 to newDB2 (Version 2.5.4)
|
||||
-k, --retry-count=VALUE Set the number of retry attempts for connection or
|
||||
query failures
|
||||
-z, --retry-sleep-ms=VALUE retry interval sleep time, unit ms
|
||||
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
|
||||
-R, --restful Use RESTful interface to connect TDengine
|
||||
-t, --timeout=SECONDS The timeout seconds for websocket to interact.
|
||||
|
@ -112,10 +118,6 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
|
|||
-?, --help Give this help list
|
||||
--usage Give a short usage message
|
||||
-V, --version Print program version
|
||||
-W, --rename=RENAME-LIST Rename database name with new name during
|
||||
importing data. RENAME-LIST:
|
||||
"db1=newDB1|db2=newDB2" means rename db1 to newDB1
|
||||
and rename db2 to newDB2 (Version 2.5.4)
|
||||
|
||||
Mandatory or optional arguments to long options are also mandatory or optional
|
||||
for any corresponding short options.
|
||||
|
|
|
@ -4,35 +4,38 @@ sidebar_label: taosBenchmark
|
|||
slug: /tdengine-reference/tools/taosbenchmark
|
||||
---
|
||||
|
||||
taosBenchmark (formerly known as taosdemo) is a tool for testing the performance of the TDengine product. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions. It can simulate massive data generated by a large number of devices and flexibly control the number of databases, supertables, types and number of tag columns, types and number of data columns, number of subtables, data volume per subtable, data insertion interval, number of working threads in taosBenchmark, whether and how to insert out-of-order data, etc. To accommodate the usage habits of past users, the installation package provides taosdemo as a soft link to taosBenchmark.
|
||||
TaosBenchmark is a performance benchmarking tool for TDengine products, providing insertion, query, and subscription performance testing for TDengine products, and outputting performance indicators.
|
||||
|
||||
## Installation
|
||||
|
||||
There are two ways to install taosBenchmark:
|
||||
taosBenchmark provides two installation methods:
|
||||
|
||||
- taosBenchmark is automatically installed with the official TDengine installation package, for details please refer to [TDengine Installation](../../../get-started/).
|
||||
- taosBenchmark is the default installation component in the TDengine installation package, which can be used after installing TDengine. For how to install TDengine, please refer to [TDengine Installation](../../../get started/)
|
||||
|
||||
- Compile and install taos-tools separately, for details please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository.
|
||||
- Compile and install taos-tools separately; refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository.
|
||||
|
||||
## Operation
|
||||
|
||||
### Configuration and Operation Methods
|
||||
|
||||
taosBenchmark needs to be executed in the operating system's terminal, and this tool supports two configuration methods: Command Line Arguments and JSON Configuration File. These two methods are mutually exclusive; when using a configuration file, only one command line argument `-f <json file>` can be used to specify the configuration file. When using command line arguments to run taosBenchmark and control its behavior, the `-f` parameter cannot be used; instead, other parameters must be used for configuration. In addition, taosBenchmark also offers a special mode of operation, which is running without any parameters.
|
||||
|
||||
taosBenchmark supports comprehensive performance testing for TDengine, and the TDengine features it supports are divided into three categories: writing, querying, and subscribing. These three functions are mutually exclusive, and each run of taosBenchmark can only select one of them. It is important to note that the type of function to be tested is not configurable when using the command line configuration method; the command line configuration method can only test writing performance. To test TDengine's query and subscription performance, you must use the configuration file method and specify the type of function to be tested through the `filetype` parameter in the configuration file.
|
||||
taosBenchmark supports three operating modes:
|
||||
- No parameter mode
|
||||
- Command line mode
|
||||
- JSON configuration file mode
|
||||
The command-line approach is a subset of the functionality of JSON configuration files; when both the command line and a configuration file are used, the parameters specified on the command line take precedence.
|
||||
|
||||
**Ensure that the TDengine cluster is running correctly before running taosBenchmark.**
|
||||
|
||||
### Running Without Command Line Arguments
|
||||
|
||||
Execute the following command to quickly experience taosBenchmark performing a write performance test on TDengine based on the default configuration.
|
||||
|
||||
Execute the following command to quickly experience taosBenchmark performing a write performance test on TDengine based on the default configuration.
|
||||
```shell
|
||||
taosBenchmark
|
||||
```
|
||||
|
||||
When running without parameters, taosBenchmark by default connects to the TDengine cluster specified under `/etc/taos`, and creates a database named `test` in TDengine, under which a supertable named `meters` is created, and 10,000 tables are created under the supertable, each table having 10,000 records inserted. Note that if a `test` database already exists, this command will delete the existing database and create a new `test` database.
|
||||
When running without parameters, taosBenchmark defaults to connecting to the TDengine cluster specified in `/etc/taos/taos.cfg`.
|
||||
After a successful connection, a smart meter example database `test` and a supertable `meters` will be created, along with 10,000 subtables, each holding 10,000 records. If the `test` database already exists, it will be deleted before a new one is created.
|
||||
|
||||
### Running Using Command Line Configuration Parameters
|
||||
|
||||
|
@ -46,9 +49,7 @@ The above command `taosBenchmark` will create a database named `test`, establish
|
|||
|
||||
### Running Using a Configuration File
|
||||
|
||||
The taosBenchmark installation package includes examples of configuration files, located in `<install_directory>/examples/taosbenchmark-json`
|
||||
|
||||
Use the following command line to run taosBenchmark and control its behavior through a configuration file.
|
||||
Running in configuration file mode provides all functions, so parameters can be configured to run in the configuration file.
|
||||
|
||||
```shell
|
||||
taosBenchmark -f <json file>
|
||||
|
@ -214,6 +215,61 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
|
|||
- **-?/--help**:
|
||||
Displays help information and exits. Cannot be used with other parameters.
|
||||
|
||||
|
||||
## Output performance indicators
|
||||
|
||||
#### Write indicators
|
||||
|
||||
After writing is completed, a summary performance metric will be output in the last two lines in the following format:
|
||||
``` bash
|
||||
SUCC: Spent 8.527298 (real 8.117379) seconds to insert rows: 10000000 with 8 thread(s) into test 1172704.41 (real 1231924.74) records/second
|
||||
SUCC: insert delay, min: 19.6780ms, avg: 64.9390ms, p90: 94.6900ms, p95: 105.1870ms, p99: 130.6660ms, max: 157.0830ms
|
||||
```
|
||||
First line write speed statistics:
|
||||
- Spent: Total write time, in seconds, counting from the start of writing the first data to the end of the last data. This indicates that a total of 8.527298 seconds were spent
|
||||
- Real: Total write time (engine calls only), excluding the time the testing framework spends preparing data; here 8.117379 seconds. The difference, 8.527298 − 8.117379 = 0.409919 seconds, is the time spent preparing data in the testing framework
|
||||
- Rows: Total number of rows written; here 10 million records
|
||||
- Threads: Number of writing threads; here 8 threads write simultaneously
|
||||
- Records/second write speed = `total number of rows written` / `total write time`; the value in parentheses (real) likewise indicates the pure engine write speed
|
||||
|
||||
Second line single write delay statistics:
|
||||
- min: Write minimum delay
|
||||
- avg: Average write latency
|
||||
- p90: Write delay p90 percentile delay number
|
||||
- p95: Write delay p95 percentile delay number
|
||||
- p99: Write delay p99 percentile delay number
|
||||
- max: maximum write delay
|
||||
Through this series of indicators, the distribution of write request latency can be observed
|
||||
|
||||
#### Query indicators
|
||||
The query performance test mainly outputs the QPS indicator of query request speed, and the output format is as follows:
|
||||
|
||||
``` bash
|
||||
complete query with 3 threads and 10000 query delay avg: 0.002686s min: 0.001182s max: 0.012189s p90: 0.002977s p95: 0.003493s p99: 0.004645s SQL command: select ...
|
||||
INFO: Total specified queries: 30000
|
||||
INFO: Spend 26.9530 second completed total queries: 30000, the QPS of all threads: 1113.049
|
||||
```
|
||||
|
||||
- The first line represents the percentile distribution of query execution and query request delay for each of the three threads executing 10000 queries. The SQL command is the test query statement
|
||||
- The second line indicates that a total of 10000 * 3 = 30000 queries have been completed
|
||||
- The third line indicates that the total query time is 26.9530 seconds, and the query rate per second (QPS) is 1113.049 queries/second
|
||||
|
||||
#### Subscription metrics
|
||||
|
||||
The subscription performance test mainly outputs consumer consumption speed indicators, with the following output format:
|
||||
``` bash
|
||||
INFO: consumer id 0 has poll total msgs: 376, period rate: 37.592 msgs/s, total rows: 3760000, period rate: 375924.815 rows/s
|
||||
INFO: consumer id 1 has poll total msgs: 362, period rate: 36.131 msgs/s, total rows: 3620000, period rate: 361313.504 rows/s
|
||||
INFO: consumer id 2 has poll total msgs: 364, period rate: 36.378 msgs/s, total rows: 3640000, period rate: 363781.731 rows/s
|
||||
INFO: consumerId: 0, consume msgs: 1000, consume rows: 10000000
|
||||
INFO: consumerId: 1, consume msgs: 1000, consume rows: 10000000
|
||||
INFO: consumerId: 2, consume msgs: 1000, consume rows: 10000000
|
||||
INFO: Consumed total msgs: 3000, total rows: 30000000
|
||||
```
|
||||
- Lines 1 to 3 show the real-time consumption speed of each consumer: msgs/s is the number of messages consumed (each message contains multiple rows of data), and rows/s is the consumption speed calculated in rows
|
||||
- Lines 4 to 6 show the overall statistics of each consumer after the test is completed, including the total number of messages and the total number of rows consumed
|
||||
- The overall statistics of all consumers in line 7, `msgs` represents how many messages were consumed in total, `rows` represents how many rows of data were consumed in total
|
||||
|
||||
## Configuration File Parameters Detailed Explanation
|
||||
|
||||
### General Configuration Parameters
|
||||
|
@ -331,21 +387,6 @@ Parameters related to supertable creation are configured in the `super_tables` s
|
|||
- **repeat_ts_max** : Numeric type, when composite primary key is enabled, specifies the maximum number of records with the same timestamp to be generated
|
||||
- **sqls** : Array of strings type, specifies the array of sql to be executed after the supertable is successfully created, the table name specified in sql must be prefixed with the database name, otherwise an unspecified database error will occur
|
||||
|
||||
#### tsma Configuration Parameters
|
||||
|
||||
Specify the configuration parameters for tsma in `super_tables` under `tsmas`, with the following specific parameters:
|
||||
|
||||
- **name**: Specifies the name of the tsma, mandatory.
|
||||
|
||||
- **function**: Specifies the function of the tsma, mandatory.
|
||||
|
||||
- **interval**: Specifies the time interval for the tsma, mandatory.
|
||||
|
||||
- **sliding**: Specifies the window time shift for the tsma, mandatory.
|
||||
|
||||
- **custom**: Specifies custom configuration appended at the end of the tsma creation statement, optional.
|
||||
|
||||
- **start_when_inserted**: Specifies when to create the tsma after how many rows are inserted, optional, default is 0.
|
||||
|
||||
#### Tag and Data Column Configuration Parameters
|
||||
|
||||
|
@ -423,6 +464,11 @@ For other common parameters, see Common Configuration Parameters.
|
|||
|
||||
Configuration parameters for querying specified tables (can specify supertables, subtables, or regular tables) are set in `specified_table_query`.
|
||||
|
||||
- **mixed_query** "yes": `Mixed Query` "no": `Normal Query`, default is "no"
|
||||
`Mixed Query`: All SQL statements in `sqls` are grouped by the number of threads, with each thread executing one group. Each SQL statement in a thread needs to perform `query_times` queries.
|
||||
`Normal Query`: Each SQL in `sqls` starts `threads` threads and exits after executing `query_times` times. The next SQL can only be executed after all threads of the previous SQL have finished and exited.
|
||||
Regardless of whether it is a `Normal Query` or a `Mixed Query`, the total number of query executions is the same: total queries = `sqls` × `threads` × `query_times`. The difference is that `Normal Query` starts `threads` threads for each SQL statement, while `Mixed Query` starts `threads` threads only once to complete all SQL statements, so the number of thread startups differs between the two.
|
||||
|
||||
- **query_interval** : Query interval, in seconds, default is 0.
|
||||
|
||||
- **threads** : Number of threads executing the SQL query, default is 1.
|
||||
|
@ -433,7 +479,8 @@ Configuration parameters for querying specified tables (can specify supertables,
|
|||
|
||||
#### Configuration Parameters for Querying Supertables
|
||||
|
||||
Configuration parameters for querying supertables are set in `super_table_query`.
|
||||
Configuration parameters for querying supertables are set in `super_table_query`.
|
||||
The thread mode of the supertable query is the same as the `Normal Query` mode of the specified query statement described above, except that `sqls` is automatically filled with all subtables of the supertable.
|
||||
|
||||
- **stblname** : The name of the supertable to query, required.
|
||||
|
||||
|
|
|
@ -190,6 +190,7 @@ ROUND(expr[, digits])
|
|||
- `digits` less than zero means discarding the decimal places and rounding the number to the left of the decimal point by `digits` places. If the number of places to the left of the decimal point is less than `digits`, returns 0.
|
||||
- Since the DECIMAL type is not yet supported, this function will use DOUBLE and FLOAT to represent results containing decimals, but DOUBLE and FLOAT have precision limits, and using this function may be meaningless when there are too many digits.
|
||||
- Can only be used with regular columns, selection (Selection), projection (Projection) functions, and cannot be used with aggregation (Aggregation) functions.
|
||||
- `digits` is supported from version 3.3.3.0.
|
||||
|
||||
**Example**:
|
||||
|
||||
|
@ -249,6 +250,8 @@ TAN(expr)
|
|||
|
||||
**Function Description**: Obtains the tangent result of the specified field.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Result Type**: DOUBLE.
|
||||
|
||||
**Applicable Data Types**: Numeric types.
|
||||
|
@ -297,6 +300,8 @@ TRUNCATE(expr, digits)
|
|||
|
||||
**Function Description**: Gets the truncated value of the specified field to the specified number of digits.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Type**: Consistent with the original data type of the `expr` field.
|
||||
|
||||
**Applicable Data Types**:
|
||||
|
@ -340,6 +345,8 @@ EXP(expr)
|
|||
|
||||
**Function Description**: Returns the value of e (the base of natural logarithms) raised to the specified power.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Type**: DOUBLE.
|
||||
|
||||
**Applicable Data Types**: Numeric type.
|
||||
|
@ -370,6 +377,8 @@ LN(expr)
|
|||
|
||||
**Function Description**: Returns the natural logarithm of the specified parameter.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Type**: DOUBLE.
|
||||
|
||||
**Applicable Data Types**: Numeric type.
|
||||
|
@ -401,6 +410,8 @@ MOD(expr1, expr2)
|
|||
|
||||
**Function Description**: Calculates the result of expr1 % expr2.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Type**: DOUBLE.
|
||||
|
||||
**Applicable Data Types**: Numeric type.
|
||||
|
@ -437,6 +448,8 @@ RAND([seed])
|
|||
|
||||
**Function Description**: Returns a uniformly distributed random number from 0 to 1.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Result Type**: DOUBLE.
|
||||
|
||||
**Applicable Data Types**:
|
||||
|
@ -484,6 +497,8 @@ SIGN(expr)
|
|||
|
||||
**Function Description**: Returns the sign of the specified parameter.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Result Type**: Consistent with the original data type of the specified field.
|
||||
|
||||
**Applicable Data Types**: Numeric types.
|
||||
|
@ -527,6 +542,8 @@ DEGREES(expr)
|
|||
|
||||
**Function Description**: Calculates the value of the specified parameter converted from radians to degrees.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Result Type**: DOUBLE.
|
||||
|
||||
**Applicable Data Types**: Numeric types.
|
||||
|
@ -558,6 +575,8 @@ RADIANS(expr)
|
|||
|
||||
**Function Description**: Calculates the value of the specified parameter converted from degrees to radians.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Type**: DOUBLE.
|
||||
|
||||
**Applicable Data Types**: Numeric types.
|
||||
|
@ -729,6 +748,8 @@ TRIM([remstr FROM] expr)
|
|||
|
||||
**Function Description**: Returns the string expr with all prefixes or suffixes of remstr removed.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Result Type**: Same as the original type of the input field expr.
|
||||
|
||||
**Applicable Data Types**:
|
||||
|
@ -807,6 +828,8 @@ SUBSTRING/SUBSTR(expr FROM pos [FOR len])
|
|||
- If `len` is less than 1, returns an empty string.
|
||||
- `pos` is 1-based; if `pos` is 0, returns an empty string.
|
||||
- If `pos` + `len` exceeds `len(expr)`, returns the substring from `pos` to the end of the string, equivalent to executing `substring(expr, pos)`.
|
||||
- Function `SUBSTRING` is equal to `SUBSTR`, supported from ver-3.3.3.0.
|
||||
- Syntax `SUBSTRING/SUBSTR(expr FROM pos [FOR len])` is supported from ver-3.3.3.0.
|
||||
|
||||
**Examples**:
|
||||
|
||||
|
@ -845,6 +868,8 @@ SUBSTRING_INDEX(expr, delim, count)
|
|||
|
||||
**Function Description**: Returns a substring of `expr` cut at the position where the delimiter appears the specified number of times.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Result Type**: Same as the original type of the input field `expr`.
|
||||
|
||||
**Applicable Data Types**:
|
||||
|
@ -902,6 +927,8 @@ CHAR(expr1 [, expr2] [, expr3] ...)
|
|||
|
||||
**Function Description**: Treats the input parameters as integers and returns the characters corresponding to these integers in ASCII encoding.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Result Type**: VARCHAR.
|
||||
|
||||
**Applicable Data Types**: Integer types, VARCHAR, NCHAR.
|
||||
|
@ -916,6 +943,7 @@ CHAR(expr1 [, expr2] [, expr3] ...)
|
|||
- NULL values in input parameters will be skipped.
|
||||
- If the input parameters are of string type, they will be converted to numeric type for processing.
|
||||
- If the character corresponding to the input parameter is a non-printable character, the return value will still contain the character corresponding to that parameter, but it may not be displayed.
|
||||
- This function can have at most 2^31 - 1 input parameters.
|
||||
|
||||
**Examples**:
|
||||
|
||||
|
@ -949,6 +977,8 @@ ASCII(expr)
|
|||
|
||||
**Function Description**: Returns the ASCII code of the first character of the string.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Result Data Type**: BIGINT.
|
||||
|
||||
**Applicable Data Types**: VARCHAR, NCHAR.
|
||||
|
@ -979,6 +1009,8 @@ POSITION(expr1 IN expr2)
|
|||
|
||||
**Function Description**: Calculates the position of string `expr1` in string `expr2`.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Result Type**: BIGINT.
|
||||
|
||||
**Applicable Data Types**:
|
||||
|
@ -1026,6 +1058,8 @@ REPLACE(expr, from_str, to_str)
|
|||
|
||||
**Function Description**: Replaces all occurrences of `from_str` in the string with `to_str`.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Type**: Same as the original type of the input field `expr`.
|
||||
|
||||
**Applicable Data Types**:
|
||||
|
@ -1061,6 +1095,8 @@ REPEAT(expr, count)
|
|||
|
||||
**Function Description**: Returns a string that repeats the string `expr` a specified number of times.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Type**: Same as the original type of the input field `expr`.
|
||||
|
||||
**Applicable Data Types**:
|
||||
|
@ -1319,6 +1355,7 @@ TIMEDIFF(expr1, expr2 [, time_unit])
|
|||
- `expr1`: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 standard date-time format.
|
||||
- `expr2`: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 standard date-time format.
|
||||
- `time_unit`: See usage instructions.
|
||||
- `timediff` return the absolute value of the difference between timestamp `expr1` and `expr2` before ver-3.3.3.0.
|
||||
|
||||
**Nested Subquery Support**: Applicable to both inner and outer queries.
|
||||
|
||||
|
@ -1423,6 +1460,8 @@ WEEK(expr [, mode])
|
|||
|
||||
**Function Description**: Returns the week number of the input date.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Result Type**: BIGINT.
|
||||
|
||||
**Applicable Data Types**:
|
||||
|
@ -1490,6 +1529,8 @@ WEEKOFYEAR(expr)
|
|||
|
||||
**Function Description**: Returns the week number of the input date.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Type**: BIGINT.
|
||||
|
||||
**Applicable Data Types**: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 date-time format.
|
||||
|
@ -1521,6 +1562,8 @@ WEEKDAY(expr)
|
|||
|
||||
**Function Description**: Returns the weekday of the input date.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Type**: BIGINT.
|
||||
|
||||
**Applicable Data Types**: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 date-time format.
|
||||
|
@ -1552,6 +1595,8 @@ DAYOFWEEK(expr)
|
|||
|
||||
**Function Description**: Returns the weekday of the input date.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Type**: BIGINT.
|
||||
|
||||
**Applicable Data Types**: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 date-time format.
|
||||
|
@ -1707,6 +1752,9 @@ STDDEV/STDDEV_POP(expr)
|
|||
|
||||
**Applicable to**: Tables and supertables.
|
||||
|
||||
**Description**:
|
||||
- Function `STDDEV_POP` equals `STDDEV` and is supported from ver-3.3.3.0.
|
||||
|
||||
**Example**:
|
||||
|
||||
```sql
|
||||
|
@ -1733,6 +1781,8 @@ VAR_POP(expr)
|
|||
|
||||
**Function Description**: Calculates the population variance of a column in a table.
|
||||
|
||||
**Version**: ver-3.3.3.0
|
||||
|
||||
**Return Data Type**: DOUBLE.
|
||||
|
||||
**Applicable Data Types**: Numeric types.
|
||||
|
@ -1975,7 +2025,8 @@ MAX(expr)
|
|||
|
||||
**Applicable to**: Tables and supertables.
|
||||
|
||||
**Usage Instructions**: The max function can accept strings as input parameters, and when the input parameter is a string type, it returns the largest string value.
|
||||
**Usage Instructions**:
|
||||
- The max function can accept strings as input parameters, and when the input parameter is a string type, it returns the largest string value (supported from ver-3.3.3.0; function `max` only accepted numeric parameters before ver-3.3.3.0).
|
||||
|
||||
### MIN
|
||||
|
||||
|
@ -1991,7 +2042,8 @@ MIN(expr)
|
|||
|
||||
**Applicable to**: Tables and supertables.
|
||||
|
||||
**Usage Instructions**: The min function can accept strings as input parameters, and when the input parameter is a string type, it returns the largest string value.
|
||||
**Usage Instructions**:
|
||||
- The min function can accept strings as input parameters, and when the input parameter is a string type, it returns the smallest string value (supported from ver-3.3.3.0; function `min` only accepted numeric parameters before ver-3.3.3.0).
|
||||
|
||||
### MODE
|
||||
|
||||
|
|
|
@ -41,38 +41,28 @@ If there is a single replica on the node and the node is offline, to forcibly de
|
|||
ALTER DNODE dnode_id dnode_option
|
||||
|
||||
ALTER ALL DNODES dnode_option
|
||||
|
||||
dnode_option: {
|
||||
'resetLog'
|
||||
| 'balance' 'value'
|
||||
| 'monitor' 'value'
|
||||
| 'debugFlag' 'value'
|
||||
| 'monDebugFlag' 'value'
|
||||
| 'vDebugFlag' 'value'
|
||||
| 'mDebugFlag' 'value'
|
||||
| 'cDebugFlag' 'value'
|
||||
| 'httpDebugFlag' 'value'
|
||||
| 'qDebugflag' 'value'
|
||||
| 'sdbDebugFlag' 'value'
|
||||
| 'uDebugFlag' 'value'
|
||||
| 'tsdbDebugFlag' 'value'
|
||||
| 'sDebugflag' 'value'
|
||||
| 'rpcDebugFlag' 'value'
|
||||
| 'dDebugFlag' 'value'
|
||||
| 'mqttDebugFlag' 'value'
|
||||
| 'wDebugFlag' 'value'
|
||||
| 'tmrDebugFlag' 'value'
|
||||
| 'cqDebugFlag' 'value'
|
||||
}
|
||||
```
|
||||
|
||||
The modifiable configuration items in the syntax above are configured in the same way as in the dnode configuration file, the difference being that modifications are dynamic, take immediate effect, and do not require restarting the dnode.
|
||||
For configuration parameters that support dynamic modification, you can use the ALTER DNODE or ALTER ALL DNODES syntax to modify the values of configuration parameters in a dnode. Starting from version 3.3.4.0, the modified configuration parameters will be automatically persisted and will remain effective even after the database service is restarted.
|
||||
|
||||
`value` is the value of the parameter, which needs to be in string format. For example, to change the log output level of dnode 1 to debug:
|
||||
To check whether a configuration parameter supports dynamic modification, please refer to the following page: [taosd Reference](../01-components/01-taosd.md)
|
||||
|
||||
The value is the parameter's value and needs to be in character format. For example, to change the log output level of dnode 1 to debug:
|
||||
|
||||
```sql
|
||||
ALTER DNODE 1 'debugFlag' '143';
|
||||
```
|
||||
### Additional Notes:
|
||||
Configuration parameters in a dnode are divided into global configuration parameters and local configuration parameters. You can check the category field in SHOW VARIABLES or SHOW DNODE dnode_id VARIABLE to determine whether a configuration parameter is a global configuration parameter or a local configuration parameter:
|
||||
|
||||
Local configuration parameters: You can use ALTER DNODE or ALTER ALL DNODES to update the local configuration parameters of a specific dnode or all dnodes.
|
||||
Global configuration parameters: Global configuration parameters require consistency across all dnodes, so you can only use ALTER ALL DNODES to update the global configuration parameters of all dnodes.
|
||||
There are three cases for whether a configuration parameter can be dynamically modified:
|
||||
|
||||
Supports dynamic modification, effective immediately
|
||||
Supports dynamic modification, effective after restart
|
||||
Does not support dynamic modification
|
||||
For configuration parameters that take effect after a restart, you can see the modified values through SHOW VARIABLES or SHOW DNODE dnode_id VARIABLE, but you need to restart the database service to make them effective.
|
||||
|
||||
## Add Management Node
|
||||
|
||||
|
@ -136,18 +126,12 @@ If the client is also considered as part of the cluster in a broader sense, the
|
|||
|
||||
```sql
|
||||
ALTER LOCAL local_option
|
||||
|
||||
local_option: {
|
||||
'resetLog'
|
||||
| 'rpcDebugFlag' 'value'
|
||||
| 'tmrDebugFlag' 'value'
|
||||
| 'cDebugFlag' 'value'
|
||||
| 'uDebugFlag' 'value'
|
||||
| 'debugFlag' 'value'
|
||||
}
|
||||
```
|
||||
|
||||
The parameters in the syntax above are used in the same way as in the configuration file for the client, but do not require a restart of the client, and the changes take effect immediately.
|
||||
You can use the above syntax to modify the client's configuration parameters, and there is no need to restart the client. The changes take effect immediately.
|
||||
|
||||
To check whether a configuration parameter supports dynamic modification, please refer to the following page: [taosc Reference](../01-components/02-taosc.md)
|
||||
|
||||
|
||||
## View Client Configuration
|
||||
|
||||
|
|
|
@ -342,3 +342,18 @@ Note: Users with SYSINFO property set to 0 cannot view this table.
|
|||
| 10 | raw_data | BIGINT | Estimated size of raw data, in KB |
|
||||
|
||||
Note:
|
||||
|
||||
## INS_FILESETS
|
||||
|
||||
Provides information about file sets.
|
||||
|
||||
| # | **Column** | **Data Type** | **Description** |
|
||||
| --- | :-----------: | ------------- | ---------------------------------------------------- |
|
||||
| 1 | db_name | VARCHAR(65) | Database name |
|
||||
| 2 | vgroup_id | INT | Vgroup ID |
|
||||
| 3 | fileset_id | INT | File set ID |
|
||||
| 4 | start_time | TIMESTAMP | Start time of the time range covered by the file set |
|
||||
| 5 | end_time | TIMESTAMP | End time of the time range covered by the file set |
|
||||
| 6 | total_size | BIGINT | Total size of the file set |
|
||||
| 7 | last_compact | TIMESTAMP | Time of the last compaction |
|
||||
| 8 | shold_compact | BOOL | Whether the file set should be compacted |
|
||||
|
|
|
@ -13,16 +13,16 @@ CREATE USER user_name PASS 'password' [SYSINFO {1|0}] [CREATEDB {1|0}];
|
|||
|
||||
The username can be up to 23 bytes long.
|
||||
|
||||
The password can be up to 31 bytes long. The password can include letters, numbers, and special characters except for single quotes, double quotes, backticks, backslashes, and spaces, and it cannot be an empty string.
|
||||
The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. Special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`.
|
||||
|
||||
`SYSINFO` indicates whether the user can view system information. `1` means they can view, `0` means they have no permission to view. System information includes service configuration, dnode, vnode, storage, etc. The default value is `1`.
|
||||
|
||||
`CREATEDB` indicates whether the user can create databases. `1` means they can create databases, `0` means they have no permission to create databases. The default value is `0`. (Supported starting from TDengine Enterprise version 3.3.2.0.)
|
||||
|
||||
In the example below, we create a user with the password `123456` who can view system information.
|
||||
In the example below, we create a user with the password `abc123!@#` who can view system information.
|
||||
|
||||
```sql
|
||||
taos> create user test pass '123456' sysinfo 1;
|
||||
taos> create user test pass 'abc123!@#' sysinfo 1;
|
||||
Query OK, 0 of 0 rows affected (0.001254s)
|
||||
```
|
||||
|
||||
|
|
|
@ -35,9 +35,9 @@ Please refer to the [Supported Platforms List](../#supported-platforms)
|
|||
|
||||
### Version History
|
||||
|
||||
| TDengine Client Version | Main Changes | TDengine Version |
|
||||
| :------------------: | :---------------------------: | :----------------: |
|
||||
| 3.3.3.0 | First release, providing comprehensive support for SQL execution, parameter binding, schema-less writing, and data subscription. | 3.3.2.0 and higher versions |
|
||||
| TDengine Client Version | Major Changes | TDengine Version |
|
||||
| ------------------ | --------------------------- | ---------------- |
|
||||
| 3.3.3.0 | First release, providing comprehensive support for SQL execution, parameter binding, schema-less writing, and data subscription. | 3.3.2.0 and higher |
|
||||
|
||||
### Error Codes
|
||||
|
||||
|
|
|
@ -6,7 +6,7 @@ slug: /tdengine-reference/client-libraries/java
|
|||
|
||||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
import RequestId from "./_request_id.mdx";
|
||||
import RequestId from "../../assets/resources/_request_id.mdx";
|
||||
|
||||
`taos-jdbcdriver` is the official Java connector for TDengine, allowing Java developers to develop applications that access the TDengine database. `taos-jdbcdriver` implements the interfaces of the JDBC driver standard.
|
||||
|
||||
|
@ -18,45 +18,45 @@ The JDBC driver implementation for TDengine strives to be consistent with relati
|
|||
|
||||
:::
|
||||
|
||||
## JDBC and JRE Compatibility
|
||||
## JDBC and JRE Version Compatibility
|
||||
|
||||
- JDBC: Supports JDBC 4.2, with some features like schemaless writing and data subscription provided separately
|
||||
- JRE: Supports JRE 8 and above
|
||||
- JDBC: Supports JDBC 4.2 and above.
|
||||
- JRE: Supports JRE 8 and above.
|
||||
|
||||
## Supported Platforms
|
||||
|
||||
Native connection supports the same platforms as the TDengine client driver.
|
||||
REST connection supports all platforms that can run Java.
|
||||
- Native connection supports the same platforms as the TDengine client driver.
|
||||
- WebSocket/REST connection supports all platforms that can run Java.
|
||||
|
||||
## Version History
|
||||
|
||||
| taos-jdbcdriver Version | Main Changes | TDengine Version |
|
||||
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
|
||||
| 3.4.0 | 1. Replaced fastjson library with jackson; 2. WebSocket uses a separate protocol identifier; 3. Optimized background thread usage to avoid user misuse leading to timeouts. | - |
|
||||
| 3.3.4 | 1. Fixed getInt error when data type is float | - |
|
||||
| 3.3.3 | 1. Fixed memory leak caused by closing WebSocket statement | - |
|
||||
| 3.3.2 | 1. Optimized parameter binding performance under WebSocket connection; 2. Improved support for mybatis | - |
|
||||
| 3.3.0 | 1. Optimized data transmission performance under WebSocket connection; 2. Supports skipping SSL verification, off by default | 3.3.2.0 and higher |
|
||||
| 3.2.11 | Fixed a bug in closing result set in Native connection | - |
|
||||
| 3.2.10 | 1. REST/WebSocket connections support data compression during transmission; 2. WebSocket automatic reconnection mechanism, off by default; 3. Connection class provides methods for schemaless writing; 4. Optimized data fetching performance for native connections; 5. Fixed some known issues; 6. Metadata retrieval functions can return a list of supported functions. | - |
|
||||
| 3.2.9 | Fixed bug in closing WebSocket prepareStatement | - |
|
||||
| 3.2.8 | Optimized auto-commit, fixed manual commit bug in WebSocket, optimized WebSocket prepareStatement using a single connection, metadata supports views | - |
|
||||
| 3.2.7 | Supports VARBINARY and GEOMETRY types, added timezone setting support for native connections. Added WebSocket automatic reconnection feature. | 3.2.0.0 and higher |
|
||||
| 3.2.5 | Data subscription adds committed() and assignment() methods | 3.1.0.3 and higher |
|
||||
| 3.2.4 | Data subscription adds enable.auto.commit parameter under WebSocket connection, as well as unsubscribe() method. | - |
|
||||
| 3.2.3 | Fixed ResultSet data parsing failure in some cases | - |
|
||||
| 3.2.2 | New feature: Data subscription supports seek function. | 3.0.5.0 and higher |
|
||||
| 3.2.1 | New feature: WebSocket connection supports schemaless and prepareStatement writing. Change: consumer poll returns result set as ConsumerRecord, which can be accessed through value() method. | 3.0.3.0 and higher |
|
||||
| 3.2.0 | Connection issues, not recommended for use | - |
|
||||
| 3.1.0 | WebSocket connection supports subscription function | - |
|
||||
| 3.0.1 - 3.0.4 | Fixed data parsing errors in result sets under some conditions. 3.0.1 compiled in JDK 11 environment, other versions recommended for JDK 8 | - |
|
||||
| 3.0.0 | Supports TDengine 3.0 | 3.0.0.0 and higher |
|
||||
| 2.0.42 | Fixed wasNull interface return value in WebSocket connection | - |
|
||||
| 2.0.41 | Fixed username and password encoding method in REST connection | - |
|
||||
| 2.0.39 - 2.0.40 | Added REST connection/request timeout settings | - |
|
||||
| 2.0.38 | JDBC REST connection adds batch fetching function | - |
|
||||
| 2.0.37 | Added support for json tag | - |
|
||||
| 2.0.36 | Added support for schemaless writing | - |
|
||||
| taos-jdbcdriver Version | Major Changes | TDengine Version |
|
||||
| ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- |
|
||||
| 3.4.0 | 1. Replaced fastjson library with jackson. <br/> 2. WebSocket uses a separate protocol identifier. <br/> 3. Optimized background thread usage to avoid user misuse leading to timeouts. | - |
|
||||
| 3.3.4 | Fixed getInt error when data type is float. | - |
|
||||
| 3.3.3 | Fixed memory leak caused by closing WebSocket statement. | - |
|
||||
| 3.3.2 | 1. Optimized parameter binding performance under WebSocket connection. <br/> 2. Improved support for mybatis. | - |
|
||||
| 3.3.0 | 1. Optimized data transmission performance under WebSocket connection. <br/> 2. Supports skipping SSL verification, off by default. | 3.3.2.0 and higher |
|
||||
| 3.2.11 | Fixed a bug in closing result set in Native connection. | - |
|
||||
| 3.2.10 | 1. REST/WebSocket connections support data compression during transmission. <br/> 2. WebSocket automatic reconnection mechanism, off by default. <br/> 3. Connection class provides methods for schemaless writing. <br/> 4. Optimized data fetching performance for native connections. <br/> 5. Fixed some known issues. <br/> 6. Metadata retrieval functions can return a list of supported functions. | - |
|
||||
| 3.2.9 | Fixed bug in closing WebSocket prepareStatement. | - |
|
||||
| 3.2.8 | 1. Optimized auto-commit. <br/> 2. Fixed manual commit bug in WebSocket. <br/> 3. Optimized WebSocket prepareStatement using a single connection. <br/> 4. Metadata supports views. | - |
|
||||
| 3.2.7 | 1. Supports VARBINARY and GEOMETRY types. <br/> 2. Added timezone setting support for native connections. <br/> 3. Added WebSocket automatic reconnection feature. | 3.2.0.0 and higher |
|
||||
| 3.2.5 | Data subscription adds committed() and assignment() methods. | 3.1.0.3 and higher |
|
||||
| 3.2.4 | Data subscription adds enable.auto.commit parameter under WebSocket connection, as well as unsubscribe() method. | - |
|
||||
| 3.2.3 | Fixed ResultSet data parsing failure in some cases. | - |
|
||||
| 3.2.2 | New feature: Data subscription supports seek function. | 3.0.5.0 and higher |
|
||||
| 3.2.1 | 1. WebSocket connection supports schemaless and prepareStatement writing. <br/> 2. Consumer poll returns result set as ConsumerRecord, which can be accessed through value() method. | 3.0.3.0 and higher |
|
||||
| 3.2.0 | Connection issues, not recommended for use. | - |
|
||||
| 3.1.0 | WebSocket connection supports subscription function. | - |
|
||||
| 3.0.1 - 3.0.4 | Fixed data parsing errors in result sets under some conditions. 3.0.1 compiled in JDK 11 environment, other versions recommended for JDK 8. | - |
|
||||
| 3.0.0 | Supports TDengine 3.0 | 3.0.0.0 and higher |
|
||||
| 2.0.42 | Fixed wasNull interface return value in WebSocket connection. | - |
|
||||
| 2.0.41 | Fixed username and password encoding method in REST connection. | - |
|
||||
| 2.0.39 - 2.0.40 | Added REST connection/request timeout settings. | - |
|
||||
| 2.0.38 | JDBC REST connection adds batch fetching function. | - |
|
||||
| 2.0.37 | Added support for json tag. | - |
|
||||
| 2.0.36 | Added support for schemaless writing. | - |
|
||||
|
||||
## Exceptions and Error Codes
|
||||
|
||||
|
|
|
@ -6,24 +6,42 @@ slug: /tdengine-reference/client-libraries/go
|
|||
|
||||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
import RequestId from "./_request_id.mdx";
|
||||
import RequestId from "../../assets/resources/_request_id.mdx";
|
||||
|
||||
`driver-go` is the official Go language connector for TDengine, implementing the interface of the Go language [database/sql](https://golang.org/pkg/database/sql/) package. Go developers can use it to develop applications that access data in the TDengine cluster.
|
||||
|
||||
## Compatibility
|
||||
## Go Version Compatibility
|
||||
|
||||
Supports a minimum Go version of 1.14, but the latest version of Go is recommended.
|
||||
Supports Go 1.14 and above.
|
||||
|
||||
## Supported Platforms
|
||||
|
||||
Native connections support the same platforms as the TDengine client driver.
|
||||
REST connections support all platforms that can run Go.
|
||||
- Native connections support the same platforms as the TDengine client driver.
|
||||
- WebSocket/REST connections support all platforms that can run Go.
|
||||
|
||||
## Version Support
|
||||
## Version History
|
||||
|
||||
Please refer to the [version support list](https://github.com/taosdata/driver-go#remind).
|
||||
| driver-go Version | Major Changes | TDengine Version |
|
||||
|------------------|------------------------------------------------------------------|-------------------|
|
||||
| v3.5.8 | Fixed null pointer exception. | - |
|
||||
| v3.5.7 | taosWS and taosRestful support passing request id. | - |
|
||||
| v3.5.6 | Improved websocket query and insert performance. | 3.3.2.0 and higher |
|
||||
| v3.5.5 | Restful supports skipping SSL certificate check. | - |
|
||||
| v3.5.4 | Compatible with TDengine 3.3.0.0 tmq raw data. | - |
|
||||
| v3.5.3 | Refactored taosWS. | - |
|
||||
| v3.5.2 | Websocket compression and optimized tmq subscription performance. | 3.2.3.0 and higher |
|
||||
| v3.5.1 | Native stmt query and geometry type support. | 3.2.1.0 and higher |
|
||||
| v3.5.0 | Support tmq get assignment and seek offset. | 3.0.5.0 and higher |
|
||||
| v3.3.1 | Schemaless protocol insert based on websocket. | 3.0.4.1 and higher |
|
||||
| v3.1.0 | Provided Kafka-like subscription API. | - |
|
||||
| v3.0.4 | Added request id related interfaces. | 3.0.2.2 and higher |
|
||||
| v3.0.3 | Websocket-based statement insert. | - |
|
||||
| v3.0.2 | Websocket-based data query and insert. | 3.0.1.5 and higher |
|
||||
| v3.0.1 | Websocket-based message subscription. | - |
|
||||
| v3.0.0 | Adapted to TDengine 3.0 query and insert. | 3.0.0.0 and higher |
|
||||
|
||||
## Handling Exceptions
|
||||
|
||||
## Exceptions and Error Codes
|
||||
|
||||
If it is a TDengine error, you can obtain the error code and error message as follows.
|
||||
|
||||
|
|
|
@ -4,11 +4,11 @@ title: Rust Client Library
|
|||
slug: /tdengine-reference/client-libraries/rust
|
||||
---
|
||||
|
||||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
import Tabs from "@theme/Tabs";
|
||||
import TabItem from "@theme/TabItem";
|
||||
|
||||
import Preparation from "./_preparation.mdx"
|
||||
import RequestId from "./_request_id.mdx";
|
||||
import Preparation from "../../assets/resources/_preparation.mdx"
|
||||
import RequestId from "../../assets/resources/_request_id.mdx";
|
||||
|
||||
[](https://crates.io/crates/taos)  [](https://docs.rs/taos)
|
||||
|
||||
|
@ -16,37 +16,30 @@ import RequestId from "./_request_id.mdx";
|
|||
|
||||
The source code for this Rust connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-rust).
|
||||
|
||||
## Connection Methods
|
||||
## Rust Version Compatibility
|
||||
|
||||
`taos` provides two ways to establish a connection. Generally, we recommend using **WebSocket Connection**.
|
||||
|
||||
- **Native Connection**, which connects to a TDengine instance via the TDengine client driver (taosc).
|
||||
- **WebSocket Connection**, which connects to a TDengine instance via the WebSocket interface of taosAdapter.
|
||||
|
||||
You can specify which connector to use through different "features (i.e., the Cargo keyword `features`)" (both are supported by default).
|
||||
|
||||
For a detailed introduction to connection methods, please refer to: [Connection Methods](../../../developer-guide/connecting-to-tdengine/)
|
||||
Supports Rust 1.70 and above.
|
||||
|
||||
## Supported Platforms
|
||||
|
||||
The platforms supported by the native connection are consistent with those supported by the TDengine client driver.
|
||||
WebSocket connection supports all platforms that can run Rust.
|
||||
- The platforms supported by the native connection are consistent with those supported by the TDengine client driver.
|
||||
- WebSocket connection supports all platforms that can run Rust.
|
||||
|
||||
## Version History
|
||||
|
||||
| Rust Connector Version | TDengine Version | Main Features |
|
||||
| :----------------: | :--------------: | :--------------------------------------------------: |
|
||||
| v0.12.3 | 3.3.0.0 or later | Optimized WebSocket query and insertion performance, support for VARBINARY and GEOMETRY types |
|
||||
| v0.12.0 | 3.2.3.0 or later | WS supports compression. |
|
||||
| v0.11.0 | 3.2.0.0 | TMQ feature optimization. |
|
||||
| v0.10.0 | 3.1.0.0 | WS endpoint change. |
|
||||
| v0.9.2 | 3.0.7.0 | STMT: ws to get tag_fields, col_fields. |
|
||||
| v0.8.12 | 3.0.5.0 | Message subscription: get consumption progress and start consuming at a specified progress. |
|
||||
| v0.8.0 | 3.0.4.0 | Supports schema-less writing. |
|
||||
| v0.7.6 | 3.0.3.0 | Supports using req_id in requests. |
|
||||
| v0.6.0 | 3.0.0.0 | Basic functionality. |
|
||||
| Rust Connector Version | Major Changes | TDengine Version |
|
||||
| ---------------------- | ----------------------------------------------------------------------------------------------------- | ------------------ |
|
||||
| v0.12.3 | 1. Optimized WebSocket query and insert performance. <br/> 2. Supported VARBINARY and GEOMETRY types. | 3.3.0.0 and higher |
|
||||
| v0.12.0 | WebSocket supports compression. | 3.2.3.0 and higher |
|
||||
| v0.11.0 | TMQ feature optimization. | 3.2.0.0 and higher |
|
||||
| v0.10.0 | WebSocket endpoint change. | 3.1.0.0 and higher |
|
||||
| v0.9.2 | STMT: WebSocket to get tag_fields, col_fields. | 3.0.7.0 and higher |
|
||||
| v0.8.12 | Message subscription: get consumption progress and start consuming at a specified progress. | 3.0.5.0 and higher |
|
||||
| v0.8.0 | Supports schema-less writing. | 3.0.4.0 and higher |
|
||||
| v0.7.6 | Supports using req_id in requests. | 3.0.3.0 and higher |
|
||||
| v0.6.0 | Basic functionality. | 3.0.0.0 and higher |
|
||||
|
||||
## Error Handling
|
||||
## Exceptions and Error Codes
|
||||
|
||||
After an error occurs, you can obtain detailed information about the error:
|
||||
|
||||
|
@ -81,14 +74,14 @@ TDengine currently supports timestamp, numeric, character, and boolean types, wi
|
|||
| BINARY | Vec\<u8> |
|
||||
| NCHAR | String |
|
||||
| JSON | serde_json::Value |
|
||||
| VARBINARY | Bytes |
|
||||
| GEOMETRY | Bytes |
|
||||
| VARBINARY | Bytes |
|
||||
| GEOMETRY | Bytes |
|
||||
|
||||
**Note**: The JSON type is only supported in tags.
|
||||
|
||||
## Summary of Example Programs
|
||||
|
||||
For the source code of the example programs, please refer to: [rust example](https://github.com/taosdata/TDengine/tree/main/docs/examples/rust)
|
||||
Please refer to: [rust example](https://github.com/taosdata/TDengine/tree/main/docs/examples/rust)
|
||||
|
||||
## Frequently Asked Questions
|
||||
|
||||
|
@ -97,9 +90,9 @@ Please refer to [FAQ](../../../frequently-asked-questions/)
|
|||
## API Reference
|
||||
|
||||
The Rust connector interfaces are divided into synchronous and asynchronous interfaces, where the synchronous interfaces are generally implemented by the asynchronous ones, and the method signatures are basically the same except for the async keyword. For interfaces where the synchronous and asynchronous functionalities are the same, this document only provides explanations for the synchronous interfaces.
|
||||
For WebSocket connections and native connections, other than the different DSNs required to establish the connections, there is no difference in calling other interfaces.
|
||||
For WebSocket connections and native connections, other than the different DSNs required to establish the connections, there is no difference in calling other interfaces.
|
||||
|
||||
### Connection Features
|
||||
### Connection Features
|
||||
|
||||
#### DSN
|
||||
|
||||
|
@ -132,26 +125,31 @@ A complete DSN description string example is as follows: `taos+ws://localhost:60
|
|||
The TaosBuilder struct primarily provides methods for building Taos objects based on DSN, as well as features for checking connections and obtaining the client version number.
|
||||
|
||||
- `fn available_params() -> &'static [&'static str]`
|
||||
|
||||
- **Interface Description**: Retrieves a list of available parameters in the DSN.
|
||||
- **Return Value**: Returns a reference to a static slice of strings containing the names of available parameters.
|
||||
|
||||
- `fn from_dsn<D: IntoDsn>(dsn: D) -> RawResult<Self>`
|
||||
|
||||
- **Interface Description**: Creates a connection using a DSN string without checking the connection.
|
||||
- **Parameter Description**:
|
||||
- `dsn`: DSN string or a type that can be converted into a DSN.
|
||||
- **Return Value**: On success, returns a `RawResult` of its own type; on failure, returns an error.
|
||||
|
||||
- `fn client_version() -> &'static str`
|
||||
|
||||
- **Interface Description**: Gets the client version.
|
||||
- **Return Value**: Returns a static string of the client version.
|
||||
|
||||
- `fn ping(&self, _: &mut Self::Target) -> RawResult<()>`
|
||||
|
||||
- **Interface Description**: Checks if the connection is still alive.
|
||||
- **Parameter Description**:
|
||||
- `_`: Mutable reference to the target connection.
|
||||
- **Return Value**: On success, returns an empty `RawResult`; on failure, returns an error.
|
||||
|
||||
- `fn ready(&self) -> bool`
|
||||
|
||||
- **Interface Description**: Checks if it is ready to connect.
|
||||
- **Return Value**: Mostly returns `true`, indicating the address is ready for connection.
|
||||
|
||||
|
@ -168,20 +166,24 @@ Executing SQL primarily uses the Taos struct, and obtaining the result set and m
|
|||
The Taos struct provides multiple database operation APIs, including: executing SQL, schema-less writing, and some common database query encapsulations (such as creating databases, fetching)
|
||||
|
||||
- `pub fn is_native(&self) -> bool`
|
||||
|
||||
- **Interface Description**: Determines if the connection uses a native protocol.
|
||||
- **Return Value**: Returns `true` if using a native protocol, otherwise returns `false`.
|
||||
|
||||
- `pub fn is_ws(&self) -> bool`
|
||||
|
||||
- **Interface Description**: Determines if the connection uses the WebSocket protocol.
|
||||
- **Return Value**: Returns `true` if using the WebSocket protocol, otherwise returns `false`.
|
||||
|
||||
- `fn query<T: AsRef<str>>(&self, sql: T) -> RawResult<Self::ResultSet>`
|
||||
|
||||
- **Interface Description**: Executes an SQL query.
|
||||
- **Parameter Description**:
|
||||
- `sql`: The SQL statement to execute.
|
||||
- **Return Value**: On success, returns a `RawResult` of the `ResultSet`; on failure, returns an error.
|
||||
|
||||
- `fn query_with_req_id<T: AsRef<str>>(&self, sql: T, req_id: u64) -> RawResult<Self::ResultSet>`
|
||||
|
||||
- **Interface Description**: Executes an SQL query with a request ID.
|
||||
- **Parameter Description**:
|
||||
- `sql`: The SQL statement to execute.
|
||||
|
@ -189,28 +191,33 @@ The Taos struct provides multiple database operation APIs, including: executing
|
|||
- **Return Value**: On success, returns a `RawResult` of the `ResultSet`; on failure, returns an error.
|
||||
|
||||
- `fn exec<T: AsRef<str>>(&self, sql: T) -> RawResult<usize>`
|
||||
|
||||
- **Interface Description**: Executes an SQL statement.
|
||||
- **Parameter Description**:
|
||||
- `sql`: The SQL statement to execute.
|
||||
- **Return Value**: On success, returns the number of affected rows; on failure, returns an error.
|
||||
|
||||
- `fn exec_many<T: AsRef<str>, I: IntoIterator<Item = T>>(&self, input: I) -> RawResult<usize>`
|
||||
|
||||
- **Interface Description**: Executes multiple SQL statements in batch.
|
||||
- **Parameter Description**:
|
||||
- `input`: Collection of SQL statements to execute.
|
||||
- **Return Value**: On success, returns the total number of affected rows; on failure, returns an error.
|
||||
|
||||
- `fn query_one<T: AsRef<str>, O: DeserializeOwned>(&self, sql: T) -> RawResult<Option<O>>`
|
||||
|
||||
- **Interface Description**: Executes an SQL query and returns a single result.
|
||||
- **Parameter Description**:
|
||||
- `sql`: The SQL statement to execute.
|
||||
- **Return Value**: On success, returns an optional result object; on failure, returns an error.
|
||||
|
||||
- `fn server_version(&self) -> RawResult<Cow<str>>`
|
||||
|
||||
- **Interface Description**: Gets the server version.
|
||||
- **Return Value**: On success, returns the server version string as a `RawResult`; on failure, returns an error.
|
||||
|
||||
- `fn create_topic(&self, name: impl AsRef<str>, sql: impl AsRef<str>) -> RawResult<()>`
|
||||
|
||||
- **Interface Description**: Creates a topic.
|
||||
- **Parameter Description**:
|
||||
- `name`: The name of the topic.
|
||||
|
@ -218,20 +225,24 @@ The Taos struct provides multiple database operation APIs, including: executing
|
|||
- **Return Value**: On success, returns an empty `RawResult`; on failure, returns an error.
|
||||
|
||||
- `fn databases(&self) -> RawResult<Vec<ShowDatabase>>`
|
||||
|
||||
- **Interface Description**: Retrieves a list of databases.
|
||||
- **Return Value**: On success, returns a list of databases as a `RawResult`; on failure, returns an error.
|
||||
|
||||
- `fn topics(&self) -> RawResult<Vec<Topic>>`
|
||||
|
||||
- **Interface Description**: Retrieves topic information.
|
||||
- **Return Value**: On success, returns a list of topics as a `RawResult`; on failure, returns an error.
|
||||
|
||||
- `fn describe(&self, table: &str) -> RawResult<Describe>`
|
||||
|
||||
- **Interface Description**: Describes the table structure.
|
||||
- **Parameter Description**:
|
||||
- `table`: The name of the table.
|
||||
- **Return Value**: On success, returns a description of the table structure as a `RawResult`; on failure, returns an error.
|
||||
|
||||
- `fn database_exists(&self, name: &str) -> RawResult<bool>`
|
||||
|
||||
- **Interface Description**: Checks if a database exists.
|
||||
- **Parameter Description**:
|
||||
- `name`: The name of the database.
|
||||
|
@ -243,11 +254,12 @@ The Taos struct provides multiple database operation APIs, including: executing
|
|||
- `data`: Schema-less data.
|
||||
- **Return Value**: On success, returns an empty `RawResult`; on failure, returns an error.
|
||||
|
||||
### SmlData
|
||||
### SmlData
|
||||
|
||||
The SmlData structure provides a data structure for schema-less writing and methods for accessing properties.
|
||||
|
||||
- `pub struct SmlData`
|
||||
|
||||
- **Structure Description**: The `SmlData` structure is used to store schema-less data and related information.
|
||||
- **Field Description**:
|
||||
- `protocol`: Schema-less protocol, supports InfluxDB `Line`, OpenTSDB `Telnet`, OpenTSDB `Json`.
|
||||
|
@ -257,18 +269,22 @@ The SmlData structure provides a data structure for schema-less writing and meth
|
|||
- `req_id`: Request ID.
|
||||
|
||||
- `pub fn protocol(&self) -> SchemalessProtocol`
|
||||
|
||||
- **Interface Description**: Gets the schema-less protocol.
|
||||
- **Return Value**: Schema-less protocol type, supports InfluxDB `Line`, OpenTSDB `Telnet`, OpenTSDB `Json`.
|
||||
|
||||
- `pub fn precision(&self) -> SchemalessPrecision`
|
||||
|
||||
- **Interface Description**: Gets the timestamp precision.
|
||||
- **Return Value**: Timestamp precision type, supports `Hours`, `Minutes`, `Seconds`, `Millisecond` (default), `Microsecond`, `Nanosecond`.
|
||||
|
||||
- `pub fn data(&self) -> &Vec<String>`
|
||||
|
||||
- **Interface Description**: Retrieves the list of data.
|
||||
- **Return Value**: Reference to the list of data.
|
||||
|
||||
- `pub fn ttl(&self) -> Option<i32>`
|
||||
|
||||
- **Interface Description**: Get the data time-to-live.
|
||||
- **Return Value**: Time-to-live of the data (optional), in seconds.
|
||||
|
||||
|
@ -276,41 +292,49 @@ The SmlData structure provides a data structure for schema-less writing and meth
|
|||
- **Interface Description**: Get the request ID.
|
||||
- **Return Value**: Request ID (optional).
|
||||
|
||||
### Result Retrieval
|
||||
### Result Retrieval
|
||||
|
||||
#### ResultSet
|
||||
|
||||
The ResultSet structure provides methods for accessing the data and metadata of the result set.
|
||||
|
||||
- `fn affected_rows(&self) -> i32`
|
||||
|
||||
- **Interface Description**: Get the number of affected rows.
|
||||
- **Return Value**: Number of affected rows, type `i32`.
|
||||
|
||||
- `fn precision(&self) -> Precision`
|
||||
|
||||
- **Interface Description**: Get precision information.
|
||||
- **Return Value**: Precision information, type `Precision`.
|
||||
|
||||
- `fn fields(&self) -> &[Field]`
|
||||
|
||||
- **Interface Description**: Get field information. See the Field structure description below.
|
||||
- **Return Value**: Reference to an array of field information.
|
||||
|
||||
- `fn summary(&self) -> (usize, usize)`
|
||||
|
||||
- **Interface Description**: Get summary information.
|
||||
- **Return Value**: A tuple containing two `usize` types, representing some statistical information.
|
||||
|
||||
- `fn num_of_fields(&self) -> usize`
|
||||
|
||||
- **Interface Description**: Get the number of fields.
|
||||
- **Return Value**: Number of fields, type `usize`.
|
||||
|
||||
- `fn blocks(&mut self) -> IBlockIter<'_, Self>`
|
||||
|
||||
- **Interface Description**: Get an iterator for the raw data blocks.
|
||||
- **Return Value**: Iterator for the raw data blocks, type `IBlockIter<'_, Self>`.
|
||||
|
||||
- `fn rows(&mut self) -> IRowsIter<'_, Self>`
|
||||
|
||||
- **Interface Description**: Get an iterator for row-wise querying.
|
||||
- **Return Value**: Iterator for row-wise querying, type `IRowsIter<'_, Self>`.
|
||||
|
||||
- `fn deserialize<T>(&mut self) -> Map<IRowsIter<'_, Self>, fn(_: Result<RowView<'_>, Error>) -> Result<T, Error>>`
|
||||
|
||||
- **Interface Description**: Deserialize row data.
|
||||
- **Generic Parameters**:
|
||||
- `T`: Target type, must implement `DeserializeOwned`.
|
||||
|
@ -320,15 +344,17 @@ The ResultSet structure provides methods for accessing the data and metadata of
|
|||
- **Interface Description**: Convert the result set into a two-dimensional vector of values.
|
||||
- **Return Value**: On success, returns a two-dimensional vector of values, on failure returns an error, type `Result<Vec<Vec<Value>>, Error>`.
|
||||
|
||||
#### Field
|
||||
#### Field
|
||||
|
||||
The Field structure provides methods for accessing field information.
|
||||
|
||||
- `pub const fn empty() -> Field`
|
||||
|
||||
- **Interface Description**: Create an empty `Field` instance.
|
||||
- **Return Value**: Returns an empty `Field` instance.
|
||||
|
||||
- `pub fn new(name: impl Into<String>, ty: Ty, bytes: u32) -> Field`
|
||||
|
||||
- **Interface Description**: Create a new `Field` instance.
|
||||
- **Parameter Description**:
|
||||
- `name`: Field name.
|
||||
|
@ -337,22 +363,27 @@ The Field structure provides methods for accessing field information.
|
|||
- **Return Value**: Returns a new `Field` instance.
|
||||
|
||||
- `pub fn name(&self) -> &str`
|
||||
|
||||
- **Interface Description**: Get the field name.
|
||||
- **Return Value**: Returns the field name.
|
||||
|
||||
- `pub fn escaped_name(&self) -> String`
|
||||
|
||||
- **Interface Description**: Get the escaped field name.
|
||||
- **Return Value**: Returns the escaped field name.
|
||||
|
||||
- `pub const fn ty(&self) -> Ty`
|
||||
|
||||
- **Interface Description**: Get the field type.
|
||||
- **Return Value**: Returns the field type.
|
||||
|
||||
- `pub const fn bytes(&self) -> u32`
|
||||
|
||||
- **Interface Description**: Get the preset length of the field.
|
||||
- **Return Value**: For variable-length data types, returns the preset length; for other types, returns the byte width.
|
||||
|
||||
- `pub fn to_c_field(&self) -> c_field_t`
|
||||
|
||||
- **Interface Description**: Converts a `Field` instance into a C language structure.
|
||||
- **Return Value**: Returns the field represented by a C language structure.
|
||||
|
||||
|
@ -364,17 +395,19 @@ The Field structure provides methods for accessing field information.
|
|||
|
||||
Parameter binding functionality is mainly supported by the Stmt structure.
|
||||
|
||||
#### Stmt
|
||||
#### Stmt
|
||||
|
||||
The Stmt structure provides functionality related to parameter binding, used for efficient writing.
|
||||
|
||||
- `fn init(taos: &Q) -> RawResult<Self>`
|
||||
|
||||
- **Interface Description**: Initialize the parameter binding instance.
|
||||
- **Parameter Description**:
|
||||
- `taos`: Database connection instance.
|
||||
- **Return Value**: On success, returns the initialized instance; on failure, returns an error.
|
||||
|
||||
- `fn init_with_req_id(taos: &Q, req_id: u64) -> RawResult<Self>`
|
||||
|
||||
- **Interface Description**: Initialize the parameter binding instance using a request ID.
|
||||
- **Parameter Description**:
|
||||
- `taos`: Database connection instance.
|
||||
|
@ -382,24 +415,28 @@ The Stmt structure provides functionality related to parameter binding, used for
|
|||
- **Return Value**: On success, returns the initialized instance; on failure, returns an error.
|
||||
|
||||
- `fn prepare<S: AsRef<str>>(&mut self, sql: S) -> RawResult<&mut Self>`
|
||||
|
||||
- **Interface Description**: Prepare the SQL statement to be bound.
|
||||
- **Parameter Description**:
|
||||
- `sql`: SQL statement to prepare.
|
||||
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
||||
|
||||
- `fn set_tbname<S: AsRef<str>>(&mut self, name: S) -> RawResult<&mut Self>`
|
||||
|
||||
- **Interface Description**: Set the table name.
|
||||
- **Parameter Description**:
|
||||
- `name`: Table name.
|
||||
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
||||
|
||||
- `fn set_tags(&mut self, tags: &[Value]) -> RawResult<&mut Self>`
|
||||
|
||||
- **Interface Description**: Set tags.
|
||||
- **Parameter Description**:
|
||||
- `tags`: Array of tags.
|
||||
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
||||
|
||||
- `fn set_tbname_tags<S: AsRef<str>>(&mut self, name: S, tags: &[Value]) -> RawResult<&mut Self>`
|
||||
|
||||
- **Interface Description**: Set the table name and tags.
|
||||
- **Parameter Description**:
|
||||
- `name`: Table name.
|
||||
|
@ -407,16 +444,19 @@ The Stmt structure provides functionality related to parameter binding, used for
|
|||
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
||||
|
||||
- `fn bind(&mut self, params: &[ColumnView]) -> RawResult<&mut Self>`
|
||||
|
||||
- **Interface Description**: Bind parameters.
|
||||
- **Parameter Description**:
|
||||
- `params`: Array of parameters.
|
||||
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
||||
|
||||
- `fn add_batch(&mut self) -> RawResult<&mut Self>`
|
||||
|
||||
- **Interface Description**: Add a batch.
|
||||
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
||||
|
||||
- `fn execute(&mut self) -> RawResult<usize>`
|
||||
|
||||
- **Interface Description**: Execute the statement.
|
||||
- **Return Value**: On success, returns the number of affected rows; on failure, returns an error.
|
||||
|
||||
|
@ -433,26 +473,31 @@ Data subscription mainly involves three structures, providing connection establi
|
|||
Similar to TaosBuilder, TmqBuilder provides the functionality to create consumer objects.
|
||||
|
||||
- `fn available_params() -> &'static [&'static str]`
|
||||
|
||||
- **Interface Description**: Get the list of available parameters in the DSN.
|
||||
- **Return Value**: Returns a reference to a static slice of strings, containing the names of available parameters.
|
||||
|
||||
- `fn from_dsn<D: IntoDsn>(dsn: D) -> RawResult<Self>`
|
||||
|
||||
- **Interface Description**: Create a connection using a DSN string, without checking the connection.
|
||||
- **Parameter Description**:
|
||||
- `dsn`: DSN string or a type that can be converted into DSN.
|
||||
- **Return Value**: On success, returns `RawResult` of its own type, on failure returns an error.
|
||||
|
||||
- `fn client_version() -> &'static str`
|
||||
|
||||
- **Interface Description**: Get the client version.
|
||||
- **Return Value**: Returns a static string of the client version.
|
||||
|
||||
- `fn ping(&self, conn: &mut Self::Target) -> RawResult<()>`
|
||||
|
||||
- **Interface Description**: Check if the connection is still alive.
|
||||
- **Parameter Description**:
|
||||
- `conn`: Mutable reference to the target connection.
|
||||
- **Return Value**: On success, returns an empty `RawResult`, on failure returns an error.
|
||||
|
||||
- `fn ready(&self) -> bool`
|
||||
|
||||
- **Interface Description**: Check if it is ready to connect.
|
||||
- **Return Value**: Mostly returns `true`, indicating that the address is ready to connect.
|
||||
|
||||
|
@ -465,24 +510,28 @@ Similar to TaosBuilder, TmqBuilder provides the functionality to create consumer
|
|||
The Consumer structure provides subscription-related functionalities, including subscribing, fetching messages, committing offsets, setting offsets, etc.
|
||||
|
||||
- `fn subscribe<T: Into<String>, I: IntoIterator<Item = T> + Send>(&mut self, topics: I) -> RawResult<()>`
|
||||
|
||||
- **Interface Description**: Subscribe to a series of topics.
|
||||
- **Parameter Description**:
|
||||
- `topics`: List of topics to subscribe to.
|
||||
- **Return Value**: On success, returns an empty `RawResult`, on failure returns an error.
|
||||
|
||||
- `fn recv_timeout(&self, timeout: Timeout) -> RawResult<Option<(Self::Offset, MessageSet<Self::Meta, Self::Data>)>>`
|
||||
|
||||
- **Interface Description**: Receive messages within a specified timeout period.
|
||||
- **Parameter Description**:
|
||||
- `timeout`: Timeout period.
|
||||
- **Return Value**: On success, returns messages, on failure returns an error.
|
||||
|
||||
- `fn commit(&self, offset: Self::Offset) -> RawResult<()>`
|
||||
|
||||
- **Interface Description**: Commit the given offset.
|
||||
- **Parameter Description**:
|
||||
- `offset`: The offset to commit, see the Offset structure below.
|
||||
- **Return Value**: On success, returns an empty `RawResult`, on failure returns an error.
|
||||
|
||||
- `fn commit_offset(&self, topic_name: &str, vgroup_id: VGroupId, offset: i64) -> RawResult<()>`
|
||||
|
||||
- **Interface Description**: Commit offset for a specific topic and partition.
|
||||
- **Parameter Description**:
|
||||
- `topic_name`: Topic name.
|
||||
|
@ -491,14 +540,17 @@ The Consumer structure provides subscription-related functionalities, including
|
|||
- **Return Value**: On success, returns an empty `RawResult`, on failure returns an error.
|
||||
|
||||
- `fn list_topics(&self) -> RawResult<Vec<String>>`
|
||||
|
||||
- **Interface Description**: List all available topics.
|
||||
- **Return Value**: On success, returns a list of topics, on failure returns an error.
|
||||
|
||||
- `fn assignments(&self) -> Option<Vec<(String, Vec<Assignment>)>>`
|
||||
|
||||
- **Interface Description**: Get the current assignments of topics and partitions.
|
||||
- **Return Value**: On success, returns assignment information, on failure returns `None`.
|
||||
|
||||
- `fn offset_seek(&mut self, topic: &str, vg_id: VGroupId, offset: i64) -> RawResult<()>`
|
||||
|
||||
- **Interface Description**: Set the offset for a specific topic and partition.
|
||||
- **Parameter Description**:
|
||||
- `topic`: Topic name.
|
||||
|
@ -507,6 +559,7 @@ The Consumer structure provides subscription-related functionalities, including
|
|||
- **Return Value**: On success, returns an empty `RawResult`, on failure returns an error.
|
||||
|
||||
- `fn committed(&self, topic: &str, vgroup_id: VGroupId) -> RawResult<i64>`
|
||||
|
||||
- **Interface Description**: Get the committed offset for a specific topic and partition.
|
||||
- **Parameter Description**:
|
||||
- `topic`: Topic name.
|
||||
|
@ -525,10 +578,12 @@ The Consumer structure provides subscription-related functionalities, including
|
|||
The Offset structure provides information about the database, topic, and partition to which the current message belongs.
|
||||
|
||||
- `fn database(&self) -> &str`
|
||||
|
||||
- **Interface Description**: Get the database name of the current message.
|
||||
- **Return Value**: Reference to the database name.
|
||||
|
||||
- `fn topic(&self) -> &str`
|
||||
|
||||
- **Interface Description**: Get the topic name of the current message.
|
||||
- **Return Value**: Reference to the topic name.
|
||||
|
||||
|
@ -536,7 +591,7 @@ The Offset structure provides information about the database, topic, and partiti
|
|||
- **Interface Description**: Get the partition ID of the current message.
|
||||
- **Return Value**: Partition ID.
|
||||
|
||||
## Appendix
|
||||
## Appendix
|
||||
|
||||
- [Rust connector documentation](https://docs.rs/taos)
|
||||
- [Rust connector project URL](https://github.com/taosdata/taos-connector-rust)
|
||||
|
|
|
@ -6,57 +6,73 @@ slug: /tdengine-reference/client-libraries/python
|
|||
|
||||
import Tabs from "@theme/Tabs";
|
||||
import TabItem from "@theme/TabItem";
|
||||
import RequestId from "./_request_id.mdx";
|
||||
import RequestId from "../../assets/resources/_request_id.mdx";
|
||||
|
||||
`taospy` is the official Python connector for TDengine. `taospy` provides a rich API, making it convenient for Python applications to use TDengine.
|
||||
`taospy` is the official connector provided by TDengine database for Python language, which provides multiple access interfaces for database writing, querying, subscribing, etc.
|
||||
|
||||
The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python).
|
||||
The installation command is as follows:
|
||||
``` bash
|
||||
# Native connection and REST connection
|
||||
pip3 install taospy
|
||||
|
||||
# WebSocket connection, optional installation
|
||||
pip3 install taos-ws-py
|
||||
```
|
||||
|
||||
The connector code is open sourced and hosted on Github [Taos Connector Python](https://github.com/taosdata/taos-connector-python).
|
||||
|
||||
## Connection Methods
|
||||
|
||||
`taospy` mainly provides three types of connectors. We generally recommend using **WebSocket Connection**.
|
||||
`taospy` provides three connection methods, and we recommend using WebSocket connection.
|
||||
|
||||
- **Native Connection**, corresponding to the `taos` module of the `taospy` package. Connects to a TDengine instance natively through the TDengine client driver (taosc), supporting data writing, querying, data subscription, schemaless interface, and parameter binding interface.
|
||||
- **REST Connection**, corresponding to the `taosrest` module of the `taospy` package. Connects to a TDengine instance through the HTTP interface provided by taosAdapter, does not support schemaless and data subscription features.
|
||||
- **WebSocket Connection**, corresponding to the `taos-ws-py` package, which is optional. Connects to a TDengine instance through the WebSocket interface provided by taosAdapter, with a feature set slightly different from the native connection.
|
||||
- **Native Connection**, Python connector loads TDengine client driver (libtaos.so/taos.dll), directly connects to TDengine instance, with high performance and fast speed.
|
||||
Functionally, it supports functions such as data writing, querying, data subscription, schemaless interface, and parameter binding interface.
|
||||
- **REST Connection**, The Python connector connects to the TDengine instance through the HTTP interface provided by the taosAdapter, with minimal dependencies and no need to install the TDengine client driver.
|
||||
Functionality does not support features such as schemaless and data subscription.
|
||||
- **WebSocket Connection**, The Python connector connects to the TDengine instance through the WebSocket interface provided by the taosAdapter, which combines the advantages of the first two types of connections, namely high performance and low dependency.
|
||||
In terms of functionality, there are slight differences between the WebSocket connection implementation feature set and native connections.
|
||||
|
||||
For a detailed introduction to connection methods, please refer to: [Connection Methods](../../../developer-guide/connecting-to-tdengine/)
|
||||
For a detailed introduction of the connection method, please refer to: [Connection Method](../../../developer-guide/connecting-to-tdengine/)
|
||||
|
||||
In addition to encapsulating the native and REST interfaces, `taospy` also provides a programming interface compliant with [Python Data Access Standard (PEP 249)](https://peps.python.org/pep-0249/). This makes `taospy` easily integrated with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
|
||||
In addition to encapsulating the Native and REST interfaces, `taospy` also provides a programming interface compliant with [the Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/).
|
||||
This makes it easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
|
||||
|
||||
The method of establishing a connection directly with the server using the native interface provided by the client driver is referred to as "Native Connection"; the method of establishing a connection with the server using the REST interface or WebSocket interface provided by taosAdapter is referred to as "REST Connection" or "WebSocket Connection".
|
||||
The method of establishing a connection directly with the server using the native interface provided by the client driver is referred to as "Native Connection" in the following text;
|
||||
The method of establishing a connection with the server using the REST interface or WebSocket interface provided by the taosAdapter is referred to as a "REST Connection" or "WebSocket connection" in the following text.
|
||||
|
||||
## Python Version Compatibility
|
||||
|
||||
Supports Python 3.0 and above.
|
||||
|
||||
## Supported Platforms
|
||||
|
||||
- Native Connection [Supported Platforms](../../supported-platforms/) are consistent with the platforms supported by the TDengine client.
|
||||
- REST Connection supports all platforms that can run Python.
|
||||
- The platforms supported by native connections are consistent with those supported by the TDengine client driver.
|
||||
- WebSocket/REST connections support all platforms that can run Python.
|
||||
|
||||
### Supported Features
|
||||
## Versions History
|
||||
|
||||
- Native Connection supports all core features of TDengine, including: connection management, executing SQL, parameter binding, subscription, schemaless writing.
|
||||
- REST Connection supports features including: connection management, executing SQL. (Through executing SQL, you can: manage databases, manage tables and supertables, write data, query data, create continuous queries, etc.)
|
||||
Python Connector historical versions (it is recommended to use the latest version of `taospy`):
|
||||
|
||||
## Version History
|
||||
|Python Connector Version | Major Changes | TDengine Version|
|
||||
| -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- |
|
||||
|2.7.18 | Support Apache SuperSet BI Tools. | - |
|
||||
|2.7.16 | Added subscription configuration (session.timeout.ms, max.poll.interval.ms). | - |
|
||||
|2.7.15 | Added support for VARBINARY and GEOMETRY types. | - |
|
||||
|2.7.14 | Fix Known Issues. | - |
|
||||
|2.7.13 | Added tmq synchronous submission offset interface. | - |
|
||||
|2.7.12 | 1. Added support for varbinary type (STMT currently does not support varbinary). <br/> 2 Query performance improvement (thanks to contributor [hadrianl](https://github.com/taosdata/taos-connector-python/pull/209) ). | 3.1.1.2 and higher|
|
||||
|2.7.9 | Data subscription supports obtaining and resetting consumption progress. | 3.0.2.6 and higher|
|
||||
|2.7.8 | Added `execute_many`. | 3.0.0.0 and higher|
|
||||
|
||||
Regardless of the version of TDengine used, it is recommended to use the latest version of `taospy`.
|
||||
WebSocket Connector Historical Versions:
|
||||
|
||||
|Python Connector Version|Main Changes|
|
||||
|:-------------------:|:----:|
|
||||
|2.7.16|Added subscription configuration (session.timeout.ms, max.poll.interval.ms)|
|
||||
|2.7.15|Added support for VARBINARY and GEOMETRY types|
|
||||
|2.7.14|Fixed known issues|
|
||||
|2.7.13|Added tmq synchronous commit offset interface|
|
||||
|2.7.12|1. Added support for varbinary type (STMT does not support varbinary yet) <br/> 2. Improved query performance (thanks to contributor [hadrianl](https://github.com/taosdata/taos-connector-python/pull/209))|
|
||||
|2.7.9|Data subscription supports obtaining consumption progress and resetting consumption progress|
|
||||
|2.7.8|Added `execute_many`|
|
||||
|
||||
|Python WebSocket Connector Version|Major Changes|
|
||||
|:----------------------------:|:-----:|
|
||||
|0.3.5|Added support for VARBINARY and GEOMETRY types, fixed known issues|
|
||||
|0.3.2|Optimized WebSocket SQL query and insertion performance, updated readme and documentation, fixed known issues|
|
||||
|0.2.9|Fixed known issues|
|
||||
|0.2.5|1. Data subscription supports obtaining and resetting consumption progress <br/> 2. Supports schemaless <br/> 3. Supports STMT|
|
||||
|0.2.4|Data subscription adds unsubscribe method|
|
||||
|WebSocket Connector Version | Major Changes | TDengine Version|
|
||||
| ----------------------- | -------------------------------------------------------------------------------------------------- | ----------------- |
|
||||
|0.3.5 | Added support for VARBINARY and GEOMETRY types, fixed known issues. | 3.3.0.0 and higher|
|
||||
|0.3.2 | Optimize WebSocket SQL query and insertion performance, modify readme and documentation, fix known issues. | 3.2.3.0 and higher|
|
||||
|0.2.9 | Known issue fixes. | - |
|
||||
|0.2.5 | 1. Data subscription supports obtaining and resetting consumption progress. <br/>2 Support schemaless. <br/>3 Support STMT. | - |
|
||||
|0.2.4 | Data Subscription Addition/Unsubscribe Method. | 3.0.5.0 and higher|
|
||||
|
||||
## Exception Handling
|
||||
|
||||
|
@ -69,7 +85,7 @@ The Python connector may generate 4 types of exceptions:
|
|||
- For other TDengine module errors, please refer to [Error Codes](../../error-codes/)
|
||||
|
||||
|Error Type|Description|Suggested Actions|
|
||||
|:--------:|:---------:|:---------------:|
|
||||
|:---------|:----------|:----------------|
|
||||
|InterfaceError|taosc version too low, does not support the used interface|Please check the TDengine client version|
|
||||
|ConnectionError|Database connection error|Please check the TDengine server status and connection parameters|
|
||||
|DatabaseError|Database error|Please check the TDengine server version and upgrade the Python connector to the latest version|
|
||||
|
@ -94,7 +110,7 @@ All database operations in the Python Connector, if an exception occurs, will be
|
|||
TDengine currently supports timestamp, numeric, character, boolean types, and the corresponding Python type conversions are as follows:
|
||||
|
||||
|TDengine DataType|Python DataType|
|
||||
|:---------------:|:-------------:|
|
||||
|:---------------|:--------------|
|
||||
|TIMESTAMP|datetime|
|
||||
|INT|int|
|
||||
|BIGINT|int|
|
||||
|
@ -129,7 +145,7 @@ Example program source code can be found at:
|
|||
## About Nanosecond (nanosecond)
|
||||
|
||||
Due to the current imperfect support for nanoseconds in Python (see the links below), the current implementation returns an integer when nanosecond precision is used, rather than the datetime type returned for ms and us. Application developers need to handle this themselves, and it is recommended to use pandas' to_datetime(). If Python officially fully supports nanoseconds in the future, the Python connector may modify the relevant interfaces.
|
||||
|
||||
|
||||
## Common Questions
|
||||
|
||||
Feel free to [ask questions or report issues](https://github.com/taosdata/taos-connector-python/issues).
|
||||
|
|
|
@ -6,29 +6,27 @@ slug: /tdengine-reference/client-libraries/node
|
|||
|
||||
import Tabs from "@theme/Tabs";
|
||||
import TabItem from "@theme/TabItem";
|
||||
import RequestId from "./_request_id.mdx";
|
||||
import RequestId from "../../assets/resources/_request_id.mdx";
|
||||
|
||||
`@tdengine/websocket` is the official Node.js language connector for TDengine. Node.js developers can use it to develop applications that access the TDengine database.
|
||||
|
||||
The source code for the Node.js connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-node/tree/main).
|
||||
|
||||
## Connection Method
|
||||
## Node.js Version Compatibility
|
||||
|
||||
The Node.js connector currently only supports WebSocket connections, which connect to a TDengine instance through the WebSocket interface provided by taosAdapter.
|
||||
|
||||
For a detailed introduction to the connection method, please refer to: [Connection Method](../../../developer-guide/connecting-to-tdengine/)
|
||||
Supports Node.js 14 and above.
|
||||
|
||||
## Supported Platforms
|
||||
|
||||
Supports Node.js version 14 and above.
|
||||
Support all platforms that can run Node.js.
|
||||
|
||||
## Version History
|
||||
|
||||
| Node.js Connector Version | Major Changes | TDengine Version |
|
||||
| :------------------: | :----------------------: | :----------------: |
|
||||
| 3.1.2 | Optimized data protocol and parsing, significantly improved performance| 3.3.2.0 and higher versions |
|
||||
| 3.1.1 | Optimized data transmission performance | 3.3.2.0 and higher versions |
|
||||
| 3.1.0 | New release, supports WebSocket connection | 3.2.0.0 and higher versions |
|
||||
| Node.js Connector Version | Major Changes | TDengine Version |
|
||||
| ------------------------- | ------------------------------------------------------------------------ | --------------------------- |
|
||||
| 3.1.2 | Optimized data protocol and parsing, significantly improved performance. | - |
|
||||
| 3.1.1 | Optimized data transmission performance. | 3.3.2.0 and higher versions |
|
||||
| 3.1.0 | New release, supports WebSocket connection. | 3.2.0.0 and higher versions |
|
||||
|
||||
## Exception Handling
|
||||
|
||||
|
@ -38,19 +36,19 @@ Error description: Node.js connector error codes range from 100 to 110, errors o
|
|||
|
||||
For specific connector error codes, please refer to:
|
||||
|
||||
| Error Code | Description | Suggested Actions |
|
||||
| ---------- | -------------------------------------------------------------| ----------------------------------------------------------------------------------------- |
|
||||
| 100 | invalid variables | The parameters are illegal, please check the corresponding interface specifications and adjust the parameter types and sizes. |
|
||||
| 101 | invalid url | URL error, please check if the URL is correctly filled. |
|
||||
| 102 | received server data but did not find a callback for processing | Received server data but no upper layer callback was found |
|
||||
| 103 | invalid message type | Received message type unrecognized, please check if the server is normal. |
|
||||
| 104 | connection creation failed | Connection creation failed, please check if the network is normal. |
|
||||
| 105 | websocket request timeout | Request timed out |
|
||||
| 106 | authentication fail | Authentication failed, please check if the username and password are correct. |
|
||||
| 107 | unknown sql type in tdengine | Please check the Data Type types supported by TDengine. |
|
||||
| 108 | connection has been closed | The connection has been closed, please check if the Connection is used again after closing, or if the connection is normal. |
|
||||
| 109 | fetch block data parse fail | Failed to parse the fetched query data |
|
||||
| 110 | websocket connection has reached its maximum limit | WebSocket connection has reached its maximum limit |
|
||||
| Error Code | Description | Suggested Actions |
|
||||
| ---------- | --------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 100 | invalid variables | The parameters are illegal, please check the corresponding interface specifications and adjust the parameter types and sizes. |
|
||||
| 101 | invalid url | URL error, please check if the URL is correctly filled. |
|
||||
| 102 | received server data but did not find a callback for processing | Received server data but no upper layer callback was found |
|
||||
| 103 | invalid message type | Received message type unrecognized, please check if the server is normal. |
|
||||
| 104 | connection creation failed | Connection creation failed, please check if the network is normal. |
|
||||
| 105 | websocket request timeout | Request timed out |
|
||||
| 106 | authentication fail | Authentication failed, please check if the username and password are correct. |
|
||||
| 107 | unknown sql type in tdengine | Please check the Data Type types supported by TDengine. |
|
||||
| 108 | connection has been closed | The connection has been closed, please check if the Connection is used again after closing, or if the connection is normal. |
|
||||
| 109 | fetch block data parse fail | Failed to parse the fetched query data |
|
||||
| 110 | websocket connection has reached its maximum limit | WebSocket connection has reached its maximum limit |
|
||||
|
||||
- [TDengine Node.js Connector Error Code](https://github.com/taosdata/taos-connector-node/blob/main/nodejs/src/common/wsError.ts)
|
||||
- For errors from other TDengine modules, please refer to [Error Codes](../../error-codes/)
|
||||
|
@ -59,38 +57,38 @@ For specific connector error codes, please refer to:
|
|||
|
||||
The table below shows the mapping between TDengine DataType and Node.js DataType
|
||||
|
||||
| TDengine DataType | Node.js DataType|
|
||||
|-------------------|-------------|
|
||||
| TIMESTAMP | bigint |
|
||||
| TINYINT | number |
|
||||
| SMALLINT | number |
|
||||
| INT | number |
|
||||
| BIGINT | bigint |
|
||||
| TINYINT UNSIGNED | number |
|
||||
| SMALLINT UNSIGNED | number |
|
||||
| INT UNSIGNED | number |
|
||||
| BIGINT UNSIGNED | bigint |
|
||||
| FLOAT | number |
|
||||
| DOUBLE | number |
|
||||
| BOOL | boolean |
|
||||
| BINARY | string |
|
||||
| NCHAR | string |
|
||||
| JSON | string |
|
||||
| VARBINARY | ArrayBuffer |
|
||||
| GEOMETRY | ArrayBuffer |
|
||||
| TDengine DataType | Node.js DataType |
|
||||
| ----------------- | ---------------- |
|
||||
| TIMESTAMP | bigint |
|
||||
| TINYINT | number |
|
||||
| SMALLINT | number |
|
||||
| INT | number |
|
||||
| BIGINT | bigint |
|
||||
| TINYINT UNSIGNED | number |
|
||||
| SMALLINT UNSIGNED | number |
|
||||
| INT UNSIGNED | number |
|
||||
| BIGINT UNSIGNED | bigint |
|
||||
| FLOAT | number |
|
||||
| DOUBLE | number |
|
||||
| BOOL | boolean |
|
||||
| BINARY | string |
|
||||
| NCHAR | string |
|
||||
| JSON | string |
|
||||
| VARBINARY | ArrayBuffer |
|
||||
| GEOMETRY | ArrayBuffer |
|
||||
|
||||
**Note**: JSON type is only supported in tags.
|
||||
|
||||
## More Example Programs
|
||||
|
||||
| Example Program | Description of Example Program |
|
||||
| ------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------- |
|
||||
| [sql_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/sql_example.js) | Basic usage such as establishing connections, executing SQL, etc. |
|
||||
| [stmt_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/stmt_example.js) | Example of binding parameters for insertion. |
|
||||
| [line_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/line_example.js) | Line protocol writing example. |
|
||||
| [tmq_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/tmq_example.js) | Example of using subscriptions. |
|
||||
| [all_type_query](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/all_type_query.js) | Example supporting all types. |
|
||||
| [all_type_stmt](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/all_type_stmt.js) | Example of parameter binding supporting all types. |
|
||||
| Example Program | Description of Example Program |
|
||||
| ---------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
|
||||
| [sql_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/sql_example.js) | Basic usage such as establishing connections, executing SQL, etc. |
|
||||
| [stmt_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/stmt_example.js) | Example of binding parameters for insertion. |
|
||||
| [line_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/line_example.js) | Line protocol writing example. |
|
||||
| [tmq_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/tmq_example.js) | Example of using subscriptions. |
|
||||
| [all_type_query](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/all_type_query.js) | Example supporting all types. |
|
||||
| [all_type_stmt](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/all_type_stmt.js) | Example of parameter binding supporting all types. |
|
||||
|
||||
## Usage Restrictions
|
||||
|
||||
|
|
|
@ -6,42 +6,31 @@ slug: /tdengine-reference/client-libraries/csharp
|
|||
|
||||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
import RequestId from "./_request_id.mdx";
|
||||
import RequestId from "../../assets/resources/_request_id.mdx";
|
||||
|
||||
`TDengine.Connector` is the C# language connector provided by TDengine. C# developers can use it to develop C# applications that access data in the TDengine cluster.
|
||||
|
||||
## Connection Methods
|
||||
## .Net Version Compatibility
|
||||
|
||||
`TDengine.Connector` provides two types of connectors:
|
||||
|
||||
* **Native Connection**, which connects to a TDengine instance natively through the TDengine client driver (taosc), supporting data writing, querying, data subscription, schemaless interfaces, and parameter binding interfaces.
|
||||
* **WebSocket Connection**, which connects to a TDengine instance through the WebSocket interface provided by taosAdapter, with a slightly different set of features implemented compared to the native connection. (From v3.0.1 onwards)
|
||||
|
||||
For a detailed introduction to connection methods, please refer to: [Connection Methods](../../../developer-guide/connecting-to-tdengine/)
|
||||
|
||||
## Compatibility
|
||||
|
||||
* `TDengine.Connector` version 3.1.0 has been completely restructured and is no longer compatible with versions 3.0.2 and earlier. For documentation on version 3.0.2, please refer to [nuget](https://www.nuget.org/packages/TDengine.Connector/3.0.2)
|
||||
* `TDengine.Connector` 3.x is not compatible with TDengine 2.x. If you need to use the C# connector in an environment running TDengine 2.x, please use version 1.x of TDengine.Connector.
|
||||
- Supports .NET Framework 4.6 and above.
|
||||
- Supports .NET 5.0 and above.
|
||||
|
||||
## Supported Platforms
|
||||
|
||||
The supported platforms are consistent with those supported by the TDengine client driver.
|
||||
- Native connection supports the same platforms as the TDengine client driver.
|
||||
- WebSocket connection supports all platforms that can run the .NET runtime.
|
||||
|
||||
:::warning
|
||||
TDengine no longer supports the 32-bit Windows platform.
|
||||
:::
|
||||
## Version History
|
||||
|
||||
## Version Support
|
||||
| Connector Version | Major Changes | TDengine Version |
|
||||
|------------------|-------------------------------------------------|-------------------|
|
||||
| 3.1.4 | Improved WebSocket query and insert performance. | 3.3.2.0 and higher |
|
||||
| 3.1.3 | Supported WebSocket auto-reconnect. | - |
|
||||
| 3.1.2 | Fixed schemaless resource release. | - |
|
||||
| 3.1.1 | Supported varbinary and geometry types. | - |
|
||||
| 3.1.0 | WebSocket uses a native C# implementation. | 3.2.1.0 and higher |
|
||||
|
||||
| **Connector Version** | **TDengine Version** | **Main Features** |
|
||||
|------------------|------------------|----------------------------|
|
||||
| 3.1.3 | 3.2.1.0/3.1.1.18 | Supports WebSocket auto-reconnect |
|
||||
| 3.1.2 | 3.2.1.0/3.1.1.18 | Fixes schemaless resource release |
|
||||
| 3.1.1 | 3.2.1.0/3.1.1.18 | Supports varbinary and geometry types |
|
||||
| 3.1.0 | 3.2.1.0/3.1.1.18 | Native implementation of WebSocket |
|
||||
|
||||
## Exception Handling
|
||||
## Exceptions and Error Codes
|
||||
|
||||
`TDengine.Connector` will throw exceptions, and applications need to handle these exceptions. The taosc exception type `TDengineError` includes an error code and error message, which applications can use to handle the error.
|
||||
For error reporting in other TDengine modules, please refer to [Error Codes](../../error-codes/)
|
||||
|
|
|
@ -6,7 +6,7 @@ slug: /tdengine-reference/client-libraries/r-lang
|
|||
|
||||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
import Rdemo from "../../07-develop/_connect_r.mdx"
|
||||
import Rdemo from "../../assets/resources/_connect_r.mdx"
|
||||
|
||||
The RJDBC library in R language can enable R language programs to access TDengine data. Here are the installation process, configuration process, and R language example code.
|
||||
|
||||
|
|
|
@ -20,7 +20,7 @@ TDengine ODBC provides both 64-bit and 32-bit drivers. However, the 32-bit versi
|
|||
|
||||
## ODBC Version Compatibility
|
||||
|
||||
- Supports ODBC 3.8 and all previous versions.
|
||||
Supports all ODBC versions.
|
||||
|
||||
## Installation
|
||||
|
||||
|
@ -119,12 +119,12 @@ In addition to this, the WebSocket connection method also supports 32-bit applic
|
|||
|
||||
## Version History
|
||||
|
||||
| taos_odbc Version | Main Changes | TDengine Version |
|
||||
| :----------- | :-------------------------------------------------------------------------------------------------- | :---------------- |
|
||||
| v1.1.0 | 1. Supports view functionality;<br/>2. Supports VARBINARY/GEOMETRY data types;<br/>3. Supports ODBC 32-bit WebSocket connection method (Enterprise edition only);<br/>4. Supports ODBC data source configuration dialog settings for compatibility adaptation options for industrial software like KingSCADA, Kepware, etc. (Enterprise edition only); | 3.3.3.0 and higher |
|
||||
| v1.0.2 | Supports CP1252 character encoding; | 3.2.3.0 and higher |
|
||||
| v1.0.1 | 1. Supports DSN settings for BI mode, in BI mode TDengine database does not return system database and supertable subtable information;<br/>2. Refactored character set conversion module, improving read and write performance;<br/>3. Default connection method in ODBC data source configuration dialog changed to "WebSocket";<br/>4. Added "Test Connection" control in ODBC data source configuration dialog;<br/>5. ODBC data source configuration supports Chinese/English interface; | - |
|
||||
| v1.0.0.0 | Initial release, supports interacting with Tdengine database to read and write data, refer to the "API Reference" section for details | 3.2.2.0 and higher |
|
||||
| taos_odbc Version | Major Changes | TDengine Version |
|
||||
| ----------- | -------------------------------------------------------------------------------------------------- | ---------------- |
|
||||
| v1.1.0 | 1. Supports view functionality. <br/>2. Supports VARBINARY/GEOMETRY data types. <br/>3. Supports ODBC 32-bit WebSocket connection method (Enterprise edition only). <br/>4. Supports ODBC data source configuration dialog settings for compatibility adaptation options for industrial software like KingSCADA, Kepware, etc. (Enterprise edition only). | 3.3.3.0 and higher |
|
||||
| v1.0.2 | Supports CP1252 character encoding. | 3.2.3.0 and higher |
|
||||
| v1.0.1 | 1. Supports DSN settings for BI mode, in BI mode TDengine database does not return system database and supertable subtable information. <br/>2. Refactored character set conversion module, improving read and write performance. <br/> 3. Default connection method in ODBC data source configuration dialog changed to "WebSocket". <br/>4. Added "Test Connection" control in ODBC data source configuration dialog. <br/>5. ODBC data source configuration supports Chinese/English interface. | - |
|
||||
| v1.0.0.0 | Initial release, supports interacting with Tdengine database to read and write data, refer to the "API Reference" section for details. | 3.2.2.0 and higher |
|
||||
|
||||
## Data Type Mapping
|
||||
|
||||
|
|
|
@ -7,12 +7,12 @@ import Tabs from "@theme/Tabs";
|
|||
import TabItem from "@theme/TabItem";
|
||||
import Image from '@theme/IdealImage';
|
||||
import imgClientLib from '../../assets/client-libraries-01.png';
|
||||
import InstallOnLinux from "./_linux_install.mdx";
|
||||
import InstallOnWindows from "./_windows_install.mdx";
|
||||
import InstallOnMacOS from "./_macos_install.mdx";
|
||||
import VerifyWindows from "./_verify_windows.mdx";
|
||||
import VerifyLinux from "./_verify_linux.mdx";
|
||||
import VerifyMacOS from "./_verify_macos.mdx";
|
||||
import InstallOnLinux from "../../assets/resources/_linux_install.mdx";
|
||||
import InstallOnWindows from "../../assets/resources/_windows_install.mdx";
|
||||
import InstallOnMacOS from "../../assets/resources/_macos_install.mdx";
|
||||
import VerifyWindows from "../../assets/resources/_verify_windows.mdx";
|
||||
import VerifyLinux from "../../assets/resources/_verify_linux.mdx";
|
||||
import VerifyMacOS from "../../assets/resources/_verify_macos.mdx";
|
||||
|
||||
TDengine provides a rich set of application development interfaces. To facilitate users in quickly developing their own applications, TDengine supports connectors for multiple programming languages, including official connectors for C/C++, Java, Python, Go, Node.js, C#, and Rust. These connectors support connecting to the TDengine cluster using the native interface (taosc) and WebSocket interface. Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, Lua connector, and PHP connector.
|
||||
|
||||
|
|
|
@ -72,6 +72,8 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x80000133 | Invalid operation | Invalid or unsupported operation | 1. Modify to confirm the current operation is legal and supported, check parameter validity 2. If the problem persists, preserve the scene and logs, report issue on github |
|
||||
| 0x80000134 | Invalid value | Invalid value | Preserve the scene and logs, report issue on github |
|
||||
| 0x80000135 | Invalid fqdn | Invalid FQDN | Check if the configured or input FQDN value is correct |
|
||||
| 0x8000013C | Invalid disk id | Invalid disk id | Check users whether the mounted disk is invalid or use the parameter diskIDCheckEnabled to skip the disk check. |
|
||||
|
||||
|
||||
## tsc
|
||||
|
||||
|
@ -129,7 +131,7 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x80000350 | User already exists | Create user, duplicate creation | Confirm if the operation is correct |
|
||||
| 0x80000351 | Invalid user | User does not exist | Confirm if the operation is correct |
|
||||
| 0x80000352 | Invalid user format | Incorrect format | Confirm if the operation is correct |
|
||||
| 0x80000353 | Invalid password format | Incorrect format | Confirm if the operation is correct |
|
||||
| 0x80000353 | Invalid password format | The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. | Confirm the format of the password string |
|
||||
| 0x80000354 | Can not get user from conn | Internal error | Report issue |
|
||||
| 0x80000355 | Too many users | (Enterprise only) Exceeding user limit | Adjust configuration |
|
||||
| 0x80000357 | Authentication failure | Incorrect password | Confirm if the operation is correct |
|
||||
|
@ -284,6 +286,9 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x80000729 | Task message error | Query message error | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x8000072B | Task status error | Subquery status error | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x8000072F | Job not exist | Query JOB no longer exists | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80000739 | Query memory upper limit is reached | Single query memory upper limit is reached | Modify memory upper limit size or optimize SQL |
|
||||
| 0x8000073A | Query memory exhausted | Query memory in dnode is exhausted | Limit concurrent queries or add more physical memory |
|
||||
| 0x8000073B | Timeout for long time no fetch | Query without fetch for a long time | Correct application to fetch data asap |
|
||||
|
||||
## grant
|
||||
|
||||
|
@ -381,7 +386,7 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x8000260D | Tags number not matched | Mismatched number of tag columns | Check and correct the SQL statement |
|
||||
| 0x8000260E | Invalid tag name | Invalid or non-existent tag name | Check and correct the SQL statement |
|
||||
| 0x80002610 | Value is too long | Value length exceeds limit | Check and correct the SQL statement or API parameters |
|
||||
| 0x80002611 | Password can not be empty | Password is empty | Use a valid password |
|
||||
| 0x80002611 | Password too short or empty | Password is empty or less than 8 chars | Use a valid password |
|
||||
| 0x80002612 | Port should be an integer that is less than 65535 and greater than 0 | Illegal port number | Check and correct the port number |
|
||||
| 0x80002613 | Endpoint should be in the format of 'fqdn:port' | Incorrect address format | Check and correct the address information |
|
||||
| 0x80002614 | This statement is no longer supported | Feature has been deprecated | Refer to the feature documentation |
|
||||
|
|
|
@ -328,8 +328,35 @@ In addition to precomputation, TDengine also supports various downsampling stora
|
|||
|
||||
### Multi-Level Storage and Object Storage
|
||||
|
||||
By default, TDengine stores all data in the /var/lib/taos directory. To expand storage capacity, reduce potential bottlenecks caused by file reading, and enhance data throughput, TDengine allows the use of the configuration parameter `dataDir` to enable the cluster to utilize multiple mounted hard drives simultaneously.
|
||||
By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter "dataDir" to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter "dataDir".
|
||||
|
||||
dataDir format is as follows:
|
||||
|
||||
```
|
||||
dataDir data_path [tier_level] [primary] [disable_create_new_file]
|
||||
```
|
||||
|
||||
Where `data_path` is the folder path of mount point, and `tier_level` is the media storage-tier. The higher the media storage-tier, means the older the data file. Multiple hard disks can be mounted at the same storage-tier, and data files on the same storage-tier are distributed on all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be only one mount path without specifying tier_level, which is called special mount disk (path). The mount path defaults to level 0 storage media and contains special file links, which cannot be removed, otherwise it will have a devastating impact on the written data. And `primary` means whether the data dir is the primary mount point. Enter 0 for false or 1 for true. The default value is 1. A TDengine cluster can have only one `primary` mount point, which must be on tier 0. And `disable_create_new_file` means whether to prohibit the creation of new file sets on the specified mount point. Enter 0 for false and 1 for true. The default value is 0. Tier 0 storage must have at least one mount point with disable_create_new_file set to 0. Tier 1 and tier 2 storage do not have this restriction.
|
||||
|
||||
Suppose there is a physical node with six mountable hard disks/mnt/disk1,/mnt/disk2, ..., /mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 are level 1 storage media, and disk5 and disk6 are level 2 storage media. Disk1 is a special mount disk, you can configure it in/etc/taos/taos.cfg as follows:
|
||||
|
||||
```
|
||||
dataDir /mnt/disk1/taos 0 1 0
|
||||
dataDir /mnt/disk2/taos 0 0 0
|
||||
dataDir /mnt/disk3/taos 1 0 0
|
||||
dataDir /mnt/disk4/taos 1 0 1
|
||||
dataDir /mnt/disk5/taos 2 0 0
|
||||
dataDir /mnt/disk6/taos 2 0 0
|
||||
```
|
||||
|
||||
Mounted disks can also be a non-local network disk, as long as the system can access it.
|
||||
|
||||
You can use the following command to dynamically modify dataDir to control whether disable_create_new_file is enabled for the current directory.
|
||||
|
||||
```
|
||||
alter dnode 1 "/mnt/disk2/taos 1";
|
||||
```
|
||||
|
||||
Note: Tiered Storage is only supported in Enterprise Edition
|
||||
|
||||
Additionally, TDengine offers tiered data storage functionality, allowing users to store data from different time periods in directories on different storage devices. This facilitates the separation of "hot" data (frequently accessed) and "cold" data (less frequently accessed), making full use of various storage resources while saving costs. For example, data that is recently collected and requires frequent access can be stored on high-performance solid-state drives due to their high read performance requirements. Data that exceeds a certain age and has lower query demands can be stored on mechanically driven hard disks, which are relatively cheaper.
|
||||
|
||||
To further reduce storage costs, TDengine also supports storing time-series data in object storage systems. Through its innovative design, in most cases, the performance of querying time-series data from object storage systems is close to half that of local disks, and in some scenarios, the performance can even be comparable to local disks. Additionally, TDengine allows users to perform delete and update operations on time-series data stored in object storage.
|
||||
|
|
|
@ -286,4 +286,14 @@ This connection only reports the most basic information that does not involve an
|
|||
This feature is an optional configuration item, which is enabled by default in the open-source version. The specific parameter is telemetryReporting, as explained in the [official documentation](../tdengine-reference/components/taosd/).
|
||||
You can disable this parameter at any time by modifying telemetryReporting to 0 in taos.cfg, then restarting the database service.
|
||||
Code located at: [https://github.com/taosdata/TDengine/blob/62e609c558deb764a37d1a01ba84bc35115a85a4/source/dnode/mnode/impl/src/mndTelem.c](https://github.com/taosdata/TDengine/blob/62e609c558deb764a37d1a01ba84bc35115a85a4/source/dnode/mnode/impl/src/mndTelem.c).
|
||||
Additionally, for the highly secure enterprise version, TDengine Enterprise, this parameter will not be operational.
|
||||
Additionally, for the highly secure enterprise version, TDengine Enterprise, this parameter will not be operational.
|
||||
|
||||
### 31 What should I do if I encounter 'Sync leader is unreachable' when connecting to the cluster for the first time?
|
||||
|
||||
Reporting this error indicates that the first connection to the cluster was successful, but the IP address accessed for the first time was not the leader of mnode. An error occurred when the client attempted to establish a connection with the leader. The client searches for the leader node through EP, which specifies the fqdn and port number. There are two common reasons for this error:
|
||||
|
||||
- The ports of other dnodes in the cluster are not open
|
||||
- The client's hosts file is not configured correctly
|
||||
|
||||
Therefore, first, check whether all ports on the server and cluster (default 6030 for native connections and 6041 for HTTP connections) are open; Next, check if the client's hosts file has configured the fqdn and IP information for all dnodes in the cluster.
|
||||
If the issue still cannot be resolved, it is necessary to contact Taos technical personnel for support.
|
||||
|
|
|
@ -26,7 +26,6 @@ async function createDbAndTable() {
|
|||
let conf = new taos.WSConfig(dsn);
|
||||
conf.setUser('root');
|
||||
conf.setPwd('taosdata');
|
||||
conf.setDb('power');
|
||||
wsSql = await taos.sqlConnect(conf);
|
||||
console.log("Connected to " + dsn + " successfully.");
|
||||
// create database
|
||||
|
|
|
@ -40,7 +40,6 @@ async function prepare() {
|
|||
let conf = new taos.WSConfig('ws://localhost:6041');
|
||||
conf.setUser('root');
|
||||
conf.setPwd('taosdata');
|
||||
conf.setDb('power');
|
||||
const createDB = `CREATE DATABASE IF NOT EXISTS ${db}`;
|
||||
const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`;
|
||||
|
||||
|
|
|
@ -34,10 +34,10 @@ async function createConsumer() {
|
|||
}
|
||||
|
||||
async function prepare() {
|
||||
let conf = new taos.WSConfig('ws://192.168.1.98:6041');
|
||||
let conf = new taos.WSConfig('ws://localhost:6041');
|
||||
conf.setUser('root');
|
||||
conf.setPwd('taosdata');
|
||||
conf.setDb('power');
|
||||
|
||||
const createDB = `CREATE DATABASE IF NOT EXISTS ${db}`;
|
||||
const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`;
|
||||
|
||||
|
|
|
@ -16,7 +16,7 @@ TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移
|
|||
|
||||
如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出、配置参数,如何监测 TDengine 是否健康运行,如何提升系统运行的性能,请仔细参考[运维指南](./operation)一章。
|
||||
|
||||
如果你对数据库内核设计感兴趣,或是开源爱好者,建议仔细阅读[技术内幕](./tdinterna)一章。该章从分布式架构到存储引擎、查询引擎、数据订阅,再到流计算引擎都做了详细阐述。建议对照文档,查看TDengine在GitHub的源代码,对TDengine的设计和编码做深入了解,更欢迎加入开源社区,贡献代码。
|
||||
如果你对数据库内核设计感兴趣,或是开源爱好者,建议仔细阅读[技术内幕](./tdinternal)一章。该章从分布式架构到存储引擎、查询引擎、数据订阅,再到流计算引擎都做了详细阐述。建议对照文档,查看TDengine在GitHub的源代码,对TDengine的设计和编码做深入了解,更欢迎加入开源社区,贡献代码。
|
||||
|
||||
最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。
|
||||
|
||||
|
|
|
@ -8,8 +8,6 @@ import Tabs from "@theme/Tabs";
|
|||
import TabItem from "@theme/TabItem";
|
||||
import PkgListV3 from "/components/PkgListV3";
|
||||
|
||||
您可以[用 Docker 立即体验](../../get-started/docker/) TDengine。如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
|
||||
|
||||
TDengine 完整的软件包包括服务端(taosd)、应用驱动(taosc)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、命令行程序(CLI,taos)和一些工具软件。目前 TDinsight 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/components/taosadapter/) 提供 [RESTful 接口](../../reference/connector/rest-api/)。
|
||||
|
||||
为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 Lite 版本的安装包。
|
||||
|
@ -319,4 +317,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1
|
|||
SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s);
|
||||
```
|
||||
|
||||
在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。
|
||||
在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。
|
||||
|
|
|
@ -10,7 +10,7 @@ import official_account from './official-account.webp'
|
|||
|
||||
TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../reference/components/taosadapter) 提供 [RESTful 接口](../reference/connector/rest-api)。
|
||||
|
||||
本章主要介绍如何利用 Docker 或者安装包快速设置 TDengine 环境并体验其高效写入和查询。
|
||||
本章主要介绍如何快速设置 TDengine 环境并体验其高效写入和查询。
|
||||
|
||||
```mdx-code-block
|
||||
import DocCardList from '@theme/DocCardList';
|
||||
|
@ -34,4 +34,4 @@ import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
|
|||
<td style={{padding:'1em 3em',border:0}}>关注 TDengine 视频号<br/>收看技术直播与教学视频</td>
|
||||
<td style={{padding:'1em 3em',border:0}}>关注 TDengine 公众号<br/>阅读技术文章与行业案例</td>
|
||||
</tr>
|
||||
</table>
|
||||
</table>
|
||||
|
|
|
@ -175,7 +175,7 @@ TDengine 对于修改数据提供两种处理方式,由 IGNORE UPDATE 选项
|
|||
|
||||
用户可以为每个 partition 对应的子表生成自定义的 TAG 值,如下创建流的语句,
|
||||
```sql
|
||||
CREATE STREAM output_tag trigger at_once INTO output_tag_s TAGS(alias_tag varchar(100)) as select _wstart, count(*) from power.meters partition by concat("tag-", tbname) as alias_tag interval(10s));
|
||||
CREATE STREAM output_tag trigger at_once INTO output_tag_s TAGS(alias_tag varchar(100)) as select _wstart, count(*) from power.meters partition by concat("tag-", tbname) as alias_tag interval(10s);
|
||||
```
|
||||
|
||||
在 PARTITION 子句中,为 concat("tag-", tbname)定义了一个别名 alias_tag, 对应超级表 output_tag_s 的自定义 TAG 的名字。在上述示例中,流新创建的子表的 TAG 将以前缀 'tag-' 连接原表名作为 TAG 的值。会对 TAG 信息进行如下检查。
|
||||
|
|
|
@ -166,6 +166,12 @@ json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解
|
|||
|
||||

|
||||
|
||||
### 8. 创建完成
|
||||
### 8. 异常处理策略
|
||||
|
||||
import Contributing from './_03-exception-handling-strategy.mdx'
|
||||
|
||||
<Contributing />
|
||||
|
||||
### 9. 创建完成
|
||||
|
||||
点击 **提交** 按钮,完成创建 MQTT 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
|
|
@ -196,12 +196,16 @@ json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解
|
|||
|
||||
### 8. 配置高级选项
|
||||
|
||||
**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
|
||||
import AdvancedOptions from './_02-advanced_options.mdx'
|
||||
|
||||

|
||||
<AdvancedOptions/>
|
||||
|
||||

|
||||
### 9. 异常处理策略
|
||||
|
||||
### 9. 创建完成
|
||||
import Contributing from './_03-exception-handling-strategy.mdx'
|
||||
|
||||
<Contributing />
|
||||
|
||||
### 10. 创建完成
|
||||
|
||||
点击 **提交** 按钮,完成创建 Kafka 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
|
|
@ -75,9 +75,9 @@ InfluxDB 是一种流行的开源时间序列数据库,它针对处理大量
|
|||
|
||||
### 6. 配置高级选项
|
||||
|
||||
**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
|
||||

|
||||

|
||||
import AdvancedOptions from './_02-advanced_options.mdx'
|
||||
|
||||
<AdvancedOptions/>
|
||||
|
||||
### 7. 创建完成
|
||||
|
|
@ -58,9 +58,9 @@ OpenTSDB 是一个架构在 HBase 系统之上的实时监控信息收集和展
|
|||
|
||||
### 5. 配置高级选项
|
||||
|
||||
**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
|
||||

|
||||

|
||||
import AdvancedOptions from './_02-advanced_options.mdx'
|
||||
|
||||
<AdvancedOptions/>
|
||||
|
||||
### 6. 创建完成
|
||||
|
|
@ -107,13 +107,25 @@ sidebar_label: "CSV"
|
|||
|
||||

|
||||
|
||||
### 5. 创建完成
|
||||
### 5. 配置高级选项
|
||||
|
||||
import AdvancedOptions from './_02-advanced_options.mdx'
|
||||
|
||||
<AdvancedOptions/>
|
||||
|
||||
### 6. 异常处理策略
|
||||
|
||||
import Contributing from './_03-exception-handling-strategy.mdx'
|
||||
|
||||
<Contributing />
|
||||
|
||||
### 7. 创建完成
|
||||
|
||||
点击 **提交** 按钮,完成创建 CSV 到 TDengine 的数据同步任务,回到数据写入任务列表页面,可查看任务执行情况,也可以进行任务的“启动/停止”操作与“查看/编辑/删除/复制”操作。
|
||||
|
||||

|
||||
|
||||
### 6. 查看运行指标
|
||||
### 8. 查看运行指标
|
||||
|
||||
点击 **查看** 按钮,查看任务的运行指标,同时也可以查看任务中所有文件的处理情况。
|
||||
|
|
@ -134,6 +134,12 @@ split 提取器,seperator 填写分割符 `,`, number 填写 2。
|
|||
|
||||

|
||||
|
||||
### 7. 创建完成
|
||||
### 7. 异常处理策略
|
||||
|
||||
import Contributing from './_03-exception-handling-strategy.mdx'
|
||||
|
||||
<Contributing />
|
||||
|
||||
### 8. 创建完成
|
||||
|
||||
点击 **提交** 按钮,完成创建任务。提交任务后,回到**数据写入**页面可以查看任务状态。
|
|
@ -98,14 +98,16 @@ MySQL 是最流行的关系型数据库之一。很多系统都曾经或正在
|
|||
|
||||
### 8. 配置高级选项
|
||||
|
||||
**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
|
||||
import AdvancedOptions from './_02-advanced_options.mdx'
|
||||
|
||||
**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。
|
||||
<AdvancedOptions/>
|
||||
|
||||
**批次大小** 单次发送的最大消息数或行数。默认是 10000。
|
||||
### 9. 异常处理策略
|
||||
|
||||

|
||||
import Contributing from './_03-exception-handling-strategy.mdx'
|
||||
|
||||
### 9. 创建完成
|
||||
<Contributing />
|
||||
|
||||
### 10. 创建完成
|
||||
|
||||
点击 **提交** 按钮,完成创建 MySQL 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
|
|
@ -99,14 +99,16 @@ TDengine 可以高效地从 PostgreSQL 读取数据并将其写入 TDengine,
|
|||
|
||||
### 8. 配置高级选项
|
||||
|
||||
**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
|
||||
import AdvancedOptions from './_02-advanced_options.mdx'
|
||||
|
||||
**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。
|
||||
<AdvancedOptions/>
|
||||
|
||||
**批次大小** 单次发送的最大消息数或行数。默认是 10000。
|
||||
### 9. 异常处理策略
|
||||
|
||||

|
||||
import Contributing from './_03-exception-handling-strategy.mdx'
|
||||
|
||||
### 9. 创建完成
|
||||
<Contributing />
|
||||
|
||||
### 10. 创建完成
|
||||
|
||||
点击 **提交** 按钮,完成创建 PostgreSQL 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
|
|
@ -91,14 +91,16 @@ TDengine 可以高效地从 Oracle 读取数据并将其写入 TDengine,以实
|
|||
|
||||
### 7. 配置高级选项
|
||||
|
||||
**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
|
||||
import AdvancedOptions from './_02-advanced_options.mdx'
|
||||
|
||||
**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。
|
||||
<AdvancedOptions/>
|
||||
|
||||
**批次大小** 单次发送的最大消息数或行数。默认是 10000。
|
||||
### 8. 异常处理策略
|
||||
|
||||

|
||||
import Contributing from './_03-exception-handling-strategy.mdx'
|
||||
|
||||
### 8. 创建完成
|
||||
<Contributing />
|
||||
|
||||
### 9. 创建完成
|
||||
|
||||
点击 **提交** 按钮,完成创建 Oracle 到 TDengine 的数据同步任务,回到**数据源列表****页面可查看任务执行情况。
|
|
@ -105,14 +105,16 @@ Microsoft SQL Server 是最流行的关系型数据库之一。很多系统都
|
|||
|
||||
### 8. 配置高级选项
|
||||
|
||||
**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
|
||||
import AdvancedOptions from './_02-advanced_options.mdx'
|
||||
|
||||
**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。
|
||||
<AdvancedOptions/>
|
||||
|
||||
**批次大小** 单次发送的最大消息数或行数。默认是 10000。
|
||||
### 9. 异常处理策略
|
||||
|
||||

|
||||
import Contributing from './_03-exception-handling-strategy.mdx'
|
||||
|
||||
### 9. 创建完成
|
||||
<Contributing />
|
||||
|
||||
### 10. 创建完成
|
||||
|
||||
点击 **提交** 按钮,完成创建 Microsoft SQL Server 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
|
|
@ -122,14 +122,16 @@ MongoDB 是一个介于关系型数据库与非关系型数据库之间的产品
|
|||
|
||||
### 8. 配置高级选项
|
||||
|
||||
**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
|
||||
import AdvancedOptions from './_02-advanced_options.mdx'
|
||||
|
||||
**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。
|
||||
<AdvancedOptions/>
|
||||
|
||||
**批次大小** 单次发送的最大消息数或行数。默认是 10000。
|
||||
### 9. 异常处理策略
|
||||
|
||||

|
||||
import Contributing from './_03-exception-handling-strategy.mdx'
|
||||
|
||||
### 9. 创建完成
|
||||
<Contributing />
|
||||
|
||||
### 10. 创建完成
|
||||
|
||||
点击 **提交** 按钮,完成创建 MongoDB 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
|
|
@ -0,0 +1,7 @@
|
|||
**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
|
||||
|
||||
**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。
|
||||
|
||||
**批次大小** 单次发送的最大消息数或行数。默认是 10000。
|
||||
|
||||

|
|
@ -0,0 +1,23 @@
|
|||
异常处理策略区域是对数据异常时的处理策略进行配置,默认折叠的,点击右侧 `>` 可以展开,如下图所示:
|
||||
|
||||

|
||||
|
||||
各异常项说明及相应可选处理策略如下:
|
||||
|
||||
> 通用处理策略说明:
|
||||
> 归档:将异常数据写入归档文件(默认路径为 `${data_dir}/tasks/_id/.datetime`),不写入目标库
|
||||
> 丢弃:将异常数据忽略,不写入目标库
|
||||
> 报错:任务报错
|
||||
|
||||
- **主键时间戳溢出** 检查数据中第一列时间戳是否在正确的时间范围内(now - keep1, now + 100y),可选处理策略:归档、丢弃、报错
|
||||
- **主键时间戳空** 检查数据中第一列时间戳是否为空,可选处理策略:归档、丢弃、报错、使用当前时间
|
||||
> 使用当前时间:使用当前时间填充到空的时间戳字段中
|
||||
- **表名长度溢出** 检查子表表名的长度是否超出限制(最大 192 字符),可选处理策略:归档、丢弃、报错、截断、截断且归档
|
||||
> 截断:截取原始表名的前 192 个字符作为新的表名
|
||||
> 截断且归档:截取原始表名的前 192 个字符作为新的表名,并且将此行记录写入归档文件
|
||||
- **表名非法字符** 检查子表表名中是否包含特殊字符(符号 `.` 等),可选处理策略:归档、丢弃、报错、非法字符替换为指定字符串
|
||||
> 非法字符替换为指定字符串:将原始表名中的特殊字符替换为后方输入框中的指定字符串,例如 `a.b` 替换为 `a_b`
|
||||
- **表名模板变量空值** 检查子表表名模板中的变量是否为空,可选处理策略:丢弃、留空、变量替换为指定字符串
|
||||
> 留空:变量位置不做任何特殊处理,例如 `a_{x}` 转换为 `a_`
|
||||
> 变量替换为指定字符串:变量位置使用后方输入框中的指定字符串,例如 `a_{x}` 转换为 `a_b`
|
||||
- **列名长度溢出** 检查列名的长度是否超出限制(最大 64 字符),可选处理策略:归档、丢弃、报错
|
Binary file not shown.
After Width: | Height: | Size: 19 KiB |
|
@ -294,9 +294,32 @@ let v3 = data["voltage"].split(",");
|
|||
|
||||
在任务列表页面,还可以对任务进行启动、停止、查看、删除、复制等操作,也可以查看各个任务的运行情况,包括写入的记录条数、流量等。
|
||||
|
||||
### 健康状态
|
||||
|
||||
从 3.3.5.0 开始,在任务管理列表中,增加了一项 ”健康状态“,用于指示当前任务运行过程中的健康状态。
|
||||
|
||||
在数据源的”高级选项“列表中,增加了多项健康状态监测的配置项,包括:
|
||||
|
||||

|
||||
|
||||
1. 健康监测时段(Health Check Duration):可选项,表示对最近多长时间的任务状态进行统计。
|
||||
2. Busy 状态阈值(Busy State Threshold):百分比,表示写入队列中入队元素数量与队列长度之比,默认 100%。
|
||||
3. 写入队列长度(Max Write Queue Length):表示对应的写入队列长度最大值。
|
||||
4. 写入错误阈值(Write Error Threshold):数值类型,表示健康监测时段中允许写入错误的数量。超出阈值,则报错。
|
||||
|
||||
在任务管理列表展示中,有如下状态:
|
||||
|
||||
- Ready:数据源和目标端健康检查通过,可以进行数据读取和写入。
|
||||
- Idle:表示监测时段内无数据处理(没有数据进入处理流程)。
|
||||
- Busy:表示写入队列已满(超出一定阈值,表示写入繁忙,在一定程度上意味着当前可能存在性能瓶颈,需要调整参数或配置等来进行改善,但并不说明存在错误)。
|
||||
- Bounce:数据源和目标端均正常,但在写入过程中存在错误,一定周期内超出阈值,可能意味着存在大量非正常数据或正在发生数据丢失。
|
||||
- SourceError: 数据源错误导致无法进行读取。此时工作负载将尝试重连数据源。
|
||||
- SinkError:写入端错误导致无法进行写入。此时工作负载将尝试重连数据库,恢复后进入 Ready 状态。
|
||||
- Fatal:严重或无法恢复的错误。
|
||||
|
||||
```mdx-code-block
|
||||
import DocCardList from '@theme/DocCardList';
|
||||
import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
|
||||
|
||||
<DocCardList items={useCurrentSidebarCategory().items}/>
|
||||
```
|
||||
```
|
||||
|
|
Binary file not shown.
Before Width: | Height: | Size: 6.7 KiB |
Binary file not shown.
Before Width: | Height: | Size: 13 KiB |
Binary file not shown.
Before Width: | Height: | Size: 7.2 KiB |
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue