Merge branch 'main' of https://github.com/taosdata/TDengine into fix/TS-5761-main
|
@ -99,6 +99,7 @@ tests/examples/JDBC/JDBCDemo/.classpath
|
||||||
tests/examples/JDBC/JDBCDemo/.project
|
tests/examples/JDBC/JDBCDemo/.project
|
||||||
tests/examples/JDBC/JDBCDemo/.settings/
|
tests/examples/JDBC/JDBCDemo/.settings/
|
||||||
source/libs/parser/inc/sql.*
|
source/libs/parser/inc/sql.*
|
||||||
|
source/os/src/timezone/
|
||||||
tests/script/tmqResult.txt
|
tests/script/tmqResult.txt
|
||||||
tests/system-test/case_to_run.txt
|
tests/system-test/case_to_run.txt
|
||||||
tests/develop-test/case_to_run.txt
|
tests/develop-test/case_to_run.txt
|
||||||
|
|
|
@ -402,6 +402,7 @@ pipeline {
|
||||||
WKDIR = '/var/lib/jenkins/workspace'
|
WKDIR = '/var/lib/jenkins/workspace'
|
||||||
WK = '/var/lib/jenkins/workspace/TDinternal'
|
WK = '/var/lib/jenkins/workspace/TDinternal'
|
||||||
WKC = '/var/lib/jenkins/workspace/TDinternal/community'
|
WKC = '/var/lib/jenkins/workspace/TDinternal/community'
|
||||||
|
WKPY = '/var/lib/jenkins/workspace/taos-connector-python'
|
||||||
DOC_WKC = '/root/doc_ci_work'
|
DOC_WKC = '/root/doc_ci_work'
|
||||||
td_repo = 'TDengine'
|
td_repo = 'TDengine'
|
||||||
zh_doc_repo = 'docs.taosdata.com'
|
zh_doc_repo = 'docs.taosdata.com'
|
||||||
|
|
|
@ -97,10 +97,13 @@ ELSE()
|
||||||
SET(TD_TAOS_TOOLS TRUE)
|
SET(TD_TAOS_TOOLS TRUE)
|
||||||
ENDIF()
|
ENDIF()
|
||||||
|
|
||||||
|
SET(TAOS_LIB taos)
|
||||||
|
SET(TAOS_LIB_STATIC taos_static)
|
||||||
|
|
||||||
IF(${TD_WINDOWS})
|
IF(${TD_WINDOWS})
|
||||||
SET(TAOS_LIB taos_static)
|
SET(TAOS_LIB_PLATFORM_SPEC taos_static)
|
||||||
ELSE()
|
ELSE()
|
||||||
SET(TAOS_LIB taos)
|
SET(TAOS_LIB_PLATFORM_SPEC taos)
|
||||||
ENDIF()
|
ENDIF()
|
||||||
|
|
||||||
# build TSZ by default
|
# build TSZ by default
|
||||||
|
@ -128,7 +131,7 @@ IF(TD_WINDOWS)
|
||||||
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
|
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
|
||||||
ENDIF()
|
ENDIF()
|
||||||
|
|
||||||
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
|
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO /FORCE:MULTIPLE")
|
||||||
|
|
||||||
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
|
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
|
||||||
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
|
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
IF (DEFINED VERNUMBER)
|
IF (DEFINED VERNUMBER)
|
||||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||||
ELSE ()
|
ELSE ()
|
||||||
SET(TD_VER_NUMBER "3.3.4.8.alpha")
|
SET(TD_VER_NUMBER "3.3.5.0.alpha")
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
IF (DEFINED VERCOMPATIBLE)
|
IF (DEFINED VERCOMPATIBLE)
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
# libuv
|
# libuv
|
||||||
ExternalProject_Add(libuv
|
ExternalProject_Add(libuv
|
||||||
GIT_REPOSITORY https://github.com/libuv/libuv.git
|
GIT_REPOSITORY https://github.com/libuv/libuv.git
|
||||||
GIT_TAG v1.48.0
|
GIT_TAG v1.49.2
|
||||||
SOURCE_DIR "${TD_CONTRIB_DIR}/libuv"
|
SOURCE_DIR "${TD_CONTRIB_DIR}/libuv"
|
||||||
BINARY_DIR "${TD_CONTRIB_DIR}/libuv"
|
BINARY_DIR "${TD_CONTRIB_DIR}/libuv"
|
||||||
CONFIGURE_COMMAND ""
|
CONFIGURE_COMMAND ""
|
||||||
|
|
|
@ -0,0 +1,15 @@
|
||||||
|
# timezone
|
||||||
|
ExternalProject_Add(tz
|
||||||
|
GIT_REPOSITORY https://github.com/eggert/tz.git
|
||||||
|
GIT_TAG main
|
||||||
|
SOURCE_DIR "${TD_CONTRIB_DIR}/tz"
|
||||||
|
BINARY_DIR ""
|
||||||
|
CONFIGURE_COMMAND ""
|
||||||
|
#BUILD_COMMAND ""
|
||||||
|
INSTALL_COMMAND ""
|
||||||
|
TEST_COMMAND ""
|
||||||
|
GIT_SHALLOW true
|
||||||
|
GIT_PROGRESS true
|
||||||
|
BUILD_COMMAND ""
|
||||||
|
)
|
||||||
|
|
|
@ -106,6 +106,10 @@ cat("${TD_SUPPORT_DIR}/zlib_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
# cJson
|
# cJson
|
||||||
cat("${TD_SUPPORT_DIR}/cjson_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
cat("${TD_SUPPORT_DIR}/cjson_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
|
|
||||||
|
if(NOT ${TD_WINDOWS})
|
||||||
|
cat("${TD_SUPPORT_DIR}/tz_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
|
endif(NOT ${TD_WINDOWS})
|
||||||
|
|
||||||
# xz
|
# xz
|
||||||
# cat("${TD_SUPPORT_DIR}/xz_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
# cat("${TD_SUPPORT_DIR}/xz_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
|
|
||||||
|
@ -651,6 +655,35 @@ if(${TD_LINUX} AND ${BUILD_WITH_S3})
|
||||||
add_subdirectory(azure-cmake EXCLUDE_FROM_ALL)
|
add_subdirectory(azure-cmake EXCLUDE_FROM_ALL)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
IF(TD_LINUX)
|
||||||
|
SET(TZ_OUTPUT_PATH /usr/share/zoneinfo)
|
||||||
|
ELSEIF(TD_DARWIN)
|
||||||
|
SET(TZ_OUTPUT_PATH /var/db/timezone/zoneinfo)
|
||||||
|
ENDIF()
|
||||||
|
|
||||||
|
|
||||||
|
if(NOT ${TD_WINDOWS})
|
||||||
|
MESSAGE(STATUS "timezone file path: " ${TZ_OUTPUT_PATH})
|
||||||
|
|
||||||
|
execute_process(
|
||||||
|
COMMAND make TZDIR=${TZ_OUTPUT_PATH}/ clean tzdir.h
|
||||||
|
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/tz"
|
||||||
|
)
|
||||||
|
|
||||||
|
set(TZ_SRC_DIR "${TD_SOURCE_DIR}/source/os/src/timezone")
|
||||||
|
file(REMOVE_RECURSE ${TZ_SRC_DIR})
|
||||||
|
file(MAKE_DIRECTORY ${TZ_SRC_DIR})
|
||||||
|
file(COPY ${TD_CONTRIB_DIR}/tz/private.h ${TD_CONTRIB_DIR}/tz/tzdir.h ${TD_CONTRIB_DIR}/tz/tzfile.h
|
||||||
|
${TD_CONTRIB_DIR}/tz/localtime.c ${TD_CONTRIB_DIR}/tz/strftime.c
|
||||||
|
DESTINATION ${TZ_SRC_DIR})
|
||||||
|
endif(NOT ${TD_WINDOWS})
|
||||||
|
|
||||||
|
#if(NOT ${TD_WINDOWS})
|
||||||
|
# execute_process(
|
||||||
|
# COMMAND make CFLAGS+=-fPIC CFLAGS+=-g TZDIR=${TZ_OUTPUT_PATH} clean libtz.a
|
||||||
|
# WORKING_DIRECTORY "${TD_CONTRIB_DIR}/tz"
|
||||||
|
# )
|
||||||
|
#endif(NOT ${TD_WINDOWS})
|
||||||
# ================================================================================================
|
# ================================================================================================
|
||||||
# Build test
|
# Build test
|
||||||
# ================================================================================================
|
# ================================================================================================
|
||||||
|
|
|
@ -28,6 +28,9 @@ if(${BUILD_WITH_TRAFT})
|
||||||
# add_subdirectory(traft)
|
# add_subdirectory(traft)
|
||||||
endif(${BUILD_WITH_TRAFT})
|
endif(${BUILD_WITH_TRAFT})
|
||||||
|
|
||||||
add_subdirectory(azure)
|
if(${BUILD_S3})
|
||||||
|
add_subdirectory(azure)
|
||||||
|
endif()
|
||||||
|
|
||||||
add_subdirectory(tdev)
|
add_subdirectory(tdev)
|
||||||
add_subdirectory(lz4)
|
add_subdirectory(lz4)
|
||||||
|
|
|
@ -7,19 +7,19 @@ import Tabs from "@theme/Tabs";
|
||||||
import TabItem from "@theme/TabItem";
|
import TabItem from "@theme/TabItem";
|
||||||
import Image from '@theme/IdealImage';
|
import Image from '@theme/IdealImage';
|
||||||
import imgConnect from '../assets/connecting-to-tdengine-01.png';
|
import imgConnect from '../assets/connecting-to-tdengine-01.png';
|
||||||
import ConnJava from "./_connect_java.mdx";
|
import ConnJava from "../assets/resources/_connect_java.mdx";
|
||||||
import ConnGo from "./_connect_go.mdx";
|
import ConnGo from "../assets/resources/_connect_go.mdx";
|
||||||
import ConnRust from "./_connect_rust.mdx";
|
import ConnRust from "../assets/resources/_connect_rust.mdx";
|
||||||
import ConnNode from "./_connect_node.mdx";
|
import ConnNode from "../assets/resources/_connect_node.mdx";
|
||||||
import ConnPythonNative from "./_connect_python.mdx";
|
import ConnPythonNative from "../assets/resources/_connect_python.mdx";
|
||||||
import ConnCSNative from "./_connect_cs.mdx";
|
import ConnCSNative from "../assets/resources/_connect_cs.mdx";
|
||||||
import ConnC from "./_connect_c.mdx";
|
import ConnC from "../assets/resources/_connect_c.mdx";
|
||||||
import InstallOnLinux from "../14-reference/05-connector/_linux_install.mdx";
|
import InstallOnLinux from "../assets/resources/_linux_install.mdx";
|
||||||
import InstallOnWindows from "../14-reference/05-connector/_windows_install.mdx";
|
import InstallOnWindows from "../assets/resources/_windows_install.mdx";
|
||||||
import InstallOnMacOS from "../14-reference/05-connector/_macos_install.mdx";
|
import InstallOnMacOS from "../assets/resources/_macos_install.mdx";
|
||||||
import VerifyLinux from "../14-reference/05-connector/_verify_linux.mdx";
|
import VerifyLinux from "../assets/resources/_verify_linux.mdx";
|
||||||
import VerifyMacOS from "../14-reference/05-connector/_verify_macos.mdx";
|
import VerifyMacOS from "../assets/resources/_verify_macos.mdx";
|
||||||
import VerifyWindows from "../14-reference/05-connector/_verify_windows.mdx";
|
import VerifyWindows from "../assets/resources/_verify_windows.mdx";
|
||||||
|
|
||||||
TDengine provides a rich set of application development interfaces. To facilitate users in quickly developing their applications, TDengine supports connectors for multiple programming languages. The official connectors include support for C/C++, Java, Python, Go, Node.js, C#, Rust, Lua (community contribution), and PHP (community contribution). These connectors support connecting to the TDengine cluster using the native interface (taosc) and REST interface (not supported in some languages yet). Community developers have also contributed several unofficial connectors, such as ADO.NET connector, Lua connector, and PHP connector. Additionally, TDengine can directly call the REST API provided by taosadapter for data writing and querying operations.
|
TDengine provides a rich set of application development interfaces. To facilitate users in quickly developing their applications, TDengine supports connectors for multiple programming languages. The official connectors include support for C/C++, Java, Python, Go, Node.js, C#, Rust, Lua (community contribution), and PHP (community contribution). These connectors support connecting to the TDengine cluster using the native interface (taosc) and REST interface (not supported in some languages yet). Community developers have also contributed several unofficial connectors, such as ADO.NET connector, Lua connector, and PHP connector. Additionally, TDengine can directly call the REST API provided by taosadapter for data writing and querying operations.
|
||||||
|
|
||||||
|
|
|
@ -12,7 +12,7 @@ TDengine is configured by default with only one root user, who has the highest p
|
||||||
Only the root user can perform the operation of creating users, with the syntax as follows.
|
Only the root user can perform the operation of creating users, with the syntax as follows.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
create user user_name pass'password' [sysinfo {1|0}]
|
create user user_name pass'password' [sysinfo {1|0}] [createdb {1|0}]
|
||||||
```
|
```
|
||||||
|
|
||||||
The parameters are explained as follows.
|
The parameters are explained as follows.
|
||||||
|
@ -20,11 +20,12 @@ The parameters are explained as follows.
|
||||||
- user_name: Up to 23 B long.
|
- user_name: Up to 23 B long.
|
||||||
- password: The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. Special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`.
|
- password: The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. Special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`.
|
||||||
- sysinfo: Whether the user can view system information. 1 means they can view it, 0 means they cannot. System information includes server configuration information, various node information such as dnode, query node (qnode), etc., as well as storage-related information, etc. The default is to view system information.
|
- sysinfo: Whether the user can view system information. 1 means they can view it, 0 means they cannot. System information includes server configuration information, various node information such as dnode, query node (qnode), etc., as well as storage-related information, etc. The default is to view system information.
|
||||||
|
- createdb: Whether the user can create databases. 1 means they can create databases, 0 means they cannot. The default value is 0. // Supported starting from TDengine Enterprise version 3.3.2.0
|
||||||
|
|
||||||
The following SQL can create a user named test with the password 123456 who can view system information.
|
The following SQL can create a user named test with the password abc123!@# who can view system information.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
create user test pass '123456' sysinfo 1
|
create user test pass 'abc123!@#' sysinfo 1
|
||||||
```
|
```
|
||||||
|
|
||||||
### Viewing Users
|
### Viewing Users
|
||||||
|
@ -51,6 +52,7 @@ alter_user_clause: {
|
||||||
pass 'literal'
|
pass 'literal'
|
||||||
| enable value
|
| enable value
|
||||||
| sysinfo value
|
| sysinfo value
|
||||||
|
| createdb value
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -59,6 +61,7 @@ The parameters are explained as follows.
|
||||||
- pass: Modify the user's password.
|
- pass: Modify the user's password.
|
||||||
- enable: Whether to enable the user. 1 means to enable this user, 0 means to disable this user.
|
- enable: Whether to enable the user. 1 means to enable this user, 0 means to disable this user.
|
||||||
- sysinfo: Whether the user can view system information. 1 means they can view system information, 0 means they cannot.
|
- sysinfo: Whether the user can view system information. 1 means they can view system information, 0 means they cannot.
|
||||||
|
- createdb: Whether the user can create databases. 1 means they can create databases, 0 means they cannot. // Supported starting from TDengine Enterprise version 3.3.2.0
|
||||||
|
|
||||||
The following SQL disables the user test.
|
The following SQL disables the user test.
|
||||||
|
|
||||||
|
|
|
@ -3,7 +3,7 @@ title: Prometheus
|
||||||
slug: /third-party-tools/data-collection/prometheus
|
slug: /third-party-tools/data-collection/prometheus
|
||||||
---
|
---
|
||||||
|
|
||||||
import Prometheus from "./_prometheus.mdx"
|
import Prometheus from "../../assets/resources/_prometheus.mdx"
|
||||||
|
|
||||||
Prometheus is a popular open-source monitoring and alerting system. In 2016, Prometheus joined the Cloud Native Computing Foundation (CNCF), becoming the second hosted project after Kubernetes. The project has a very active developer and user community.
|
Prometheus is a popular open-source monitoring and alerting system. In 2016, Prometheus joined the Cloud Native Computing Foundation (CNCF), becoming the second hosted project after Kubernetes. The project has a very active developer and user community.
|
||||||
|
|
||||||
|
|
|
@ -3,7 +3,7 @@ title: Telegraf
|
||||||
slug: /third-party-tools/data-collection/telegraf
|
slug: /third-party-tools/data-collection/telegraf
|
||||||
---
|
---
|
||||||
|
|
||||||
import Telegraf from "./_telegraf.mdx"
|
import Telegraf from "../../assets/resources/_telegraf.mdx"
|
||||||
|
|
||||||
Telegraf is a very popular open-source metric collection software. In data collection and platform monitoring systems, Telegraf can collect operational information from various components without the need to manually write scripts for periodic collection, reducing the difficulty of data acquisition.
|
Telegraf is a very popular open-source metric collection software. In data collection and platform monitoring systems, Telegraf can collect operational information from various components without the need to manually write scripts for periodic collection, reducing the difficulty of data acquisition.
|
||||||
|
|
||||||
|
|
|
@ -3,7 +3,7 @@ title: collectd
|
||||||
slug: /third-party-tools/data-collection/collectd
|
slug: /third-party-tools/data-collection/collectd
|
||||||
---
|
---
|
||||||
|
|
||||||
import CollectD from "./_collectd.mdx"
|
import CollectD from "../../assets/resources/_collectd.mdx"
|
||||||
|
|
||||||
collectd is a daemon for collecting system performance. collectd provides various storage mechanisms to store different values. It periodically collects relevant statistical information about the system while it is running and storing information. Utilizing this information helps identify current system performance bottlenecks and predict future system loads.
|
collectd is a daemon for collecting system performance. collectd provides various storage mechanisms to store different values. It periodically collects relevant statistical information about the system while it is running and storing information. Utilizing this information helps identify current system performance bottlenecks and predict future system loads.
|
||||||
|
|
||||||
|
|
|
@ -3,7 +3,7 @@ title: StatsD
|
||||||
slug: /third-party-tools/data-collection/statsd
|
slug: /third-party-tools/data-collection/statsd
|
||||||
---
|
---
|
||||||
|
|
||||||
import StatsD from "./_statsd.mdx"
|
import StatsD from "../../assets/resources/_statsd.mdx"
|
||||||
|
|
||||||
StatsD is a simple daemon for aggregating and summarizing application metrics that has rapidly evolved in recent years into a unified protocol for collecting application performance metrics.
|
StatsD is a simple daemon for aggregating and summarizing application metrics that has rapidly evolved in recent years into a unified protocol for collecting application performance metrics.
|
||||||
|
|
||||||
|
|
|
@ -3,7 +3,7 @@ title: Icinga2
|
||||||
slug: /third-party-tools/data-collection/icinga2
|
slug: /third-party-tools/data-collection/icinga2
|
||||||
---
|
---
|
||||||
|
|
||||||
import Icinga2 from "./_icinga2.mdx"
|
import Icinga2 from "../../assets/resources/_icinga2.mdx"
|
||||||
|
|
||||||
icinga2 is an open-source host and network monitoring software, originally developed from the Nagios network monitoring application. Currently, icinga2 is released under the GNU GPL v2 license.
|
icinga2 is an open-source host and network monitoring software, originally developed from the Nagios network monitoring application. Currently, icinga2 is released under the GNU GPL v2 license.
|
||||||
|
|
||||||
|
|
|
@ -3,7 +3,7 @@ title: TCollector
|
||||||
slug: /third-party-tools/data-collection/tcollector
|
slug: /third-party-tools/data-collection/tcollector
|
||||||
---
|
---
|
||||||
|
|
||||||
import TCollector from "./_tcollector.mdx"
|
import TCollector from "../../assets/resources/_tcollector.mdx"
|
||||||
|
|
||||||
TCollector is part of openTSDB, used for collecting client logs and sending them to the database.
|
TCollector is part of openTSDB, used for collecting client logs and sending them to the database.
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,76 @@
|
||||||
|
---
|
||||||
|
sidebar_label: Superset
|
||||||
|
title: Integration With Superset
|
||||||
|
toc_max_heading_level: 4
|
||||||
|
---
|
||||||
|
Apache Superset is a modern enterprise level business intelligence (BI) web application primarily used for data exploration and visualization.
|
||||||
|
It is supported by the Apache Software Foundation and is an open source project with an active community and rich ecosystem.
|
||||||
|
Apache Superset provides an intuitive user interface that makes creating, sharing, and visualizing data simple, while supporting multiple data sources and rich visualization options.
|
||||||
|
|
||||||
|
Through the Python connector of TDengine, Superset can support TDengine data sources and provide functions such as data presentation and analysis
|
||||||
|
|
||||||
|
## Install Apache Superset
|
||||||
|
|
||||||
|
Ensure that Apache Superset v2.1.0 or above is installed. If not, please visit [official website](https://superset.apache.org/) to install
|
||||||
|
|
||||||
|
## Install TDengine
|
||||||
|
|
||||||
|
Both TDengine Enterprise Edition and Community Edition are supported, with version requirements of 3.0 or higher
|
||||||
|
|
||||||
|
## Install TDengine Python Connector
|
||||||
|
|
||||||
|
The Python connector of TDengine comes with a connection driver that supports Superset in versions 2.1.18 and later, which will be automatically installed in the Superset directory and provide data source services.
|
||||||
|
The connection uses the WebSocket protocol, so it is necessary to install the `taos-ws-py` component of TDengine separately. The complete installation script is as follows:
|
||||||
|
```bash
|
||||||
|
pip3 install taospy
|
||||||
|
pip3 install taos-ws-py
|
||||||
|
```
|
||||||
|
|
||||||
|
## Configure TDengine Connection In Superset
|
||||||
|
|
||||||
|
**Step 1**, enter the new database connection page, "Superset" → "Setting" → "Database Connections" → "+DATABASE"
|
||||||
|
**Step 2**, select TDengine database connection, select the "TDengine" option from the drop-down list of "SUPPORTED DATABASES".
|
||||||
|
:::tip
|
||||||
|
If there is no TDengine option in the drop-down list, please confirm that the steps of installing, `Superset` is first and `Python Connector` is second.
|
||||||
|
:::
|
||||||
|
**Step 3**, write a name of connection in "DISPLAY NAME"
|
||||||
|
**Step 4**, The "SQLALCHEMY URL" field is a key connection information string, and it must be filled in correctly
|
||||||
|
```bash
|
||||||
|
taosws://user:password@host:port
|
||||||
|
```
|
||||||
|
| Parameter | <center>Parameter Description</center> |
|
||||||
|
|:---------- |:--------------------------------------------------------- |
|
||||||
|
|user | Username for logging into TDengine database |
|
||||||
|
|password | Password for logging into TDengine database |
|
||||||
|
|host | Name of the host where the TDengine database is located |
|
||||||
|
|port | The port that provides WebSocket services, default is 6041 |
|
||||||
|
|
||||||
|
Example:
|
||||||
|
The TDengine database installed on this machine provides WebSocket service port 6041, using the default username and password, "SQLALCHEMY URL" is:
|
||||||
|
```bash
|
||||||
|
taosws://root:taosdata@localhost:6041
|
||||||
|
```
|
||||||
|
**Step 5**, configure the connection string, click "TEST CONNECTION" to test if the connection can be successful. After passing the test, click the "CONNECT" button to complete the connection
|
||||||
|
|
||||||
|
|
||||||
|
## Start
|
||||||
|
|
||||||
|
There is no difference in the use of TDengine data source compared to other data sources. Here is a brief introduction to basic data queries:
|
||||||
|
1. Click the "+" button in the upper right corner of the Superset interface, select "SQL query", and enter the query interface
|
||||||
|
2. Select the "TDengine" data source that has been created earlier from the dropdown list of "DATABASES" in the upper left corner
|
||||||
|
3. Select the name of the database to be operated on from the drop-down list of "SCHEMA" (system libraries are not displayed)
|
||||||
|
4. "SEE TABLE SCHEMA" select the name of the super table or regular table to be operated on (sub tables are not displayed)
|
||||||
|
5. Subsequently, the schema information of the selected table will be displayed in the following area
|
||||||
|
6. In the SQL editor area, any SQL statement that conforms to TDengine syntax can be entered for execution
|
||||||
|
|
||||||
|
## Example
|
||||||
|
|
||||||
|
We chose two popular templates from the Superset Chart template to showcase their effects, using smart meter data as an example:
|
||||||
|
|
||||||
|
1. "Aggregate" Type, which displays the maximum voltage value collected per minute during the specified time period in Group 4
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
2. "RAW RECORDS" Type, which displays the collected values of current and voltage during the specified time period in Group 4
|
||||||
|
|
||||||
|

|
After Width: | Height: | Size: 650 KiB |
After Width: | Height: | Size: 784 KiB |
|
@ -28,68 +28,70 @@ After modifying configuration file parameters, you need to restart the *taosd* s
|
||||||
|
|
||||||
### Connection Related
|
### Connection Related
|
||||||
|
|
||||||
|Parameter Name |Supported Version |Description|
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|-----------------------|-------------------------|------------|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
|firstEp | |Endpoint of the first dnode in the cluster that taosd actively connects to at startup, default value localhost:6030|
|
|firstEp | |Not supported |Endpoint of the first dnode in the cluster that taosd actively connects to at startup, default value localhost:6030|
|
||||||
|secondEp | |Endpoint of the second dnode in the cluster that taosd tries to connect to if the firstEp is unreachable, no default value|
|
|secondEp | |Not supported |Endpoint of the second dnode in the cluster that taosd tries to connect to if the firstEp is unreachable, no default value|
|
||||||
|fqdn | |The service address that taosd listens on, default is the first hostname configured on the server|
|
|fqdn | |Not supported |The service address that taosd listens on, default is the first hostname configured on the server|
|
||||||
|serverPort | |The port that taosd listens on, default value 6030|
|
|serverPort | |Not supported |The port that taosd listens on, default value 6030|
|
||||||
|compressMsgSize | |Whether to compress RPC messages; -1: do not compress any messages; 0: compress all messages; N (N>0): only compress messages larger than N bytes; default value -1|
|
|compressMsgSize | |Supported, effective after restart|Whether to compress RPC messages; -1: do not compress any messages; 0: compress all messages; N (N>0): only compress messages larger than N bytes; default value -1|
|
||||||
|shellActivityTimer | |Duration in seconds for the client to send heartbeat to mnode, range 1-120, default value 3 |
|
|shellActivityTimer | |Supported, effective immediately |Duration in seconds for the client to send heartbeat to mnode, range 1-120, default value 3 |
|
||||||
|numOfRpcSessions | |Maximum number of connections supported by RPC, range 100-100000, default value 30000|
|
|numOfRpcSessions | |Supported, effective after restart|Maximum number of connections supported by RPC, range 100-100000, default value 30000|
|
||||||
|numOfRpcThreads | |Number of threads for receiving and sending RPC data, range 1-50, default value is half of the CPU cores|
|
|numOfRpcThreads | |Supported, effective after restart|Number of threads for receiving and sending RPC data, range 1-50, default value is half of the CPU cores|
|
||||||
|numOfTaskQueueThreads | |Number of threads for client to process RPC messages, range 4-16, default value is half of the CPU cores|
|
|numOfTaskQueueThreads | |Supported, effective after restart|Number of threads for client to process RPC messages, range 4-16, default value is half of the CPU cores|
|
||||||
|rpcQueueMemoryAllowed | |Maximum memory allowed for received RPC messages in dnode, in bytes, range 104857600-INT64_MAX, default value is 1/10 of server memory |
|
|rpcQueueMemoryAllowed | |Supported, effective immediately |Maximum memory allowed for received RPC messages in dnode, in bytes, range 104857600-INT64_MAX, default value is 1/10 of server memory |
|
||||||
|resolveFQDNRetryTime | Cancelled after 3.x |Number of retries when FQDN resolution fails|
|
|resolveFQDNRetryTime | Cancelled after 3.x |Not supported |Number of retries when FQDN resolution fails|
|
||||||
|timeToGetAvailableConn | Cancelled after 3.3.4.x |Maximum waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|
|timeToGetAvailableConn | Cancelled after 3.3.4.x |Maximum waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|
||||||
|maxShellConns | Cancelled after 3.x |Maximum number of connections allowed|
|
|maxShellConns | Cancelled after 3.x |Supported, effective after restart|Maximum number of connections allowed|
|
||||||
|maxRetryWaitTime | |Maximum timeout for reconnection, default value is 10s|
|
|maxRetryWaitTime | |Supported, effective after restart|Maximum timeout for reconnection, default value is 10s|
|
||||||
|shareConnLimit |Added in 3.3.4.0 |Number of requests a connection can share, range 1-512, default value 10|
|
|shareConnLimit |Added in 3.3.4.0 |Supported, effective after restart|Number of requests a connection can share, range 1-512, default value 10|
|
||||||
|readTimeout |Added in 3.3.4.0 |Minimum timeout for a single request, range 64-604800, in seconds, default value 900|
|
|readTimeout |Added in 3.3.4.0 |Supported, effective after restart|Minimum timeout for a single request, range 64-604800, in seconds, default value 900|
|
||||||
|
|
||||||
### Monitoring Related
|
### Monitoring Related
|
||||||
|
|
||||||
|Parameter Name|Supported Version|Description|
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|-----------------------|----------|-|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
|monitor | |Whether to collect and report monitoring data, 0: off; 1: on; default value 0|
|
|monitor | |Supported, effective immediately |Whether to collect and report monitoring data, 0: off; 1: on; default value 0|
|
||||||
|monitorFqdn | |The FQDN of the server where the taosKeeper service is located, default value none|
|
|monitorFqdn | |Supported, effective after restart|The FQDN of the server where the taosKeeper service is located, default value none|
|
||||||
|monitorPort | |The port number listened to by the taosKeeper service, default value 6043|
|
|monitorPort | |Supported, effective after restart|The port number listened to by the taosKeeper service, default value 6043|
|
||||||
|monitorInterval | |The time interval for recording system parameters (CPU/memory) in the monitoring database, in seconds, range 1-200000, default value 30|
|
|monitorInterval | |Supported, effective immediately |The time interval for recording system parameters (CPU/memory) in the monitoring database, in seconds, range 1-200000, default value 30|
|
||||||
|monitorMaxLogs | |Number of cached logs pending report|
|
|monitorMaxLogs | |Supported, effective immediately |Number of cached logs pending report|
|
||||||
|monitorComp | |Whether to use compression when reporting monitoring logs|
|
|monitorComp | |Supported, effective after restart|Whether to use compression when reporting monitoring logs|
|
||||||
|monitorLogProtocol | |Whether to print monitoring logs|
|
|monitorLogProtocol | |Supported, effective immediately |Whether to print monitoring logs|
|
||||||
|monitorForceV2 | |Whether to use V2 protocol for reporting|
|
|monitorForceV2 | |Supported, effective immediately |Whether to use V2 protocol for reporting|
|
||||||
|telemetryReporting | |Whether to upload telemetry, 0: do not upload, 1: upload, default value 1|
|
|telemetryReporting | |Supported, effective immediately |Whether to upload telemetry, 0: do not upload, 1: upload, default value 1|
|
||||||
|telemetryServer | |Telemetry server address|
|
|telemetryServer | |Not supported |Telemetry server address|
|
||||||
|telemetryPort | |Telemetry server port number|
|
|telemetryPort | |Not supported |Telemetry server port number|
|
||||||
|telemetryInterval | |Telemetry upload interval, in seconds, default 43200|
|
|telemetryInterval | |Supported, effective immediately |Telemetry upload interval, in seconds, default 86400|
|
||||||
|crashReporting | |Whether to upload crash information; 0: do not upload, 1: upload; default value 1|
|
|crashReporting | |Supported, effective immediately |Whether to upload crash information; 0: do not upload, 1: upload; default value 1|
|
||||||
|
|
||||||
### Query Related
|
### Query Related
|
||||||
|
|
||||||
|Parameter Name|Supported Version|Description|
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|------------------------|----------|-|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
|countAlwaysReturnValue | |Whether count/hyperloglog functions return a value when input data is empty or NULL; 0: return empty row, 1: return; default value 1; When this parameter is set to 1, if the query contains an INTERVAL clause or the query uses TSMA, and the corresponding group or window has empty or NULL data, the corresponding group or window will not return a query result; Note that this parameter should be consistent between client and server|
|
|countAlwaysReturnValue | |Supported, effective immediately |Whether count/hyperloglog functions return a value when input data is empty or NULL; 0: return empty row, 1: return; default value 1; When this parameter is set to 1, if the query contains an INTERVAL clause or the query uses TSMA, and the corresponding group or window has empty or NULL data, the corresponding group or window will not return a query result; Note that this parameter should be consistent between client and server|
|
||||||
|tagFilterCache | |Whether to cache tag filter results|
|
|tagFilterCache | |Not supported |Whether to cache tag filter results|
|
||||||
|maxNumOfDistinctRes | |Maximum number of distinct results allowed to return, default value 100,000, maximum allowed value 100 million|
|
|queryBufferSize | |Supported, effective after restart|Not effective yet|
|
||||||
|queryBufferSize | |Not effective yet|
|
|queryRspPolicy | |Supported, effective immediately |Query response strategy|
|
||||||
|queryRspPolicy | |Query response strategy|
|
|queryUseMemoryPool | |Not supported |Whether query will use memory pool to manage memory, default value: 1 (on); 0: off, 1: on|
|
||||||
|filterScalarMode | |Force scalar filter mode, 0: off; 1: on, default value 0|
|
|minReservedMemorySize | |Not supported |The minimum reserved system available memory size, all memory except reserved can be used for queries, unit: MB, default reserved size is 20% of system physical memory, value range 1024-1000000000|
|
||||||
|queryPlannerTrace | |Internal parameter, whether the query plan outputs detailed logs|
|
|singleQueryMaxMemorySize| |Not supported |The memory limit that a single query can use on a single node (dnode), exceeding this limit will return an error, unit: MB, default value: 0 (no limit), value range 0-1000000000|
|
||||||
|queryNodeChunkSize | |Internal parameter, chunk size of the query plan|
|
|filterScalarMode | |Not supported |Force scalar filter mode, 0: off; 1: on, default value 0|
|
||||||
|queryUseNodeAllocator | |Internal parameter, allocation method of the query plan|
|
|queryPlannerTrace | |Supported, effective immediately |Internal parameter, whether the query plan outputs detailed logs|
|
||||||
|queryMaxConcurrentTables| |Internal parameter, concurrency number of the query plan|
|
|queryNodeChunkSize | |Supported, effective immediately |Internal parameter, chunk size of the query plan|
|
||||||
|queryRsmaTolerance | |Internal parameter, tolerance time for determining which level of rsma data to query, in milliseconds|
|
|queryUseNodeAllocator | |Supported, effective immediately |Internal parameter, allocation method of the query plan|
|
||||||
|enableQueryHb | |Internal parameter, whether to send query heartbeat messages|
|
|queryMaxConcurrentTables| |Not supported |Internal parameter, concurrency number of the query plan|
|
||||||
|pqSortMemThreshold | |Internal parameter, memory threshold for sorting|
|
|queryRsmaTolerance | |Not supported |Internal parameter, tolerance time for determining which level of rsma data to query, in milliseconds|
|
||||||
|
|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages|
|
||||||
|
|pqSortMemThreshold | |Not supported |Internal parameter, memory threshold for sorting|
|
||||||
|
|
||||||
### Region Related
|
### Region Related
|
||||||
|
|
||||||
|Parameter Name|Supported Version|Description|
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|-----------------|----------|-|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
|timezone | |Time zone; defaults to dynamically obtaining the current time zone setting from the system|
|
|timezone | |Not supported |Time zone; defaults to dynamically obtaining the current time zone setting from the system|
|
||||||
|locale | |System locale information and encoding format, defaults to obtaining from the system|
|
|locale | |Not supported |System locale information and encoding format, defaults to obtaining from the system|
|
||||||
|charset | |Character set encoding, defaults to obtaining from the system|
|
|charset | |Not supported |Character set encoding, defaults to obtaining from the system|
|
||||||
|
|
||||||
:::info
|
:::info
|
||||||
|
|
||||||
|
@ -167,152 +169,153 @@ The effective value of charset is UTF-8.
|
||||||
|
|
||||||
### Storage Related
|
### Storage Related
|
||||||
|
|
||||||
|Parameter Name|Supported Version|Description|
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|--------------------|----------|-|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
|dataDir | |Directory for data files, all data files are written to this directory, default value /var/lib/taos|
|
|dataDir | |Not supported |Directory for data files, all data files are written to this directory, default value /var/lib/taos|
|
||||||
|tempDir | |Specifies the directory for generating temporary files during system operation, default value /tmp|
|
|tempDir | |Not supported |Specifies the directory for generating temporary files during system operation, default value /tmp|
|
||||||
|minimalDataDirGB | |Minimum space to be reserved in the time-series data storage directory specified by dataDir, in GB, default value 2|
|
|minimalDataDirGB | |Not supported |Minimum space to be reserved in the time-series data storage directory specified by dataDir, in GB, default value 2|
|
||||||
|minimalTmpDirGB | |Minimum space to be reserved in the temporary file directory specified by tempDir, in GB, default value 1|
|
|minimalTmpDirGB | |Not supported |Minimum space to be reserved in the temporary file directory specified by tempDir, in GB, default value 1|
|
||||||
|minDiskFreeSize |After 3.1.1.0|When the available space on a disk is less than or equal to this threshold, the disk will no longer be selected for generating new data files, unit is bytes, range 52428800-1073741824, default value 52428800; Enterprise parameter|
|
|minDiskFreeSize |After 3.1.1.0|Supported, effective immediately |When the available space on a disk is less than or equal to this threshold, the disk will no longer be selected for generating new data files, unit is bytes, range 52428800-1073741824, default value 52428800; Enterprise parameter|
|
||||||
|s3MigrateIntervalSec|After 3.3.4.3|Trigger cycle for automatic upload of local data files to S3, in seconds. Minimum: 600; Maximum: 100000. Default value 3600; Enterprise parameter|
|
|s3MigrateIntervalSec|After 3.3.4.3|Supported, effective immediately |Trigger cycle for automatic upload of local data files to S3, in seconds. Minimum: 600; Maximum: 100000. Default value 3600; Enterprise parameter|
|
||||||
|s3MigrateEnabled |After 3.3.4.3|Whether to automatically perform S3 migration, default value is 0, which means auto S3 migration is off, can be set to 1; Enterprise parameter|
|
|s3MigrateEnabled |After 3.3.4.3|Supported, effective immediately |Whether to automatically perform S3 migration, default value is 0, which means auto S3 migration is off, can be set to 1; Enterprise parameter|
|
||||||
|s3Accesskey |After 3.3.4.3|Colon-separated user SecretId:SecretKey, for example AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E; Enterprise parameter|
|
|s3Accesskey |After 3.3.4.3|Supported, effective after restart|Colon-separated user SecretId:SecretKey, for example AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E; Enterprise parameter|
|
||||||
|s3Endpoint |After 3.3.4.3|COS service domain name in the user's region, supports http and https, the region of the bucket must match the endpoint, otherwise it cannot be accessed; Enterprise parameter|
|
|s3Endpoint |After 3.3.4.3|Supported, effective after restart|COS service domain name in the user's region, supports http and https, the region of the bucket must match the endpoint, otherwise it cannot be accessed; Enterprise parameter|
|
||||||
|s3BucketName |After 3.3.4.3|Bucket name, followed by a hyphen and the AppId of the user registered COS service, where AppId is unique to COS, not present in AWS and Alibaba Cloud, needs to be part of the bucket name, separated by a hyphen; parameter values are string type, but do not need quotes; for example test0711-1309024725; Enterprise parameter|
|
|s3BucketName |After 3.3.4.3|Supported, effective after restart|Bucket name, followed by a hyphen and the AppId of the user registered COS service, where AppId is unique to COS, not present in AWS and Alibaba Cloud, needs to be part of the bucket name, separated by a hyphen; parameter values are string type, but do not need quotes; for example test0711-1309024725; Enterprise parameter|
|
||||||
|s3PageCacheSize |After 3.3.4.3|Number of S3 page cache pages, range 4-1048576, unit is pages, default value 4096; Enterprise parameter|
|
|s3PageCacheSize |After 3.3.4.3|Supported, effective after restart|Number of S3 page cache pages, range 4-1048576, unit is pages, default value 4096; Enterprise parameter|
|
||||||
|s3UploadDelaySec |After 3.3.4.3|How long a data file remains unchanged before being uploaded to S3, range 1-2592000 (30 days), in seconds, default value 60; Enterprise parameter|
|
|s3UploadDelaySec |After 3.3.4.3|Supported, effective immediately |How long a data file remains unchanged before being uploaded to S3, range 1-2592000 (30 days), in seconds, default value 60; Enterprise parameter|
|
||||||
|cacheLazyLoadThreshold| |Internal parameter, cache loading strategy|
|
|cacheLazyLoadThreshold| |Supported, effective immediately |Internal parameter, cache loading strategy|
|
||||||
|
|
||||||
### Cluster Related
|
### Cluster Related
|
||||||
|
|
||||||
|Parameter Name|Supported Version|Description|
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|--------------------------|----------|-|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
|supportVnodes | |Maximum number of vnodes supported by a dnode, range 0-4096, default value is twice the number of CPU cores + 5|
|
|supportVnodes | |Supported, effective immediately |Maximum number of vnodes supported by a dnode, range 0-4096, default value is twice the number of CPU cores + 5|
|
||||||
|numOfCommitThreads | |Maximum number of commit threads, range 0-1024, default value 4|
|
|numOfCommitThreads | |Supported, effective after restart|Maximum number of commit threads, range 0-1024, default value 4|
|
||||||
|numOfMnodeReadThreads | |Number of Read threads for mnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
|numOfMnodeReadThreads | |Supported, effective after restart|Number of Read threads for mnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||||
|numOfVnodeQueryThreads | |Number of Query threads for vnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|
|numOfVnodeQueryThreads | |Supported, effective after restart|Number of Query threads for vnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|
||||||
|numOfVnodeFetchThreads | |Number of Fetch threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
|numOfVnodeFetchThreads | |Supported, effective after restart|Number of Fetch threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||||
|numOfVnodeRsmaThreads | |Number of Rsma threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
|numOfVnodeRsmaThreads | |Supported, effective after restart|Number of Rsma threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||||
|numOfQnodeQueryThreads | |Number of Query threads for qnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|
|numOfQnodeQueryThreads | |Supported, effective after restart|Number of Query threads for qnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|
||||||
|numOfSnodeSharedThreads | |Number of shared threads for snode, range 0-1024, default value is one quarter of the CPU cores (not less than 2, not exceeding 4)|
|
|numOfSnodeSharedThreads | |Supported, effective after restart|Number of shared threads for snode, range 0-1024, default value is one quarter of the CPU cores (not less than 2, not exceeding 4)|
|
||||||
|numOfSnodeUniqueThreads | |Number of exclusive threads for snode, range 0-1024, default value is one quarter of the CPU cores (not less than 2, not exceeding 4)|
|
|numOfSnodeUniqueThreads | |Supported, effective after restart|Number of exclusive threads for snode, range 0-1024, default value is one quarter of the CPU cores (not less than 2, not exceeding 4)|
|
||||||
|ratioOfVnodeStreamThreads | |Ratio of stream computing using vnode threads, range 0.01-4, default value 4|
|
|ratioOfVnodeStreamThreads | |Supported, effective after restart|Ratio of stream computing using vnode threads, range 0.01-4, default value 4|
|
||||||
|ttlUnit | |Unit for ttl parameter, range 1-31572500, in seconds, default value 86400|
|
|ttlUnit | |Not supported |Unit for ttl parameter, range 1-31572500, in seconds, default value 86400|
|
||||||
|ttlPushInterval | |Frequency of ttl timeout checks, range 1-100000, in seconds, default value 10|
|
|ttlPushInterval | |Supported, effective immediately |Frequency of ttl timeout checks, range 1-100000, in seconds, default value 10|
|
||||||
|ttlChangeOnWrite | |Whether ttl expiration time changes with table modification; 0: no change, 1: change; default value 0|
|
|ttlChangeOnWrite | |Supported, effective immediately |Whether ttl expiration time changes with table modification; 0: no change, 1: change; default value 0|
|
||||||
|ttlBatchDropNum | |Number of subtables deleted in a batch for ttl, minimum value 0, default value 10000|
|
|ttlBatchDropNum | |Supported, effective immediately |Number of subtables deleted in a batch for ttl, minimum value 0, default value 10000|
|
||||||
|retentionSpeedLimitMB | |Speed limit for data migration across different levels of disks, range 0-1024, in MB, default value 0, which means no limit|
|
|retentionSpeedLimitMB | |Supported, effective immediately |Speed limit for data migration across different levels of disks, range 0-1024, in MB, default value 0, which means no limit|
|
||||||
|maxTsmaNum | |Maximum number of TSMAs that can be created in the cluster; range 0-3; default value 3|
|
|maxTsmaNum | |Supported, effective immediately |Maximum number of TSMAs that can be created in the cluster; range 0-3; default value 3|
|
||||||
|tmqMaxTopicNum | |Maximum number of topics that can be established for subscription; range 1-10000; default value 20|
|
|tmqMaxTopicNum | |Supported, effective immediately |Maximum number of topics that can be established for subscription; range 1-10000; default value 20|
|
||||||
|tmqRowSize | |Maximum number of records in a subscription data block, range 1-1000000, default value 4096|
|
|tmqRowSize | |Supported, effective immediately |Maximum number of records in a subscription data block, range 1-1000000, default value 4096|
|
||||||
|audit | |Audit feature switch; Enterprise parameter|
|
|audit | |Supported, effective immediately |Audit feature switch; Enterprise parameter|
|
||||||
|auditInterval | |Time interval for reporting audit data; Enterprise parameter|
|
|auditInterval | |Supported, effective immediately |Time interval for reporting audit data; Enterprise parameter|
|
||||||
|auditCreateTable | |Whether to enable audit feature for creating subtables; Enterprise parameter|
|
|auditCreateTable | |Supported, effective immediately |Whether to enable audit feature for creating subtables; Enterprise parameter|
|
||||||
|encryptAlgorithm | |Data encryption algorithm; Enterprise parameter|
|
|encryptAlgorithm | |Not supported |Data encryption algorithm; Enterprise parameter|
|
||||||
|encryptScope | |Encryption scope; Enterprise parameter|
|
|encryptScope | |Not supported |Encryption scope; Enterprise parameter|
|
||||||
|enableWhiteList | |Switch for whitelist feature; Enterprise parameter|
|
|enableWhiteList | |Supported, effective immediately |Switch for whitelist feature; Enterprise parameter|
|
||||||
|syncLogBufferMemoryAllowed| |Maximum memory allowed for sync log cache messages for a dnode, in bytes, range 104857600-INT64_MAX, default value is 1/10 of server memory, effective from versions 3.1.3.2/3.3.2.13|
|
|syncLogBufferMemoryAllowed| |Supported, effective immediately |Maximum memory allowed for sync log cache messages for a dnode, in bytes, range 104857600-INT64_MAX, default value is 1/10 of server memory, effective from versions 3.1.3.2/3.3.2.13|
|
||||||
|syncElectInterval | |Internal parameter, for debugging synchronization module|
|
|syncElectInterval | |Not supported |Internal parameter, for debugging synchronization module|
|
||||||
|syncHeartbeatInterval | |Internal parameter, for debugging synchronization module|
|
|syncHeartbeatInterval | |Not supported |Internal parameter, for debugging synchronization module|
|
||||||
|syncHeartbeatTimeout | |Internal parameter, for debugging synchronization module|
|
|syncHeartbeatTimeout | |Not supported |Internal parameter, for debugging synchronization module|
|
||||||
|syncSnapReplMaxWaitN | |Internal parameter, for debugging synchronization module|
|
|syncSnapReplMaxWaitN | |Supported, effective immediately |Internal parameter, for debugging synchronization module|
|
||||||
|syncSnapReplMaxWaitN | |Internal parameter, for debugging synchronization module|
|
|arbHeartBeatIntervalSec | |Supported, effective immediately |Internal parameter, for debugging synchronization module|
|
||||||
|arbHeartBeatIntervalSec | |Internal parameter, for debugging synchronization module|
|
|arbCheckSyncIntervalSec | |Supported, effective immediately |Internal parameter, for debugging synchronization module|
|
||||||
|arbCheckSyncIntervalSec | |Internal parameter, for debugging synchronization module|
|
|arbSetAssignedTimeoutSec | |Supported, effective immediately |Internal parameter, for debugging synchronization module|
|
||||||
|arbSetAssignedTimeoutSec | |Internal parameter, for debugging synchronization module|
|
|mndSdbWriteDelta | |Supported, effective immediately |Internal parameter, for debugging mnode module|
|
||||||
|mndSdbWriteDelta | |Internal parameter, for debugging mnode module|
|
|mndLogRetention | |Supported, effective immediately |Internal parameter, for debugging mnode module|
|
||||||
|mndLogRetention | |Internal parameter, for debugging mnode module|
|
|skipGrant | |Not supported |Internal parameter, for authorization checks|
|
||||||
|skipGrant | |Internal parameter, for authorization checks|
|
|trimVDbIntervalSec | |Supported, effective immediately |Internal parameter, for deleting expired data|
|
||||||
|trimVDbIntervalSec | |Internal parameter, for deleting expired data|
|
|ttlFlushThreshold | |Supported, effective immediately |Internal parameter, frequency of ttl timer|
|
||||||
|ttlFlushThreshold | |Internal parameter, frequency of ttl timer|
|
|compactPullupInterval | |Supported, effective immediately |Internal parameter, frequency of data reorganization timer|
|
||||||
|compactPullupInterval | |Internal parameter, frequency of data reorganization timer|
|
|walFsyncDataSizeLimit | |Supported, effective immediately |Internal parameter, threshold for WAL to perform FSYNC|
|
||||||
|walFsyncDataSizeLimit | |Internal parameter, threshold for WAL to perform FSYNC|
|
|transPullupInterval | |Supported, effective immediately |Internal parameter, retry interval for mnode to execute transactions|
|
||||||
|transPullupInterval | |Internal parameter, retry interval for mnode to execute transactions|
|
|mqRebalanceInterval | |Supported, effective immediately |Internal parameter, interval for consumer rebalancing|
|
||||||
|mqRebalanceInterval | |Internal parameter, interval for consumer rebalancing|
|
|uptimeInterval | |Supported, effective immediately |Internal parameter, for recording system uptime|
|
||||||
|uptimeInterval | |Internal parameter, for recording system uptime|
|
|timeseriesThreshold | |Supported, effective immediately |Internal parameter, for usage statistics|
|
||||||
|timeseriesThreshold | |Internal parameter, for usage statistics|
|
|udf | |Supported, effective after restart|Whether to start UDF service; 0: do not start, 1: start; default value 0 |
|
||||||
|udf | |Whether to start UDF service; 0: do not start, 1: start; default value 0 |
|
|udfdResFuncs | |Supported, effective after restart|Internal parameter, for setting UDF result sets|
|
||||||
|udfdResFuncs | |Internal parameter, for setting UDF result sets|
|
|udfdLdLibPath | |Supported, effective after restart|Internal parameter, indicates the library path for loading UDF|
|
||||||
|udfdLdLibPath | |Internal parameter, indicates the library path for loading UDF|
|
|
||||||
|
|
||||||
### Stream Computing Parameters
|
### Stream Computing Parameters
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|-----------------------|----------|-|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
| disableStream | | Switch to enable or disable stream computing |
|
| disableStream | |Supported, effective immediately | Switch to enable or disable stream computing |
|
||||||
| streamBufferSize | | Controls the size of the window state cache in memory, default value is 128MB |
|
| streamBufferSize | |Supported, effective immediately | Controls the size of the window state cache in memory, default value is 128MB |
|
||||||
| streamAggCnt | | Internal parameter, number of concurrent aggregation computations |
|
| streamAggCnt | |Not supported | Internal parameter, number of concurrent aggregation computations |
|
||||||
| checkpointInterval | | Internal parameter, checkpoint synchronization interval |
|
| checkpointInterval | |Supported, effective after restart| Internal parameter, checkpoint synchronization interval |
|
||||||
| concurrentCheckpoint | | Internal parameter, whether to check checkpoints concurrently |
|
| concurrentCheckpoint | |Supported, effective immediately | Internal parameter, whether to check checkpoints concurrently |
|
||||||
| maxStreamBackendCache | | Internal parameter, maximum cache used by stream computing |
|
| maxStreamBackendCache | |Supported, effective immediately | Internal parameter, maximum cache used by stream computing |
|
||||||
| streamSinkDataRate | | Internal parameter, used to control the write speed of stream computing results |
|
| streamSinkDataRate | |Supported, effective after restart| Internal parameter, used to control the write speed of stream computing results |
|
||||||
|
|
||||||
### Log Related
|
### Log Related
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|----------------|----------|-|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
| logDir | | Log file directory, operational logs will be written to this directory, default value /var/log/taos |
|
| logDir | |Not supported | Log file directory, operational logs will be written to this directory, default value /var/log/taos |
|
||||||
| minimalLogDirGB | | Stops writing logs when the available space on the disk where the log folder is located is less than this value, unit GB, default value 1 |
|
| minimalLogDirGB | |Not supported | Stops writing logs when the available space on the disk where the log folder is located is less than this value, unit GB, default value 1 |
|
||||||
| numOfLogLines | | Maximum number of lines allowed in a single log file, default value 10,000,000 |
|
| numOfLogLines | |Supported, effective immediately | Maximum number of lines allowed in a single log file, default value 10,000,000 |
|
||||||
| asyncLog | | Log writing mode, 0: synchronous, 1: asynchronous, default value 1 |
|
| asyncLog | |Supported, effective immediately | Log writing mode, 0: synchronous, 1: asynchronous, default value 1 |
|
||||||
| logKeepDays | | Maximum retention time for log files, unit: days, default value 0, which means unlimited retention, log files will not be renamed, nor will new log files be rolled out, but the content of the log files may continue to roll depending on the log file size setting; when set to a value greater than 0, when the log file size reaches the set limit, it will be renamed to taosdlog.yyy, where yyy is the timestamp of the last modification of the log file, and a new log file will be rolled out |
|
| logKeepDays | |Supported, effective immediately | Maximum retention time for log files, unit: days, default value 0, which means unlimited retention, log files will not be renamed, nor will new log files be rolled out, but the content of the log files may continue to roll depending on the log file size setting; when set to a value greater than 0, when the log file size reaches the set limit, it will be renamed to taosdlog.yyy, where yyy is the timestamp of the last modification of the log file, and a new log file will be rolled out |
|
||||||
| slowLogThreshold| 3.3.3.0 onwards | Slow query threshold, queries taking longer than or equal to this threshold are considered slow, unit seconds, default value 3 |
|
| slowLogThreshold| 3.3.3.0 onwards |Supported, effective immediately | Slow query threshold, queries taking longer than or equal to this threshold are considered slow, unit seconds, default value 3 |
|
||||||
| slowLogMaxLen | 3.3.3.0 onwards | Maximum length of slow query logs, range 1-16384, default value 4096 |
|
| slowLogMaxLen | 3.3.3.0 onwards |Supported, effective immediately | Maximum length of slow query logs, range 1-16384, default value 4096 |
|
||||||
| slowLogScope | 3.3.3.0 onwards | Type of slow query records, range ALL/QUERY/INSERT/OTHERS/NONE, default value QUERY |
|
| slowLogScope | 3.3.3.0 onwards |Supported, effective immediately | Type of slow query records, range ALL/QUERY/INSERT/OTHERS/NONE, default value QUERY |
|
||||||
| slowLogExceptDb | 3.3.3.0 onwards | Specifies the database that does not report slow queries, only supports configuring one database |
|
| slowLogExceptDb | 3.3.3.0 onwards |Supported, effective immediately | Specifies the database that does not report slow queries, only supports configuring one database |
|
||||||
| debugFlag | | Log switch for running logs, 131 (outputs error and warning logs), 135 (outputs error, warning, and debug logs), 143 (outputs error, warning, debug, and trace logs); default value 131 or 135 (depending on the module) |
|
| debugFlag | |Supported, effective immediately | Log switch for running logs, 131 (outputs error and warning logs), 135 (outputs error, warning, and debug logs), 143 (outputs error, warning, debug, and trace logs); default value 131 or 135 (depending on the module) |
|
||||||
| tmrDebugFlag | | Log switch for the timer module, range as above |
|
| tmrDebugFlag | |Supported, effective immediately | Log switch for the timer module, range as above |
|
||||||
| uDebugFlag | | Log switch for the utility module, range as above |
|
| uDebugFlag | |Supported, effective immediately | Log switch for the utility module, range as above |
|
||||||
| rpcDebugFlag | | Log switch for the rpc module, range as above |
|
| rpcDebugFlag | |Supported, effective immediately | Log switch for the rpc module, range as above |
|
||||||
| qDebugFlag | | Log switch for the query module, range as above |
|
| qDebugFlag | |Supported, effective immediately | Log switch for the query module, range as above |
|
||||||
| dDebugFlag | | Log switch for the dnode module, range as above |
|
| dDebugFlag | |Supported, effective immediately | Log switch for the dnode module, range as above |
|
||||||
| vDebugFlag | | Log switch for the vnode module, range as above |
|
| vDebugFlag | |Supported, effective immediately | Log switch for the vnode module, range as above |
|
||||||
| mDebugFlag | | Log switch for the mnode module, range as above |
|
| mDebugFlag | |Supported, effective immediately | Log switch for the mnode module, range as above |
|
||||||
| azDebugFlag | 3.3.4.3 onwards | Log switch for the S3 module, range as above |
|
| azDebugFlag | 3.3.4.3 onwards |Supported, effective immediately | Log switch for the S3 module, range as above |
|
||||||
| sDebugFlag | | Log switch for the sync module, range as above |
|
| sDebugFlag | |Supported, effective immediately | Log switch for the sync module, range as above |
|
||||||
| tsdbDebugFlag | | Log switch for the tsdb module, range as above |
|
| tsdbDebugFlag | |Supported, effective immediately | Log switch for the tsdb module, range as above |
|
||||||
| tqDebugFlag | | Log switch for the tq module, range as above |
|
| tqDebugFlag | |Supported, effective immediately | Log switch for the tq module, range as above |
|
||||||
| fsDebugFlag | | Log switch for the fs module, range as above |
|
| fsDebugFlag | |Supported, effective immediately | Log switch for the fs module, range as above |
|
||||||
| udfDebugFlag | | Log switch for the udf module, range as above |
|
| udfDebugFlag | |Supported, effective immediately | Log switch for the udf module, range as above |
|
||||||
| smaDebugFlag | | Log switch for the sma module, range as above |
|
| smaDebugFlag | |Supported, effective immediately | Log switch for the sma module, range as above |
|
||||||
| idxDebugFlag | | Log switch for the index module, range as above |
|
| idxDebugFlag | |Supported, effective immediately | Log switch for the index module, range as above |
|
||||||
| tdbDebugFlag | | Log switch for the tdb module, range as above |
|
| tdbDebugFlag | |Supported, effective immediately | Log switch for the tdb module, range as above |
|
||||||
| metaDebugFlag | | Log switch for the meta module, range as above |
|
| metaDebugFlag | |Supported, effective immediately | Log switch for the meta module, range as above |
|
||||||
| stDebugFlag | | Log switch for the stream module, range as above |
|
| stDebugFlag | |Supported, effective immediately | Log switch for the stream module, range as above |
|
||||||
| sndDebugFlag | | Log switch for the snode module, range as above |
|
| sndDebugFlag | |Supported, effective immediately | Log switch for the snode module, range as above |
|
||||||
|
|
||||||
### Debugging Related
|
### Debugging Related
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|----------------------|-------------------|-------------|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
| enableCoreFile | | Whether to generate a core file when crashing, 0: do not generate, 1: generate; default value is 1 |
|
| enableCoreFile | |Supported, effective immediately | Whether to generate a core file when crashing, 0: do not generate, 1: generate; default value is 1 |
|
||||||
| configDir | | Directory where the configuration files are located |
|
| configDir | |Not supported | Directory where the configuration files are located |
|
||||||
| scriptDir | | Directory for internal test tool scripts |
|
|forceReadConfig | |Not supported |Force the use of parameters from the configuration file, default value: 0|
|
||||||
| assert | | Assertion control switch, default value is 0 |
|
| scriptDir | |Not supported | Directory for internal test tool scripts |
|
||||||
| randErrorChance | | Internal parameter, used for random failure testing |
|
| assert | |Not supported | Assertion control switch, default value is 0 |
|
||||||
| randErrorDivisor | | Internal parameter, used for random failure testing |
|
| randErrorChance | |Supported, effective immediately | Internal parameter, used for random failure testing |
|
||||||
| randErrorScope | | Internal parameter, used for random failure testing |
|
| randErrorDivisor | |Supported, effective immediately | Internal parameter, used for random failure testing |
|
||||||
| safetyCheckLevel | | Internal parameter, used for random failure testing |
|
| randErrorScope | |Supported, effective immediately | Internal parameter, used for random failure testing |
|
||||||
| experimental | | Internal parameter, used for some experimental features |
|
| safetyCheckLevel | |Supported, effective immediately | Internal parameter, used for random failure testing |
|
||||||
| simdEnable | After 3.3.4.3 | Internal parameter, used for testing SIMD acceleration |
|
| experimental | |Supported, effective immediately | Internal parameter, used for some experimental features |
|
||||||
| AVX512Enable | After 3.3.4.3 | Internal parameter, used for testing AVX512 acceleration |
|
| simdEnable | After 3.3.4.3 |Not supported | Internal parameter, used for testing SIMD acceleration |
|
||||||
| rsyncPort | | Internal parameter, used for debugging stream computing |
|
| AVX512Enable | After 3.3.4.3 |Not supported | Internal parameter, used for testing AVX512 acceleration |
|
||||||
| snodeAddress | | Internal parameter, used for debugging stream computing |
|
| rsyncPort | |Not supported | Internal parameter, used for debugging stream computing |
|
||||||
| checkpointBackupDir | | Internal parameter, used for restoring snode data |
|
| snodeAddress | |Supported, effective immediately | Internal parameter, used for debugging stream computing |
|
||||||
| enableAuditDelete | | Internal parameter, used for testing audit functions |
|
| checkpointBackupDir | |Supported, effective immediately | Internal parameter, used for restoring snode data |
|
||||||
| slowLogThresholdTest | | Internal parameter, used for testing slow logs |
|
| enableAuditDelete | |Not supported | Internal parameter, used for testing audit functions |
|
||||||
|
| slowLogThresholdTest | |Not supported | Internal parameter, used for testing slow logs |
|
||||||
|
| bypassFlag |After 3.3.4.5 |Supported, effective immediately | Internal parameter, used for short-circuit testing|
|
||||||
|
|
||||||
### Compression Parameters
|
### Compression Parameters
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||||
|----------------|-------------------|-------------|
|
|-----------------------|-------------------------|--------------------|------------|
|
||||||
| fPrecision | | Sets the compression precision for float type floating numbers, range 0.1 ~ 0.00000001, default value 0.00000001, floating numbers smaller than this value will have their mantissa truncated |
|
| fPrecision | |Supported, effective immediately | Sets the compression precision for float type floating numbers, range 0.1 ~ 0.00000001, default value 0.00000001, floating numbers smaller than this value will have their mantissa truncated |
|
||||||
| dPrecision | | Sets the compression precision for double type floating numbers, range 0.1 ~ 0.0000000000000001, default value 0.0000000000000001, floating numbers smaller than this value will have their mantissa truncated |
|
| dPrecision | |Supported, effective immediately | Sets the compression precision for double type floating numbers, range 0.1 ~ 0.0000000000000001, default value 0.0000000000000001, floating numbers smaller than this value will have their mantissa truncated |
|
||||||
| lossyColumn | Before 3.3.0.0 | Enables TSZ lossy compression for float and/or double types; range float/double/none; default value none, indicating lossless compression is off |
|
| lossyColumn | Before 3.3.0.0 |Not supported | Enables TSZ lossy compression for float and/or double types; range float/double/none; default value none, indicating lossless compression is off |
|
||||||
| ifAdtFse | | When TSZ lossy compression is enabled, use the FSE algorithm instead of the HUFFMAN algorithm, FSE algorithm is faster in compression but slightly slower in decompression, choose this for faster compression speed; 0: off, 1: on; default value is 0 |
|
| ifAdtFse | |Supported, effective after restart| When TSZ lossy compression is enabled, use the FSE algorithm instead of the HUFFMAN algorithm, FSE algorithm is faster in compression but slightly slower in decompression, choose this for faster compression speed; 0: off, 1: on; default value is 0 |
|
||||||
| maxRange | | Internal parameter, used for setting lossy compression |
|
| maxRange | |Supported, effective after restart| Internal parameter, used for setting lossy compression |
|
||||||
| curRange | | Internal parameter, used for setting lossy compression |
|
| curRange | |Supported, effective after restart| Internal parameter, used for setting lossy compression |
|
||||||
| compressor | | Internal parameter, used for setting lossy compression |
|
| compressor | |Supported, effective after restart| Internal parameter, used for setting lossy compression |
|
||||||
|
|
||||||
**Additional Notes**
|
**Additional Notes**
|
||||||
|
|
||||||
|
|
|
@ -10,107 +10,109 @@ The TDengine client driver provides all the APIs needed for application programm
|
||||||
|
|
||||||
### Connection Related
|
### Connection Related
|
||||||
|
|
||||||
|Parameter Name|Supported Version|Description|
|
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||||
|----------------------|----------|-------------|
|
|----------------------|----------|--------------------|-------------|
|
||||||
|firstEp | |At startup, the endpoint of the first dnode in the cluster to actively connect to, default value: hostname:6030, if the server's hostname cannot be obtained, it is assigned to localhost|
|
|firstEp | |Supported, effective immediately |At startup, the endpoint of the first dnode in the cluster to actively connect to, default value: hostname:6030, if the server's hostname cannot be obtained, it is assigned to localhost|
|
||||||
|secondEp | |At startup, if the firstEp cannot be connected, try to connect to the endpoint of the second dnode in the cluster, no default value|
|
|secondEp | |Supported, effective immediately |At startup, if the firstEp cannot be connected, try to connect to the endpoint of the second dnode in the cluster, no default value|
|
||||||
|compressMsgSize | |Whether to compress RPC messages; -1: no messages are compressed; 0: all messages are compressed; N (N>0): only messages larger than N bytes are compressed; default value -1|
|
|compressMsgSize | |Supported, effective immediately |Whether to compress RPC messages; -1: no messages are compressed; 0: all messages are compressed; N (N>0): only messages larger than N bytes are compressed; default value -1|
|
||||||
|shellActivityTimer | |The duration in seconds for the client to send heartbeats to mnode, range 1-120, default value 3|
|
|shellActivityTimer | |Not supported |The duration in seconds for the client to send heartbeats to mnode, range 1-120, default value 3|
|
||||||
|numOfRpcSessions | |Maximum number of connections supported by RPC, range 100-100000, default value 30000|
|
|numOfRpcSessions | |Supported, effective immediately |Maximum number of connections supported by RPC, range 100-100000, default value 30000|
|
||||||
|numOfRpcThreads | |Number of threads for RPC to send and receive data, range 1-50, default value is half of the CPU cores|
|
|numOfRpcThreads | |Not supported |Number of threads for RPC to send and receive data, range 1-50, default value is half of the CPU cores|
|
||||||
|numOfTaskQueueThreads | |Number of threads for the client to handle RPC messages, range 4-16, default value is half of the CPU cores|
|
|numOfTaskQueueThreads | |Not supported |Number of threads for the client to handle RPC messages, range 4-16, default value is half of the CPU cores|
|
||||||
|timeToGetAvailableConn| Cancelled after 3.3.4.* |The longest waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|
|timeToGetAvailableConn| Cancelled after 3.3.4.* |Not supported |The longest waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|
||||||
|useAdapter | |Internal parameter, whether to use taosadapter, affects CSV file import|
|
|useAdapter | |Supported, effective immediately |Internal parameter, whether to use taosadapter, affects CSV file import|
|
||||||
|shareConnLimit |Added in 3.3.4.0|Internal parameter, the number of queries a link can share, range 1-256, default value 10|
|
|shareConnLimit |Added in 3.3.4.0|Not supported |Internal parameter, the number of queries a link can share, range 1-256, default value 10|
|
||||||
|readTimeout |Added in 3.3.4.0|Internal parameter, minimum timeout, range 64-604800, in seconds, default value 900|
|
|readTimeout |Added in 3.3.4.0|Not supported |Internal parameter, minimum timeout, range 64-604800, in seconds, default value 900|
|
||||||
|
|
||||||
### Query Related
|
### Query Related
|
||||||
|
|
||||||
|Parameter Name|Supported Version|Description|
|
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||||
|---------------------------------|---------|-|
|
|----------------------|----------|--------------------|-------------|
|
||||||
|countAlwaysReturnValue | |Whether the count/hyperloglog function returns a value when the input data is empty or NULL; 0: returns an empty row, 1: returns; default value 1; when this parameter is set to 1, if the query contains an INTERVAL clause or the query uses TSMA, and the corresponding group or window has empty or NULL data, the corresponding group or window will not return a query result; note that this parameter should be consistent between client and server|
|
|countAlwaysReturnValue | |Supported, effective immediately |Whether the count/hyperloglog function returns a value when the input data is empty or NULL; 0: returns an empty row, 1: returns; default value 1; when this parameter is set to 1, if the query contains an INTERVAL clause or the query uses TSMA, and the corresponding group or window has empty or NULL data, the corresponding group or window will not return a query result; note that this parameter should be consistent between client and server|
|
||||||
|keepColumnName | |Automatically sets the alias to the column name (excluding the function name) when querying with Last, First, LastRow functions without specifying an alias, thus the order by clause will automatically refer to the column corresponding to the function; 1: automatically sets the alias to the column name (excluding the function name), 0: does not automatically set an alias; default value: 0|
|
|keepColumnName | |Supported, effective immediately |Automatically sets the alias to the column name (excluding the function name) when querying with Last, First, LastRow functions without specifying an alias, thus the order by clause will automatically refer to the column corresponding to the function; 1: automatically sets the alias to the column name (excluding the function name), 0: does not automatically set an alias; default value: 0|
|
||||||
|multiResultFunctionStarReturnTags|After 3.3.3.0|When querying a supertable, whether last(\*)/last_row(\*)/first(\*) returns tag columns; when querying basic tables, subtables, it is not affected by this parameter; 0: does not return tag columns, 1: returns tag columns; default value: 0; when this parameter is set to 0, last(\*)/last_row(\*)/first(\*) only returns the ordinary columns of the supertable; when set to 1, it returns both the ordinary columns and tag columns of the supertable|
|
|multiResultFunctionStarReturnTags|After 3.3.3.0|Supported, effective immediately |When querying a supertable, whether last(\*)/last_row(\*)/first(\*) returns tag columns; when querying basic tables, subtables, it is not affected by this parameter; 0: does not return tag columns, 1: returns tag columns; default value: 0; when this parameter is set to 0, last(\*)/last_row(\*)/first(\*) only returns the ordinary columns of the supertable; when set to 1, it returns both the ordinary columns and tag columns of the supertable|
|
||||||
|metaCacheMaxSize | |Specifies the maximum size of metadata cache for a single client, in MB; default value -1, meaning unlimited|
|
|metaCacheMaxSize | |Supported, effective immediately |Specifies the maximum size of metadata cache for a single client, in MB; default value -1, meaning unlimited|
|
||||||
|maxTsmaCalcDelay | |The allowable delay for tsma calculation by the client during query, range 600s - 86400s, i.e., 10 minutes - 1 day; default value: 600 seconds|
|
|maxTsmaCalcDelay | |Supported, effective immediately |The allowable delay for tsma calculation by the client during query, range 600s - 86400s, i.e., 10 minutes - 1 day; default value: 600 seconds|
|
||||||
|tsmaDataDeleteMark | |The retention time for intermediate results of historical data calculated by TSMA, in milliseconds; range >= 3600000, i.e., at least 1h; default value: 86400000, i.e., 1d |
|
|tsmaDataDeleteMark | |Supported, effective immediately |The retention time for intermediate results of historical data calculated by TSMA, in milliseconds; range >= 3600000, i.e., at least 1h; default value: 86400000, i.e., 1d |
|
||||||
|queryPolicy | |Execution strategy for query statements, 1: only use vnode, do not use qnode; 2: subtasks without scan operators are executed on qnode, subtasks with scan operators are executed on vnode; 3: vnode only runs scan operators, all other operators are executed on qnode; default value: 1|
|
|queryPolicy | |Supported, effective immediately |Execution strategy for query statements, 1: only use vnode, do not use qnode; 2: subtasks without scan operators are executed on qnode, subtasks with scan operators are executed on vnode; 3: vnode only runs scan operators, all other operators are executed on qnode; default value: 1|
|
||||||
|queryTableNotExistAsEmpty | |Whether to return an empty result set when the queried table does not exist; false: returns an error; true: returns an empty result set; default value false|
|
|queryTableNotExistAsEmpty | |Supported, effective immediately |Whether to return an empty result set when the queried table does not exist; false: returns an error; true: returns an empty result set; default value false|
|
||||||
|querySmaOptimize | |Optimization strategy for sma index, 0: do not use sma index, always query from original data; 1: use sma index, directly query from pre-calculated results for eligible statements; default value: 0|
|
|querySmaOptimize | |Supported, effective immediately |Optimization strategy for sma index, 0: do not use sma index, always query from original data; 1: use sma index, directly query from pre-calculated results for eligible statements; default value: 0|
|
||||||
|queryPlannerTrace | |Internal parameter, whether the query plan outputs detailed logs|
|
|queryPlannerTrace | |Supported, effective immediately |Internal parameter, whether the query plan outputs detailed logs|
|
||||||
|queryNodeChunkSize | |Internal parameter, chunk size of the query plan|
|
|queryNodeChunkSize | |Supported, effective immediately |Internal parameter, chunk size of the query plan|
|
||||||
|queryUseNodeAllocator | |Internal parameter, allocation method of the query plan|
|
|queryUseNodeAllocator | |Supported, effective immediately |Internal parameter, allocation method of the query plan|
|
||||||
|queryMaxConcurrentTables | |Internal parameter, concurrency number of the query plan|
|
|queryMaxConcurrentTables | |Not supported |Internal parameter, concurrency number of the query plan|
|
||||||
|enableQueryHb | |Internal parameter, whether to send query heartbeat messages|
|
|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages|
|
||||||
|minSlidingTime | |Internal parameter, minimum allowable value for sliding|
|
|minSlidingTime | |Supported, effective immediately |Internal parameter, minimum allowable value for sliding|
|
||||||
|minIntervalTime | |Internal parameter, minimum allowable value for interval|
|
|minIntervalTime | |Supported, effective immediately |Internal parameter, minimum allowable value for interval|
|
||||||
|
|
||||||
### Writing Related
|
### Writing Related
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||||
|---------------------------------|-------------------|-------------|
|
|----------------------|----------|--------------------|-------------|
|
||||||
| smlChildTableName | | Key for custom child table name in schemaless, no default value |
|
| smlChildTableName | |Supported, effective immediately | Key for custom child table name in schemaless, no default value |
|
||||||
| smlAutoChildTableNameDelimiter | | Delimiter between schemaless tags, concatenated as the child table name, no default value |
|
| smlAutoChildTableNameDelimiter | |Supported, effective immediately | Delimiter between schemaless tags, concatenated as the child table name, no default value |
|
||||||
| smlTagName | | Default tag name when schemaless tag is empty, default value "_tag_null" |
|
| smlTagName | |Supported, effective immediately | Default tag name when schemaless tag is empty, default value "_tag_null" |
|
||||||
| smlTsDefaultName | | Configuration for setting the time column name in schemaless auto table creation, default value "_ts" |
|
| smlTsDefaultName | |Supported, effective immediately | Configuration for setting the time column name in schemaless auto table creation, default value "_ts" |
|
||||||
| smlDot2Underline | | Converts dots in supertable names to underscores in schemaless |
|
| smlDot2Underline | |Supported, effective immediately | Converts dots in supertable names to underscores in schemaless |
|
||||||
| maxInsertBatchRows | | Internal parameter, maximum number of rows per batch insert |
|
| maxInsertBatchRows | |Supported, effective immediately | Internal parameter, maximum number of rows per batch insert |
|
||||||
|
|
||||||
### Region Related
|
### Region Related
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||||
|----------------|-------------------|-------------|
|
|----------------------|----------|--------------------|-------------|
|
||||||
| timezone | | Time zone; defaults to dynamically obtaining the current system time zone setting |
|
| timezone | |Supported, effective immediately | Time zone; defaults to dynamically obtaining the current system time zone setting |
|
||||||
| locale | | System locale and encoding format, defaults to system settings |
|
| locale | |Supported, effective immediately | System locale and encoding format, defaults to system settings |
|
||||||
| charset | | Character set encoding, defaults to system settings |
|
| charset | |Supported, effective immediately | Character set encoding, defaults to system settings |
|
||||||
|
|
||||||
### Storage Related
|
### Storage Related
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||||
|-----------------|-------------------|-------------|
|
|----------------------|----------|--------------------|-------------|
|
||||||
| tempDir | | Specifies the directory for generating temporary files during operation, default on Linux platform is /tmp |
|
| tempDir | |Supported, effective immediately | Specifies the directory for generating temporary files during operation, default on Linux platform is /tmp |
|
||||||
| minimalTmpDirGB | | Minimum space required to be reserved in the directory specified by tempDir, in GB, default value: 1 |
|
| minimalTmpDirGB | |Supported, effective immediately | Minimum space required to be reserved in the directory specified by tempDir, in GB, default value: 1 |
|
||||||
|
|
||||||
### Log Related
|
### Log Related
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||||
|------------------|-------------------|-------------|
|
|----------------------|----------|--------------------|-------------|
|
||||||
| logDir | | Log file directory, operational logs will be written to this directory, default value: /var/log/taos |
|
| logDir | |Not supported | Log file directory, operational logs will be written to this directory, default value: /var/log/taos |
|
||||||
| minimalLogDirGB | | Stops writing logs when the disk space available in the log directory is less than this value, in GB, default value: 1 |
|
| minimalLogDirGB | |Supported, effective immediately | Stops writing logs when the disk space available in the log directory is less than this value, in GB, default value: 1 |
|
||||||
| numOfLogLines | | Maximum number of lines allowed in a single log file, default value: 10,000,000 |
|
| numOfLogLines | |Supported, effective immediately | Maximum number of lines allowed in a single log file, default value: 10,000,000 |
|
||||||
| asyncLog | | Log writing mode, 0: synchronous, 1: asynchronous, default value: 1 |
|
| asyncLog | |Supported, effective immediately | Log writing mode, 0: synchronous, 1: asynchronous, default value: 1 |
|
||||||
| logKeepDays | | Maximum retention time for log files, in days, default value: 0, meaning unlimited retention. Log files will not be renamed, nor will new log files be rolled out, but the content of the log files may continue to roll depending on the log file size setting; when set to a value greater than 0, the log file will be renamed to taoslogx.yyy, where yyy is the timestamp of the last modification of the log file, and a new log file will be rolled out |
|
| logKeepDays | |Supported, effective immediately | Maximum retention time for log files, in days, default value: 0, meaning unlimited retention. Log files will not be renamed, nor will new log files be rolled out, but the content of the log files may continue to roll depending on the log file size setting; when set to a value greater than 0, the log file will be renamed to taoslogx.yyy, where yyy is the timestamp of the last modification of the log file, and a new log file will be rolled out |
|
||||||
| debugFlag | | Log switch for running logs, 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs); default value 131 or 135 (depending on the module) |
|
| debugFlag | |Supported, effective immediately | Log switch for running logs, 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs); default value 131 or 135 (depending on the module) |
|
||||||
| tmrDebugFlag | | Log switch for the timer module, value range as above |
|
| tmrDebugFlag | |Supported, effective immediately | Log switch for the timer module, value range as above |
|
||||||
| uDebugFlag | | Log switch for the utility module, value range as above |
|
| uDebugFlag | |Supported, effective immediately | Log switch for the utility module, value range as above |
|
||||||
| rpcDebugFlag | | Log switch for the rpc module, value range as above |
|
| rpcDebugFlag | |Supported, effective immediately | Log switch for the rpc module, value range as above |
|
||||||
| jniDebugFlag | | Log switch for the jni module, value range as above |
|
| jniDebugFlag | |Supported, effective immediately | Log switch for the jni module, value range as above |
|
||||||
| qDebugFlag | | Log switch for the query module, value range as above |
|
| qDebugFlag | |Supported, effective immediately | Log switch for the query module, value range as above |
|
||||||
| cDebugFlag | | Log switch for the client module, value range as above |
|
| cDebugFlag | |Supported, effective immediately | Log switch for the client module, value range as above |
|
||||||
| simDebugFlag | | Internal parameter, log switch for the test tool, value range as above |
|
| simDebugFlag | |Supported, effective immediately | Internal parameter, log switch for the test tool, value range as above |
|
||||||
| tqClientDebugFlag| After 3.3.4.3 | Log switch for the client module, value range as above |
|
| tqClientDebugFlag| After 3.3.4.3 |Supported, effective immediately | Log switch for the client module, value range as above |
|
||||||
|
|
||||||
### Debugging Related
|
### Debugging Related
|
||||||
|
|
||||||
| Parameter Name | Supported Version | Description |
|
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||||
|------------------|-------------------|-------------|
|
|----------------------|----------|--------------------|-------------|
|
||||||
| crashReporting | | Whether to upload crash to telemetry, 0: do not upload, 1: upload; default value: 1 |
|
| crashReporting | |Supported, effective immediately | Whether to upload crash to telemetry, 0: do not upload, 1: upload; default value: 1 |
|
||||||
| enableCoreFile | | Whether to generate a core file when crashing, 0: do not generate, 1: generate; default value: 1 |
|
| enableCoreFile | |Supported, effective immediately | Whether to generate a core file when crashing, 0: do not generate, 1: generate; default value: 1 |
|
||||||
| assert | | Assertion control switch, default value: 0 |
|
| assert | |Not supported | Assertion control switch, default value: 0 |
|
||||||
| configDir | | Directory for configuration files |
|
| configDir | |Not supported | Directory for configuration files |
|
||||||
| scriptDir | | Internal parameter, directory for test cases |
|
| scriptDir | |Not supported | Internal parameter, directory for test cases |
|
||||||
| randErrorChance | After 3.3.3.0 | Internal parameter, used for random failure testing |
|
| randErrorChance | After 3.3.3.0 |Not supported | Internal parameter, used for random failure testing |
|
||||||
| randErrorDivisor | After 3.3.3.0 | Internal parameter, used for random failure testing |
|
| randErrorDivisor | After 3.3.3.0 |Not supported | Internal parameter, used for random failure testing |
|
||||||
| randErrorScope | After 3.3.3.0 | Internal parameter, used for random failure testing |
|
| randErrorScope | After 3.3.3.0 |Not supported | Internal parameter, used for random failure testing |
|
||||||
| safetyCheckLevel | After 3.3.3.0 | Internal parameter, used for random failure testing |
|
| safetyCheckLevel | After 3.3.3.0 |Not supported | Internal parameter, used for random failure testing |
|
||||||
| simdEnable | After 3.3.4.3 | Internal parameter, used for testing SIMD acceleration |
|
| simdEnable | After 3.3.4.3 |Not supported | Internal parameter, used for testing SIMD acceleration |
|
||||||
| AVX512Enable | After 3.3.4.3 | Internal parameter, used for testing AVX512 acceleration |
|
| AVX512Enable | After 3.3.4.3 |Not supported | Internal parameter, used for testing AVX512 acceleration |
|
||||||
|
| bypassFlag |After 3.3.4.5 |Supported, effective immediately | Internal parameter, used for short-circuit testing|
|
||||||
|
|
||||||
|
|
||||||
### SHELL Related
|
### SHELL Related
|
||||||
|
|
||||||
|Parameter Name|Supported Version|Description|
|
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||||
|-----------------|----------|-|
|
|----------------------|----------|--------------------|-------------|
|
||||||
|enableScience | |Whether to enable scientific notation for displaying floating numbers; 0: do not enable, 1: enable; default value: 1|
|
|enableScience | |Not supported |Whether to enable scientific notation for displaying floating numbers; 0: do not enable, 1: enable; default value: 1|
|
||||||
|
|
||||||
## API
|
## API
|
||||||
|
|
||||||
|
|
|
@ -6,11 +6,11 @@ slug: /tdengine-reference/components/taosadapter
|
||||||
|
|
||||||
import Image from '@theme/IdealImage';
|
import Image from '@theme/IdealImage';
|
||||||
import imgAdapter from '../../assets/taosadapter-01.png';
|
import imgAdapter from '../../assets/taosadapter-01.png';
|
||||||
import Prometheus from "../../10-third-party/01-collection/_prometheus.mdx"
|
import Prometheus from "../../assets/resources/_prometheus.mdx"
|
||||||
import CollectD from "../../10-third-party/01-collection/_collectd.mdx"
|
import CollectD from "../../assets/resources/_collectd.mdx"
|
||||||
import StatsD from "../../10-third-party/01-collection/_statsd.mdx"
|
import StatsD from "../../assets/resources/_statsd.mdx"
|
||||||
import Icinga2 from "../../10-third-party/01-collection/_icinga2.mdx"
|
import Icinga2 from "../../assets/resources/_icinga2.mdx"
|
||||||
import TCollector from "../../10-third-party/01-collection/_tcollector.mdx"
|
import TCollector from "../../assets/resources/_tcollector.mdx"
|
||||||
|
|
||||||
taosAdapter is a companion tool for TDengine, serving as a bridge and adapter between the TDengine cluster and applications. It provides an easy and efficient way to ingest data directly from data collection agents (such as Telegraf, StatsD, collectd, etc.). It also offers InfluxDB/OpenTSDB compatible data ingestion interfaces, allowing InfluxDB/OpenTSDB applications to be seamlessly ported to TDengine.
|
taosAdapter is a companion tool for TDengine, serving as a bridge and adapter between the TDengine cluster and applications. It provides an easy and efficient way to ingest data directly from data collection agents (such as Telegraf, StatsD, collectd, etc.). It also offers InfluxDB/OpenTSDB compatible data ingestion interfaces, allowing InfluxDB/OpenTSDB applications to be seamlessly ported to TDengine.
|
||||||
|
|
||||||
|
|
|
@ -173,6 +173,36 @@ Metric details:
|
||||||
|
|
||||||
There are also line charts for the above categories.
|
There are also line charts for the above categories.
|
||||||
|
|
||||||
|
### Automatic import of preconfigured alert rules
|
||||||
|
|
||||||
|
After summarizing user experience, 14 commonly used alert rules are sorted out. These alert rules can monitor key indicators of the TDengine cluster and report alerts, such as abnormal and exceeded indicators.
|
||||||
|
Starting from TDengine-Server 3.3.4.3 (TDengine-datasource 3.6.3), TDengine Datasource supports automatic import of preconfigured alert rules. You can import 14 alert rules to Grafana (version 11 or later) with one click.
|
||||||
|
In the TDengine-datasource setting interface, turn on the "Load TDengine Alert" switch, click the "Save & test" button, the plugin will automatically load the mentioned 14 alert rules. The rules will be placed in the Grafana alerts directory. If not required, turn off the "Load TDengine Alert" switch, and click the button next to "Clear TDengine Alert" to clear all the alert rules imported into this data source.
|
||||||
|
|
||||||
|
After importing, click on "Alert rules" on the left side of the Grafana interface to view all current alert rules. By configuring contact points, users can receive alert notifications.
|
||||||
|
|
||||||
|
The specific configuration of the 14 alert rules is as follows:
|
||||||
|
|
||||||
|
| alert rule| Rule threshold| Behavior when no data | Data scanning interval |Duration | SQL |
|
||||||
|
| ------ | --------- | ---------------- | ----------- |------- |----------------------|
|
||||||
|
|CPU load of dnode node|average > 80%|Trigger alert|5 minutes|5 minutes |`select now(), dnode_id, last(cpu_system) as cpu_use from log.taosd_dnodes_info where _ts >= (now- 5m) and _ts < now partition by dnode_id having first(_ts) > 0 `|
|
||||||
|
|Memory of dnode node |average > 60%|Trigger alert|5 minutes|5 minutes|`select now(), dnode_id, last(mem_engine) / last(mem_total) * 100 as taosd from log.taosd_dnodes_info where _ts >= (now- 5m) and _ts <now partition by dnode_id`|
|
||||||
|
|Disk capacity occupancy of dnode nodes | > 80%|Trigger alert|5 minutes|5 minutes|`select now(), dnode_id, data_dir_level, data_dir_name, last(used) / last(total) * 100 as used from log.taosd_dnodes_data_dirs where _ts >= (now - 5m) and _ts < now partition by dnode_id, data_dir_level, data_dir_name`|
|
||||||
|
|Authorization expires |< 60 days|Trigger alert|1 day|0 seconds|`select now(), cluster_id, last(grants_expire_time) / 86400 as expire_time from log.taosd_cluster_info where _ts >= (now - 24h) and _ts < now partition by cluster_id having first(_ts) > 0 `|
|
||||||
|
|The used measurement points have reached the authorized number|>= 90%|Trigger alert|1 day|0 seconds|`select now(), cluster_id, CASE WHEN max(grants_timeseries_total) > 0.0 THEN max(grants_timeseries_used) /max(grants_timeseries_total) * 100.0 ELSE 0.0 END AS result from log.taosd_cluster_info where _ts >= (now - 30s) and _ts < now partition by cluster_id having timetruncate(first(_ts), 1m) > 0`|
|
||||||
|
|Number of concurrent query requests | > 100|Do not trigger alert|1 minute|0 seconds|`select now() as ts, count(*) as slow_count from performance_schema.perf_queries`|
|
||||||
|
|Maximum time for slow query execution (no time window) |> 300 seconds|Do not trigger alert|1 minute|0 seconds|`select now() as ts, count(*) as slow_count from performance_schema.perf_queries where exec_usec>300000000`|
|
||||||
|
|dnode offline |total != alive|Trigger alert|30 seconds|0 seconds|`select now(), cluster_id, last(dnodes_total) - last(dnodes_alive) as dnode_offline from log.taosd_cluster_info where _ts >= (now -30s) and _ts < now partition by cluster_id having first(_ts) > 0`|
|
||||||
|
|vnode offline |total != alive|Trigger alert|30 seconds|0 seconds|`select now(), cluster_id, last(vnodes_total) - last(vnodes_alive) as vnode_offline from log.taosd_cluster_info where _ts >= (now - 30s) and _ts < now partition by cluster_id having first(_ts) > 0 `|
|
||||||
|
|Number of data deletion requests |> 0|Do not trigger alert|30 seconds|0 seconds|``select now(), count(`count`) as `delete_count` from log.taos_sql_req where sql_type = 'delete' and _ts >= (now -30s) and _ts < now``|
|
||||||
|
|Adapter RESTful request fail |> 5|Do not trigger alert|30 seconds|0 seconds|``select now(), sum(`fail`) as `Failed` from log.adapter_requests where req_type=0 and ts >= (now -30s) and ts < now``|
|
||||||
|
|Adapter WebSocket request fail |> 5|Do not trigger alert|30 seconds|0 seconds|``select now(), sum(`fail`) as `Failed` from log.adapter_requests where req_type=1 and ts >= (now -30s) and ts < now``|
|
||||||
|
|Dnode data reporting is missing |< 3|Trigger alert|180 seconds|0 seconds|`select now(), cluster_id, count(*) as dnode_report from log.taosd_cluster_info where _ts >= (now -180s) and _ts < now partition by cluster_id having timetruncate(first(_ts), 1h) > 0`|
|
||||||
|
|Restart dnode |max(uptime) > last(uptime)|Trigger alert|90 seconds|0 seconds|`select now(), dnode_id, max(uptime) - last(uptime) as dnode_restart from log.taosd_dnodes_info where _ts >= (now - 90s) and _ts < now partition by dnode_id`|
|
||||||
|
|
||||||
|
TDengine users can modify and improve these alert rules according to their own business needs. In Grafana 7.5 and below versions, the Dashboard and Alert rules functions are combined, while in subsequent new versions, the two functions are separated. To be compatible with Grafana 7.5 and below versions, an Alert Used Only panel has been added to the TDinsight panel, which is only required for Grafana 7.5 and below versions.
|
||||||
|
|
||||||
|
|
||||||
## Upgrade
|
## Upgrade
|
||||||
|
|
||||||
The following three methods can be used for upgrading:
|
The following three methods can be used for upgrading:
|
||||||
|
|
|
@ -215,4 +215,19 @@ Automatically adjusts the distribution of vnodes in all vgroups of the cluster,
|
||||||
SHOW db_name.ALIVE;
|
SHOW db_name.ALIVE;
|
||||||
```
|
```
|
||||||
|
|
||||||
Query the availability status of the database db_name, return values 0: unavailable, 1: fully available, 2: partially available (i.e., some nodes of the VNODEs included in the database are available, some are not)
|
Query the availability status of the database db_name, with return values of 0 (unavailable), 1 (fully available), or 2 (partially available, indicating that some VNODEs in the database are available while others are not).
|
||||||
|
|
||||||
|
## View DB Disk Usage
|
||||||
|
|
||||||
|
```sql
|
||||||
|
select * from INFORMATION_SCHEMA.INS_DISK_USAGE where db_name = 'db_name'
|
||||||
|
```
|
||||||
|
|
||||||
|
View the disk usage of each module in the DB.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW db_name.disk_info;
|
||||||
|
```
|
||||||
|
View the compression ratio and disk usage of the database db_name
|
||||||
|
|
||||||
|
This command is essentially equivalent to `select sum(data1 + data2 + data3)/sum(raw_data), sum(data1 + data2 + data3) from information_schema.ins_disk_usage where db_name="dbname"`
|
||||||
|
|
|
@ -190,6 +190,7 @@ ROUND(expr[, digits])
|
||||||
- `digits` less than zero means discarding the decimal places and rounding the number to the left of the decimal point by `digits` places. If the number of places to the left of the decimal point is less than `digits`, returns 0.
|
- `digits` less than zero means discarding the decimal places and rounding the number to the left of the decimal point by `digits` places. If the number of places to the left of the decimal point is less than `digits`, returns 0.
|
||||||
- Since the DECIMAL type is not yet supported, this function will use DOUBLE and FLOAT to represent results containing decimals, but DOUBLE and FLOAT have precision limits, and using this function may be meaningless when there are too many digits.
|
- Since the DECIMAL type is not yet supported, this function will use DOUBLE and FLOAT to represent results containing decimals, but DOUBLE and FLOAT have precision limits, and using this function may be meaningless when there are too many digits.
|
||||||
- Can only be used with regular columns, selection (Selection), projection (Projection) functions, and cannot be used with aggregation (Aggregation) functions.
|
- Can only be used with regular columns, selection (Selection), projection (Projection) functions, and cannot be used with aggregation (Aggregation) functions.
|
||||||
|
- `digits` is supported from version 3.3.3.0.
|
||||||
|
|
||||||
**Example**:
|
**Example**:
|
||||||
|
|
||||||
|
@ -249,6 +250,8 @@ TAN(expr)
|
||||||
|
|
||||||
**Function Description**: Obtains the tangent result of the specified field.
|
**Function Description**: Obtains the tangent result of the specified field.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Result Type**: DOUBLE.
|
**Return Result Type**: DOUBLE.
|
||||||
|
|
||||||
**Applicable Data Types**: Numeric types.
|
**Applicable Data Types**: Numeric types.
|
||||||
|
@ -297,6 +300,8 @@ TRUNCATE(expr, digits)
|
||||||
|
|
||||||
**Function Description**: Gets the truncated value of the specified field to the specified number of digits.
|
**Function Description**: Gets the truncated value of the specified field to the specified number of digits.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Type**: Consistent with the original data type of the `expr` field.
|
**Return Type**: Consistent with the original data type of the `expr` field.
|
||||||
|
|
||||||
**Applicable Data Types**:
|
**Applicable Data Types**:
|
||||||
|
@ -340,6 +345,8 @@ EXP(expr)
|
||||||
|
|
||||||
**Function Description**: Returns the value of e (the base of natural logarithms) raised to the specified power.
|
**Function Description**: Returns the value of e (the base of natural logarithms) raised to the specified power.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Type**: DOUBLE.
|
**Return Type**: DOUBLE.
|
||||||
|
|
||||||
**Applicable Data Types**: Numeric type.
|
**Applicable Data Types**: Numeric type.
|
||||||
|
@ -370,6 +377,8 @@ LN(expr)
|
||||||
|
|
||||||
**Function Description**: Returns the natural logarithm of the specified parameter.
|
**Function Description**: Returns the natural logarithm of the specified parameter.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Type**: DOUBLE.
|
**Return Type**: DOUBLE.
|
||||||
|
|
||||||
**Applicable Data Types**: Numeric type.
|
**Applicable Data Types**: Numeric type.
|
||||||
|
@ -401,6 +410,8 @@ MOD(expr1, expr2)
|
||||||
|
|
||||||
**Function Description**: Calculates the result of expr1 % expr2.
|
**Function Description**: Calculates the result of expr1 % expr2.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Type**: DOUBLE.
|
**Return Type**: DOUBLE.
|
||||||
|
|
||||||
**Applicable Data Types**: Numeric type.
|
**Applicable Data Types**: Numeric type.
|
||||||
|
@ -437,6 +448,8 @@ RAND([seed])
|
||||||
|
|
||||||
**Function Description**: Returns a uniformly distributed random number from 0 to 1.
|
**Function Description**: Returns a uniformly distributed random number from 0 to 1.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Result Type**: DOUBLE.
|
**Return Result Type**: DOUBLE.
|
||||||
|
|
||||||
**Applicable Data Types**:
|
**Applicable Data Types**:
|
||||||
|
@ -484,6 +497,8 @@ SIGN(expr)
|
||||||
|
|
||||||
**Function Description**: Returns the sign of the specified parameter.
|
**Function Description**: Returns the sign of the specified parameter.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Result Type**: Consistent with the original data type of the specified field.
|
**Return Result Type**: Consistent with the original data type of the specified field.
|
||||||
|
|
||||||
**Applicable Data Types**: Numeric types.
|
**Applicable Data Types**: Numeric types.
|
||||||
|
@ -527,6 +542,8 @@ DEGREES(expr)
|
||||||
|
|
||||||
**Function Description**: Calculates the value of the specified parameter converted from radians to degrees.
|
**Function Description**: Calculates the value of the specified parameter converted from radians to degrees.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Result Type**: DOUBLE.
|
**Return Result Type**: DOUBLE.
|
||||||
|
|
||||||
**Applicable Data Types**: Numeric types.
|
**Applicable Data Types**: Numeric types.
|
||||||
|
@ -558,6 +575,8 @@ RADIANS(expr)
|
||||||
|
|
||||||
**Function Description**: Calculates the value of the specified parameter converted from degrees to radians.
|
**Function Description**: Calculates the value of the specified parameter converted from degrees to radians.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Type**: DOUBLE.
|
**Return Type**: DOUBLE.
|
||||||
|
|
||||||
**Applicable Data Types**: Numeric types.
|
**Applicable Data Types**: Numeric types.
|
||||||
|
@ -729,6 +748,8 @@ TRIM([remstr FROM] expr)
|
||||||
|
|
||||||
**Function Description**: Returns the string expr with all prefixes or suffixes of remstr removed.
|
**Function Description**: Returns the string expr with all prefixes or suffixes of remstr removed.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Result Type**: Same as the original type of the input field expr.
|
**Return Result Type**: Same as the original type of the input field expr.
|
||||||
|
|
||||||
**Applicable Data Types**:
|
**Applicable Data Types**:
|
||||||
|
@ -807,6 +828,8 @@ SUBSTRING/SUBSTR(expr FROM pos [FOR len])
|
||||||
- If `len` is less than 1, returns an empty string.
|
- If `len` is less than 1, returns an empty string.
|
||||||
- `pos` is 1-based; if `pos` is 0, returns an empty string.
|
- `pos` is 1-based; if `pos` is 0, returns an empty string.
|
||||||
- If `pos` + `len` exceeds `len(expr)`, returns the substring from `pos` to the end of the string, equivalent to executing `substring(expr, pos)`.
|
- If `pos` + `len` exceeds `len(expr)`, returns the substring from `pos` to the end of the string, equivalent to executing `substring(expr, pos)`.
|
||||||
|
- Function `SUBSTRING` is equal to `SUBSTR`, supported from ver-3.3.3.0.
|
||||||
|
- Syntax `SUBSTRING/SUBSTR(expr FROM pos [FOR len])` is supported from ver-3.3.3.0.
|
||||||
|
|
||||||
**Examples**:
|
**Examples**:
|
||||||
|
|
||||||
|
@ -845,6 +868,8 @@ SUBSTRING_INDEX(expr, delim, count)
|
||||||
|
|
||||||
**Function Description**: Returns a substring of `expr` cut at the position where the delimiter appears the specified number of times.
|
**Function Description**: Returns a substring of `expr` cut at the position where the delimiter appears the specified number of times.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Result Type**: Same as the original type of the input field `expr`.
|
**Return Result Type**: Same as the original type of the input field `expr`.
|
||||||
|
|
||||||
**Applicable Data Types**:
|
**Applicable Data Types**:
|
||||||
|
@ -902,6 +927,8 @@ CHAR(expr1 [, expr2] [, expr3] ...)
|
||||||
|
|
||||||
**Function Description**: Treats the input parameters as integers and returns the characters corresponding to these integers in ASCII encoding.
|
**Function Description**: Treats the input parameters as integers and returns the characters corresponding to these integers in ASCII encoding.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Result Type**: VARCHAR.
|
**Return Result Type**: VARCHAR.
|
||||||
|
|
||||||
**Applicable Data Types**: Integer types, VARCHAR, NCHAR.
|
**Applicable Data Types**: Integer types, VARCHAR, NCHAR.
|
||||||
|
@ -949,6 +976,8 @@ ASCII(expr)
|
||||||
|
|
||||||
**Function Description**: Returns the ASCII code of the first character of the string.
|
**Function Description**: Returns the ASCII code of the first character of the string.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Result Data Type**: BIGINT.
|
**Return Result Data Type**: BIGINT.
|
||||||
|
|
||||||
**Applicable Data Types**: VARCHAR, NCHAR.
|
**Applicable Data Types**: VARCHAR, NCHAR.
|
||||||
|
@ -979,6 +1008,8 @@ POSITION(expr1 IN expr2)
|
||||||
|
|
||||||
**Function Description**: Calculates the position of string `expr1` in string `expr2`.
|
**Function Description**: Calculates the position of string `expr1` in string `expr2`.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Result Type**: BIGINT.
|
**Return Result Type**: BIGINT.
|
||||||
|
|
||||||
**Applicable Data Types**:
|
**Applicable Data Types**:
|
||||||
|
@ -1026,6 +1057,8 @@ REPLACE(expr, from_str, to_str)
|
||||||
|
|
||||||
**Function Description**: Replaces all occurrences of `from_str` in the string with `to_str`.
|
**Function Description**: Replaces all occurrences of `from_str` in the string with `to_str`.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Type**: Same as the original type of the input field `expr`.
|
**Return Type**: Same as the original type of the input field `expr`.
|
||||||
|
|
||||||
**Applicable Data Types**:
|
**Applicable Data Types**:
|
||||||
|
@ -1061,6 +1094,8 @@ REPEAT(expr, count)
|
||||||
|
|
||||||
**Function Description**: Returns a string that repeats the string `expr` a specified number of times.
|
**Function Description**: Returns a string that repeats the string `expr` a specified number of times.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Type**: Same as the original type of the input field `expr`.
|
**Return Type**: Same as the original type of the input field `expr`.
|
||||||
|
|
||||||
**Applicable Data Types**:
|
**Applicable Data Types**:
|
||||||
|
@ -1319,6 +1354,7 @@ TIMEDIFF(expr1, expr2 [, time_unit])
|
||||||
- `expr1`: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 standard date-time format.
|
- `expr1`: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 standard date-time format.
|
||||||
- `expr2`: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 standard date-time format.
|
- `expr2`: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 standard date-time format.
|
||||||
- `time_unit`: See usage instructions.
|
- `time_unit`: See usage instructions.
|
||||||
|
- `timediff` returns the absolute value of the difference between timestamp `expr1` and `expr2` before ver-3.3.3.0.
|
||||||
|
|
||||||
**Nested Subquery Support**: Applicable to both inner and outer queries.
|
**Nested Subquery Support**: Applicable to both inner and outer queries.
|
||||||
|
|
||||||
|
@ -1423,6 +1459,8 @@ WEEK(expr [, mode])
|
||||||
|
|
||||||
**Function Description**: Returns the week number of the input date.
|
**Function Description**: Returns the week number of the input date.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Result Type**: BIGINT.
|
**Return Result Type**: BIGINT.
|
||||||
|
|
||||||
**Applicable Data Types**:
|
**Applicable Data Types**:
|
||||||
|
@ -1490,6 +1528,8 @@ WEEKOFYEAR(expr)
|
||||||
|
|
||||||
**Function Description**: Returns the week number of the input date.
|
**Function Description**: Returns the week number of the input date.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Type**: BIGINT.
|
**Return Type**: BIGINT.
|
||||||
|
|
||||||
**Applicable Data Types**: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 date-time format.
|
**Applicable Data Types**: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 date-time format.
|
||||||
|
@ -1521,6 +1561,8 @@ WEEKDAY(expr)
|
||||||
|
|
||||||
**Function Description**: Returns the weekday of the input date.
|
**Function Description**: Returns the weekday of the input date.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Type**: BIGINT.
|
**Return Type**: BIGINT.
|
||||||
|
|
||||||
**Applicable Data Types**: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 date-time format.
|
**Applicable Data Types**: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 date-time format.
|
||||||
|
@ -1552,6 +1594,8 @@ DAYOFWEEK(expr)
|
||||||
|
|
||||||
**Function Description**: Returns the weekday of the input date.
|
**Function Description**: Returns the weekday of the input date.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Type**: BIGINT.
|
**Return Type**: BIGINT.
|
||||||
|
|
||||||
**Applicable Data Types**: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 date-time format.
|
**Applicable Data Types**: BIGINT, TIMESTAMP types representing timestamps, or VARCHAR, NCHAR types in ISO8601/RFC3339 date-time format.
|
||||||
|
@ -1707,6 +1751,9 @@ STDDEV/STDDEV_POP(expr)
|
||||||
|
|
||||||
**Applicable to**: Tables and supertables.
|
**Applicable to**: Tables and supertables.
|
||||||
|
|
||||||
|
**Description**:
|
||||||
|
- Function `STDDEV_POP` equals `STDDEV` and is supported from ver-3.3.3.0.
|
||||||
|
|
||||||
**Example**:
|
**Example**:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
@ -1733,6 +1780,8 @@ VAR_POP(expr)
|
||||||
|
|
||||||
**Function Description**: Calculates the population variance of a column in a table.
|
**Function Description**: Calculates the population variance of a column in a table.
|
||||||
|
|
||||||
|
**Version**: ver-3.3.3.0
|
||||||
|
|
||||||
**Return Data Type**: DOUBLE.
|
**Return Data Type**: DOUBLE.
|
||||||
|
|
||||||
**Applicable Data Types**: Numeric types.
|
**Applicable Data Types**: Numeric types.
|
||||||
|
@ -1975,7 +2024,8 @@ MAX(expr)
|
||||||
|
|
||||||
**Applicable to**: Tables and supertables.
|
**Applicable to**: Tables and supertables.
|
||||||
|
|
||||||
**Usage Instructions**: The max function can accept strings as input parameters, and when the input parameter is a string type, it returns the largest string value.
|
**Usage Instructions**:
|
||||||
|
- The max function can accept strings as input parameters, and when the input parameter is a string type, it returns the largest string value (supported from ver-3.3.3.0; function `max` only accepted numeric parameters before ver-3.3.3.0).
|
||||||
|
|
||||||
### MIN
|
### MIN
|
||||||
|
|
||||||
|
@ -1991,7 +2041,8 @@ MIN(expr)
|
||||||
|
|
||||||
**Applicable to**: Tables and supertables.
|
**Applicable to**: Tables and supertables.
|
||||||
|
|
||||||
**Usage Instructions**: The min function can accept strings as input parameters, and when the input parameter is a string type, it returns the smallest string value.
|
**Usage Instructions**:
|
||||||
|
- The min function can accept strings as input parameters, and when the input parameter is a string type, it returns the smallest string value (supported from ver-3.3.3.0; function `min` only accepted numeric parameters before ver-3.3.3.0).
|
||||||
|
|
||||||
### MODE
|
### MODE
|
||||||
|
|
||||||
|
|
|
@ -130,11 +130,25 @@ The forward sliding time of SLIDING cannot exceed the time range of one window.
|
||||||
SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
|
SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
|
||||||
```
|
```
|
||||||
|
|
||||||
|
The INTERVAL clause allows the use of the AUTO keyword to specify the window offset. If the WHERE condition provides a clear applicable start time limit, the required offset will be automatically calculated, dividing the time window from that point; otherwise, it defaults to an offset of 0. Here are some simple examples:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- With a start time limit, divide the time window from '2018-10-03 14:38:05'
|
||||||
|
SELECT COUNT(*) FROM meters WHERE _rowts >= '2018-10-03 14:38:05' INTERVAL (1m, AUTO);
|
||||||
|
|
||||||
|
-- Without a start time limit, defaults to an offset of 0
|
||||||
|
SELECT COUNT(*) FROM meters WHERE _rowts < '2018-10-03 15:00:00' INTERVAL (1m, AUTO);
|
||||||
|
|
||||||
|
-- Unclear start time limit, defaults to an offset of 0
|
||||||
|
SELECT COUNT(*) FROM meters WHERE _rowts - voltage > 1000000;
|
||||||
|
```
|
||||||
|
|
||||||
When using time windows, note:
|
When using time windows, note:
|
||||||
|
|
||||||
- The window width of the aggregation period is specified by the keyword INTERVAL, with the shortest interval being 10 milliseconds (10a); it also supports an offset (the offset must be less than the interval), which is the offset of the time window division compared to "UTC moment 0". The SLIDING statement is used to specify the forward increment of the aggregation period, i.e., the duration of each window slide forward.
|
- The window width of the aggregation period is specified by the keyword INTERVAL, with the shortest interval being 10 milliseconds (10a); it also supports an offset (the offset must be less than the interval), which is the offset of the time window division compared to "UTC moment 0". The SLIDING statement is used to specify the forward increment of the aggregation period, i.e., the duration of each window slide forward.
|
||||||
- When using the INTERVAL statement, unless in very special cases, it is required to configure the timezone parameter in the taos.cfg configuration files of both the client and server to the same value to avoid frequent cross-time zone conversions by time processing functions, which can cause severe performance impacts.
|
- When using the INTERVAL statement, unless in very special cases, it is required to configure the timezone parameter in the taos.cfg configuration files of both the client and server to the same value to avoid frequent cross-time zone conversions by time processing functions, which can cause severe performance impacts.
|
||||||
- The returned results have a strictly monotonically increasing time-series.
|
- The returned results have a strictly monotonically increasing time-series.
|
||||||
|
- When using AUTO as the window offset, if the window width unit is d (day), n (month), w (week), y (year), such as: INTERVAL(1d, AUTO), INTERVAL(3w, AUTO), the TSMA optimization cannot take effect. If TSMA is manually created on the target table, the statement will report an error and exit; in this case, you can explicitly specify the Hint SKIP_TSMA or not use AUTO as the window offset.
|
||||||
|
|
||||||
### State Window
|
### State Window
|
||||||
|
|
||||||
|
|
|
@ -41,38 +41,28 @@ If there is a single replica on the node and the node is offline, to forcibly de
|
||||||
ALTER DNODE dnode_id dnode_option
|
ALTER DNODE dnode_id dnode_option
|
||||||
|
|
||||||
ALTER ALL DNODES dnode_option
|
ALTER ALL DNODES dnode_option
|
||||||
|
|
||||||
dnode_option: {
|
|
||||||
'resetLog'
|
|
||||||
| 'balance' 'value'
|
|
||||||
| 'monitor' 'value'
|
|
||||||
| 'debugFlag' 'value'
|
|
||||||
| 'monDebugFlag' 'value'
|
|
||||||
| 'vDebugFlag' 'value'
|
|
||||||
| 'mDebugFlag' 'value'
|
|
||||||
| 'cDebugFlag' 'value'
|
|
||||||
| 'httpDebugFlag' 'value'
|
|
||||||
| 'qDebugflag' 'value'
|
|
||||||
| 'sdbDebugFlag' 'value'
|
|
||||||
| 'uDebugFlag' 'value'
|
|
||||||
| 'tsdbDebugFlag' 'value'
|
|
||||||
| 'sDebugflag' 'value'
|
|
||||||
| 'rpcDebugFlag' 'value'
|
|
||||||
| 'dDebugFlag' 'value'
|
|
||||||
| 'mqttDebugFlag' 'value'
|
|
||||||
| 'wDebugFlag' 'value'
|
|
||||||
| 'tmrDebugFlag' 'value'
|
|
||||||
| 'cqDebugFlag' 'value'
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
The modifiable configuration items in the syntax above are configured in the same way as in the dnode configuration file, the difference being that modifications are dynamic, take immediate effect, and do not require restarting the dnode.
|
For configuration parameters that support dynamic modification, you can use the ALTER DNODE or ALTER ALL DNODES syntax to modify the values of configuration parameters in a dnode. Starting from version 3.3.4.0, the modified configuration parameters will be automatically persisted and will remain effective even after the database service is restarted.
|
||||||
|
|
||||||
`value` is the value of the parameter, which needs to be in string format. For example, to change the log output level of dnode 1 to debug:
|
To check whether a configuration parameter supports dynamic modification, please refer to the following page: [taosd Reference](../01-components/01-taosd.md)
|
||||||
|
|
||||||
|
The value is the parameter's value and needs to be in character format. For example, to change the log output level of dnode 1 to debug:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
ALTER DNODE 1 'debugFlag' '143';
|
ALTER DNODE 1 'debugFlag' '143';
|
||||||
```
|
```
|
||||||
|
### Additional Notes:
|
||||||
|
Configuration parameters in a dnode are divided into global configuration parameters and local configuration parameters. You can check the category field in SHOW VARIABLES or SHOW DNODE dnode_id VARIABLE to determine whether a configuration parameter is a global configuration parameter or a local configuration parameter:
|
||||||
|
|
||||||
|
Local configuration parameters: You can use ALTER DNODE or ALTER ALL DNODES to update the local configuration parameters of a specific dnode or all dnodes.
|
||||||
|
Global configuration parameters: Global configuration parameters require consistency across all dnodes, so you can only use ALTER ALL DNODES to update the global configuration parameters of all dnodes.
|
||||||
|
There are three cases for whether a configuration parameter can be dynamically modified:
|
||||||
|
|
||||||
|
Supports dynamic modification, effective immediately
|
||||||
|
Supports dynamic modification, effective after restart
|
||||||
|
Does not support dynamic modification
|
||||||
|
For configuration parameters that take effect after a restart, you can see the modified values through SHOW VARIABLES or SHOW DNODE dnode_id VARIABLE, but you need to restart the database service to make them effective.
|
||||||
|
|
||||||
## Add Management Node
|
## Add Management Node
|
||||||
|
|
||||||
|
@ -136,18 +126,12 @@ If the client is also considered as part of the cluster in a broader sense, the
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
ALTER LOCAL local_option
|
ALTER LOCAL local_option
|
||||||
|
|
||||||
local_option: {
|
|
||||||
'resetLog'
|
|
||||||
| 'rpcDebugFlag' 'value'
|
|
||||||
| 'tmrDebugFlag' 'value'
|
|
||||||
| 'cDebugFlag' 'value'
|
|
||||||
| 'uDebugFlag' 'value'
|
|
||||||
| 'debugFlag' 'value'
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
The parameters in the syntax above are used in the same way as in the configuration file for the client, but do not require a restart of the client, and the changes take effect immediately.
|
You can use the above syntax to modify the client's configuration parameters, and there is no need to restart the client. The changes take effect immediately.
|
||||||
|
|
||||||
|
To check whether a configuration parameter supports dynamic modification, please refer to the following page: [taosc Reference](../01-components/02-taosc.md)
|
||||||
|
|
||||||
|
|
||||||
## View Client Configuration
|
## View Client Configuration
|
||||||
|
|
||||||
|
|
|
@ -317,10 +317,43 @@ Configuration parameters for each dnode in the system. Users with SYSINFO attrib
|
||||||
|
|
||||||
Note: Users with SYSINFO property set to 0 cannot view this table.
|
Note: Users with SYSINFO property set to 0 cannot view this table.
|
||||||
|
|
||||||
| # | **Column Name** | **Data Type** | **Description** |
|
| # | **Column Name** | **Data Type** | **Description** |
|
||||||
| ---- | :-------------: | -------------- | ----------------------------------- |
|
|:-----|:----------------|:---------------|:-------------------------------------|
|
||||||
| 1 | user_name | VARCHAR(24) | Username |
|
| 1 | user_name | VARCHAR(24) | Username |
|
||||||
| 2 | privilege | VARCHAR(10) | Privilege description |
|
| 2 | privilege | VARCHAR(10) | Permission description |
|
||||||
| 3 | db_name | VARCHAR(65) | Database name |
|
| 3 | db_name | VARCHAR(65) | Database name |
|
||||||
| 4 | table_name | VARCHAR(193) | Table name |
|
| 4 | table_name | VARCHAR(193) | Table name |
|
||||||
| 5 | condition | VARCHAR(49152) | Subtable privilege filter condition |
|
| 5 | condition | VARCHAR(49152) | Subtable permission filter condition |
|
||||||
|
|
||||||
|
|
||||||
|
## INS_DISK_USAGE
|
||||||
|
|
||||||
|
| # | **Column Name** | **Data Type** | **Description** |
|
||||||
|
|:----|:-----------|:-----------|:--------------------|
|
||||||
|
| 1 | db_name | VARCHAR(32) | Database name |
|
||||||
|
| 2 | vgroup_id | INT | vgroup ID |
|
||||||
|
| 3 | wal | BIGINT | WAL file size, in KB |
|
||||||
|
| 4 | data1 | BIGINT | Data file size on primary storage, in KB |
|
||||||
|
| 5 | data2 | BIGINT | Data file size on secondary storage, in KB |
|
||||||
|
| 6 | data3 | BIGINT | Data file size on tertiary storage, in KB |
|
||||||
|
| 7 | cache_rdb | BIGINT | Size of last/last_row files, in KB |
|
||||||
|
| 8 | table_meta | BIGINT | Size of meta files, in KB |
|
||||||
|
| 9 | s3 | BIGINT | Size occupied on S3, in KB |
|
||||||
|
| 10 | raw_data | BIGINT | Estimated size of raw data, in KB |
|
||||||
|
|
||||||
|
note:
|
||||||
|
|
||||||
|
## INS_FILESETS
|
||||||
|
|
||||||
|
Provides information about file sets.
|
||||||
|
|
||||||
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
|
| --- | :-----------: | ------------- | ---------------------------------------------------- |
|
||||||
|
| 1 | db_name | VARCHAR(65) | Database name |
|
||||||
|
| 2 | vgroup_id | INT | Vgroup ID |
|
||||||
|
| 3 | fileset_id | INT | File set ID |
|
||||||
|
| 4 | start_time | TIMESTAMP | Start time of the time range covered by the file set |
|
||||||
|
| 5 | end_time | TIMESTAMP | End time of the time range covered by the file set |
|
||||||
|
| 6 | total_size | BIGINT | Total size of the file set |
|
||||||
|
| 7 | last_compact | TIMESTAMP | Time of the last compaction |
|
||||||
|
| 8 | shold_compact | bool | Whether the file set should be compacted |
|
||||||
|
|
|
@ -8,7 +8,7 @@ User and permission management is a feature of TDengine Enterprise Edition. This
|
||||||
## Create User
|
## Create User
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE USER user_name PASS 'password' [SYSINFO {1|0}];
|
CREATE USER user_name PASS 'password' [SYSINFO {1|0}] [CREATEDB {1|0}];
|
||||||
```
|
```
|
||||||
|
|
||||||
The username can be up to 23 bytes long.
|
The username can be up to 23 bytes long.
|
||||||
|
@ -17,6 +17,8 @@ The password must be between 8 and 16 characters long and include at least three
|
||||||
|
|
||||||
`SYSINFO` indicates whether the user can view system information. `1` means they can view, `0` means they have no permission to view. System information includes service configuration, dnode, vnode, storage, etc. The default value is `1`.
|
`SYSINFO` indicates whether the user can view system information. `1` means they can view, `0` means they have no permission to view. System information includes service configuration, dnode, vnode, storage, etc. The default value is `1`.
|
||||||
|
|
||||||
|
`CREATEDB` indicates whether the user can create databases. `1` means they can create databases, `0` means they have no permission to create databases. The default value is `0`. (Supported starting from TDengine Enterprise version 3.3.2.0.)
|
||||||
|
|
||||||
In the example below, we create a user with the password `abc123!@#` who can view system information.
|
In the example below, we create a user with the password `abc123!@#` who can view system information.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
@ -76,7 +78,7 @@ alter_user_clause: {
|
||||||
- PASS: Change the password, followed by the new password
|
- PASS: Change the password, followed by the new password
|
||||||
- ENABLE: Enable or disable the user, `1` means enable, `0` means disable
|
- ENABLE: Enable or disable the user, `1` means enable, `0` means disable
|
||||||
- SYSINFO: Allow or prohibit viewing system information, `1` means allow, `0` means prohibit
|
- SYSINFO: Allow or prohibit viewing system information, `1` means allow, `0` means prohibit
|
||||||
- CREATEDB: Allow or prohibit creating databases, `1` means allow, `0` means prohibit
|
- CREATEDB: Allow or prohibit creating databases, `1` means allow, `0` means prohibit. (Supported starting from TDengine Enterprise version 3.3.2.0.)
|
||||||
|
|
||||||
The following example disables the user named `test`:
|
The following example disables the user named `test`:
|
||||||
|
|
||||||
|
|
|
@ -35,9 +35,9 @@ Please refer to the [Supported Platforms List](../#supported-platforms)
|
||||||
|
|
||||||
### Version History
|
### Version History
|
||||||
|
|
||||||
| TDengine Client Version | Main Changes | TDengine Version |
|
| TDengine Client Version | Major Changes | TDengine Version |
|
||||||
| :------------------: | :---------------------------: | :----------------: |
|
| ------------------ | --------------------------- | ---------------- |
|
||||||
| 3.3.3.0 | First release, providing comprehensive support for SQL execution, parameter binding, schema-less writing, and data subscription. | 3.3.2.0 and higher versions |
|
| 3.3.3.0 | First release, providing comprehensive support for SQL execution, parameter binding, schema-less writing, and data subscription. | 3.3.2.0 and higher |
|
||||||
|
|
||||||
### Error Codes
|
### Error Codes
|
||||||
|
|
||||||
|
@ -688,6 +688,27 @@ The basic API is used to establish database connections and provide a runtime en
|
||||||
- `arg`: [Input] Setting item value.
|
- `arg`: [Input] Setting item value.
|
||||||
- **Return Value**: `0`: Success, `-1`: Failure.
|
- **Return Value**: `0`: Success, `-1`: Failure.
|
||||||
|
|
||||||
|
- `int taos_options_connection(TAOS *taos, TSDB_OPTION_CONNECTION option, const void *arg, ...)`
|
||||||
|
- **description**: Sets per-connection options on the client side. Currently, it supports character set setting (`TSDB_OPTION_CONNECTION_CHARSET`), time zone setting (`TSDB_OPTION_CONNECTION_TIMEZONE`), user IP setting (`TSDB_OPTION_CONNECTION_USER_IP`), and user APP setting (`TSDB_OPTION_CONNECTION_USER_APP`).
|
||||||
|
- **input**:
|
||||||
|
- `taos`: returned by taos_connect.
|
||||||
|
- `option`: option name.
|
||||||
|
- `arg`: option value.
|
||||||
|
- **return**:
|
||||||
|
- `0`: success.
|
||||||
|
- `others`: fail.
|
||||||
|
- **notice**:
|
||||||
|
- The character set and time zone default to the current settings of the operating system; Windows does not support connection-level time zone settings.
|
||||||
|
- When arg is NULL, it means resetting the option.
|
||||||
|
- This interface is only valid for the current connection and will not affect other connections.
|
||||||
|
- If the same option is set multiple times, the last call takes effect, so repeated calls can be used to modify a previously set option.
|
||||||
|
- The option of TSDB_OPTION_CONNECTION_CLEAR is used to reset all connection options.
|
||||||
|
- After a reset, the time zone and character set fall back to the operating system settings, and the user IP and user app are cleared.
|
||||||
|
- The values of the connection options are all string type. The maximum length of the user app parameter is 23 bytes; longer values are truncated. An error is reported when other parameters are invalid.
|
||||||
|
- If the time zone value cannot be used to find a time zone file and cannot be interpreted as a direct specification, UTC is used, which follows the same rules as the operating system time zone handling. Please refer to the tzset function description for details. You can view the current time zone of the connection with the SQL statement `select timezone()`.
|
||||||
|
- Time zones and character sets only work on the client side and do not affect related behaviors on the server side.
|
||||||
|
- The time zone file uses the operating system time zone file and can be updated by the user. If an error occurs when setting the time zone, please check whether the time zone file or path (macOS: /var/db/timezone/zoneinfo, Linux: /usr/share/zoneinfo) is correct.
|
||||||
|
|
||||||
- `char *taos_get_client_info()`
|
- `char *taos_get_client_info()`
|
||||||
- **Interface Description**: Gets client version information.
|
- **Interface Description**: Gets client version information.
|
||||||
- **Return Value**: Returns client version information.
|
- **Return Value**: Returns client version information.
|
||||||
|
|
|
@ -6,7 +6,7 @@ slug: /tdengine-reference/client-libraries/java
|
||||||
|
|
||||||
import Tabs from '@theme/Tabs';
|
import Tabs from '@theme/Tabs';
|
||||||
import TabItem from '@theme/TabItem';
|
import TabItem from '@theme/TabItem';
|
||||||
import RequestId from "./_request_id.mdx";
|
import RequestId from "../../assets/resources/_request_id.mdx";
|
||||||
|
|
||||||
`taos-jdbcdriver` is the official Java connector for TDengine, allowing Java developers to develop applications that access the TDengine database. `taos-jdbcdriver` implements the interfaces of the JDBC driver standard.
|
`taos-jdbcdriver` is the official Java connector for TDengine, allowing Java developers to develop applications that access the TDengine database. `taos-jdbcdriver` implements the interfaces of the JDBC driver standard.
|
||||||
|
|
||||||
|
@ -18,45 +18,45 @@ The JDBC driver implementation for TDengine strives to be consistent with relati
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
## JDBC and JRE Compatibility
|
## JDBC and JRE Version Compatibility
|
||||||
|
|
||||||
- JDBC: Supports JDBC 4.2, with some features like schemaless writing and data subscription provided separately
|
- JDBC: Supports JDBC 4.2 and above.
|
||||||
- JRE: Supports JRE 8 and above
|
- JRE: Supports JRE 8 and above.
|
||||||
|
|
||||||
## Supported Platforms
|
## Supported Platforms
|
||||||
|
|
||||||
Native connection supports the same platforms as the TDengine client driver.
|
- Native connection supports the same platforms as the TDengine client driver.
|
||||||
REST connection supports all platforms that can run Java.
|
- WebSocket/REST connection supports all platforms that can run Java.
|
||||||
|
|
||||||
## Version History
|
## Version History
|
||||||
|
|
||||||
| taos-jdbcdriver Version | Main Changes | TDengine Version |
|
| taos-jdbcdriver Version | Major Changes | TDengine Version |
|
||||||
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
|
| ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- |
|
||||||
| 3.4.0 | 1. Replaced fastjson library with jackson; 2. WebSocket uses a separate protocol identifier; 3. Optimized background thread usage to avoid user misuse leading to timeouts. | - |
|
| 3.4.0 | 1. Replaced fastjson library with jackson. <br/> 2. WebSocket uses a separate protocol identifier. <br/> 3. Optimized background thread usage to avoid user misuse leading to timeouts. | - |
|
||||||
| 3.3.4 | 1. Fixed getInt error when data type is float | - |
|
| 3.3.4 | Fixed getInt error when data type is float. | - |
|
||||||
| 3.3.3 | 1. Fixed memory leak caused by closing WebSocket statement | - |
|
| 3.3.3 | Fixed memory leak caused by closing WebSocket statement. | - |
|
||||||
| 3.3.2 | 1. Optimized parameter binding performance under WebSocket connection; 2. Improved support for mybatis | - |
|
| 3.3.2 | 1. Optimized parameter binding performance under WebSocket connection. <br/> 2. Improved support for mybatis. | - |
|
||||||
| 3.3.0 | 1. Optimized data transmission performance under WebSocket connection; 2. Supports skipping SSL verification, off by default | 3.3.2.0 and higher |
|
| 3.3.0 | 1. Optimized data transmission performance under WebSocket connection. <br/> 2. Supports skipping SSL verification, off by default. | 3.3.2.0 and higher |
|
||||||
| 3.2.11 | Fixed a bug in closing result set in Native connection | - |
|
| 3.2.11 | Fixed a bug in closing result set in Native connection. | - |
|
||||||
| 3.2.10 | 1. REST/WebSocket connections support data compression during transmission; 2. WebSocket automatic reconnection mechanism, off by default; 3. Connection class provides methods for schemaless writing; 4. Optimized data fetching performance for native connections; 5. Fixed some known issues; 6. Metadata retrieval functions can return a list of supported functions. | - |
|
| 3.2.10 | 1. REST/WebSocket connections support data compression during transmission. <br/> 2. WebSocket automatic reconnection mechanism, off by default. <br/> 3. Connection class provides methods for schemaless writing. <br/> 4. Optimized data fetching performance for native connections. <br/> 5. Fixed some known issues. <br/> 6. Metadata retrieval functions can return a list of supported functions. | - |
|
||||||
| 3.2.9 | Fixed bug in closing WebSocket prepareStatement | - |
|
| 3.2.9 | Fixed bug in closing WebSocket prepareStatement. | - |
|
||||||
| 3.2.8 | Optimized auto-commit, fixed manual commit bug in WebSocket, optimized WebSocket prepareStatement using a single connection, metadata supports views | - |
|
| 3.2.8 | 1. Optimized auto-commit. <br/> 2. Fixed manual commit bug in WebSocket. <br/> 3. Optimized WebSocket prepareStatement using a single connection. <br/> 4. Metadata supports views. | - |
|
||||||
| 3.2.7 | Supports VARBINARY and GEOMETRY types, added timezone setting support for native connections. Added WebSocket automatic reconnection feature. | 3.2.0.0 and higher |
|
| 3.2.7 | 1. Supports VARBINARY and GEOMETRY types. <br/> 2. Added timezone setting support for native connections. <br/> 3. Added WebSocket automatic reconnection feature. | 3.2.0.0 and higher |
|
||||||
| 3.2.5 | Data subscription adds committed() and assignment() methods | 3.1.0.3 and higher |
|
| 3.2.5 | Data subscription adds committed() and assignment() methods. | 3.1.0.3 and higher |
|
||||||
| 3.2.4 | Data subscription adds enable.auto.commit parameter under WebSocket connection, as well as unsubscribe() method. | - |
|
| 3.2.4 | Data subscription adds enable.auto.commit parameter under WebSocket connection, as well as unsubscribe() method. | - |
|
||||||
| 3.2.3 | Fixed ResultSet data parsing failure in some cases | - |
|
| 3.2.3 | Fixed ResultSet data parsing failure in some cases. | - |
|
||||||
| 3.2.2 | New feature: Data subscription supports seek function. | 3.0.5.0 and higher |
|
| 3.2.2 | New feature: Data subscription supports seek function. | 3.0.5.0 and higher |
|
||||||
| 3.2.1 | New feature: WebSocket connection supports schemaless and prepareStatement writing. Change: consumer poll returns result set as ConsumerRecord, which can be accessed through value() method. | 3.0.3.0 and higher |
|
| 3.2.1 | 1. WebSocket connection supports schemaless and prepareStatement writing. <br/> 2. Consumer poll returns result set as ConsumerRecord, which can be accessed through value() method. | 3.0.3.0 and higher |
|
||||||
| 3.2.0 | Connection issues, not recommended for use | - |
|
| 3.2.0 | Connection issues, not recommended for use. | - |
|
||||||
| 3.1.0 | WebSocket connection supports subscription function | - |
|
| 3.1.0 | WebSocket connection supports subscription function. | - |
|
||||||
| 3.0.1 - 3.0.4 | Fixed data parsing errors in result sets under some conditions. 3.0.1 compiled in JDK 11 environment, other versions recommended for JDK 8 | - |
|
| 3.0.1 - 3.0.4 | Fixed data parsing errors in result sets under some conditions. 3.0.1 compiled in JDK 11 environment, other versions recommended for JDK 8. | - |
|
||||||
| 3.0.0 | Supports TDengine 3.0 | 3.0.0.0 and higher |
|
| 3.0.0 | Supports TDengine 3.0 | 3.0.0.0 and higher |
|
||||||
| 2.0.42 | Fixed wasNull interface return value in WebSocket connection | - |
|
| 2.0.42 | Fixed wasNull interface return value in WebSocket connection. | - |
|
||||||
| 2.0.41 | Fixed username and password encoding method in REST connection | - |
|
| 2.0.41 | Fixed username and password encoding method in REST connection. | - |
|
||||||
| 2.0.39 - 2.0.40 | Added REST connection/request timeout settings | - |
|
| 2.0.39 - 2.0.40 | Added REST connection/request timeout settings. | - |
|
||||||
| 2.0.38 | JDBC REST connection adds batch fetching function | - |
|
| 2.0.38 | JDBC REST connection adds batch fetching function. | - |
|
||||||
| 2.0.37 | Added support for json tag | - |
|
| 2.0.37 | Added support for json tag. | - |
|
||||||
| 2.0.36 | Added support for schemaless writing | - |
|
| 2.0.36 | Added support for schemaless writing. | - |
|
||||||
|
|
||||||
## Exceptions and Error Codes
|
## Exceptions and Error Codes
|
||||||
|
|
||||||
|
|
|
@ -6,24 +6,42 @@ slug: /tdengine-reference/client-libraries/go
|
||||||
|
|
||||||
import Tabs from '@theme/Tabs';
|
import Tabs from '@theme/Tabs';
|
||||||
import TabItem from '@theme/TabItem';
|
import TabItem from '@theme/TabItem';
|
||||||
import RequestId from "./_request_id.mdx";
|
import RequestId from "../../assets/resources/_request_id.mdx";
|
||||||
|
|
||||||
`driver-go` is the official Go language connector for TDengine, implementing the interface of the Go language [database/sql](https://golang.org/pkg/database/sql/) package. Go developers can use it to develop applications that access data in the TDengine cluster.
|
`driver-go` is the official Go language connector for TDengine, implementing the interface of the Go language [database/sql](https://golang.org/pkg/database/sql/) package. Go developers can use it to develop applications that access data in the TDengine cluster.
|
||||||
|
|
||||||
## Compatibility
|
## Go Version Compatibility
|
||||||
|
|
||||||
Supports a minimum Go version of 1.14, but the latest version of Go is recommended.
|
Supports Go 1.14 and above.
|
||||||
|
|
||||||
## Supported Platforms
|
## Supported Platforms
|
||||||
|
|
||||||
Native connections support the same platforms as the TDengine client driver.
|
- Native connections support the same platforms as the TDengine client driver.
|
||||||
REST connections support all platforms that can run Go.
|
- WebSocket/REST connections support all platforms that can run Go.
|
||||||
|
|
||||||
## Version Support
|
## Version History
|
||||||
|
|
||||||
Please refer to the [version support list](https://github.com/taosdata/driver-go#remind).
|
| driver-go Version | Major Changes | TDengine Version |
|
||||||
|
|------------------|------------------------------------------------------------------|-------------------|
|
||||||
|
| v3.5.8 | Fixed null pointer exception. | - |
|
||||||
|
| v3.5.7 | taosWS and taosRestful support passing request id. | - |
|
||||||
|
| v3.5.6 | Improved websocket query and insert performance. | 3.3.2.0 and higher |
|
||||||
|
| v3.5.5 | Restful supports skipping SSL certificate check. | - |
|
||||||
|
| v3.5.4 | Compatible with TDengine 3.3.0.0 tmq raw data. | - |
|
||||||
|
| v3.5.3 | Refactored taosWS. | - |
|
||||||
|
| v3.5.2 | Websocket compression and optimized tmq subscription performance. | 3.2.3.0 and higher |
|
||||||
|
| v3.5.1 | Native stmt query and geometry type support. | 3.2.1.0 and higher |
|
||||||
|
| v3.5.0 | Support tmq get assignment and seek offset. | 3.0.5.0 and higher |
|
||||||
|
| v3.3.1 | Schemaless protocol insert based on websocket. | 3.0.4.1 and higher |
|
||||||
|
| v3.1.0 | Provided Kafka-like subscription API. | - |
|
||||||
|
| v3.0.4 | Added request id related interfaces. | 3.0.2.2 and higher |
|
||||||
|
| v3.0.3 | Websocket-based statement insert. | - |
|
||||||
|
| v3.0.2 | Websocket-based data query and insert. | 3.0.1.5 and higher |
|
||||||
|
| v3.0.1 | Websocket-based message subscription. | - |
|
||||||
|
| v3.0.0 | Adapted to TDengine 3.0 query and insert. | 3.0.0.0 and higher |
|
||||||
|
|
||||||
## Handling Exceptions
|
|
||||||
|
## Exceptions and Error Codes
|
||||||
|
|
||||||
If it is a TDengine error, you can obtain the error code and error message as follows.
|
If it is a TDengine error, you can obtain the error code and error message as follows.
|
||||||
|
|
||||||
|
|
|
@ -4,11 +4,11 @@ title: Rust Client Library
|
||||||
slug: /tdengine-reference/client-libraries/rust
|
slug: /tdengine-reference/client-libraries/rust
|
||||||
---
|
---
|
||||||
|
|
||||||
import Tabs from '@theme/Tabs';
|
import Tabs from "@theme/Tabs";
|
||||||
import TabItem from '@theme/TabItem';
|
import TabItem from "@theme/TabItem";
|
||||||
|
|
||||||
import Preparation from "./_preparation.mdx"
|
import Preparation from "../../assets/resources/_preparation.mdx"
|
||||||
import RequestId from "./_request_id.mdx";
|
import RequestId from "../../assets/resources/_request_id.mdx";
|
||||||
|
|
||||||
[](https://crates.io/crates/taos)  [](https://docs.rs/taos)
|
[](https://crates.io/crates/taos)  [](https://docs.rs/taos)
|
||||||
|
|
||||||
|
@ -16,37 +16,30 @@ import RequestId from "./_request_id.mdx";
|
||||||
|
|
||||||
The source code for this Rust connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-rust).
|
The source code for this Rust connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-rust).
|
||||||
|
|
||||||
## Connection Methods
|
## Rust Version Compatibility
|
||||||
|
|
||||||
`taos` provides two ways to establish a connection. Generally, we recommend using **WebSocket Connection**.
|
Supports Rust 1.70 and above.
|
||||||
|
|
||||||
- **Native Connection**, which connects to a TDengine instance via the TDengine client driver (taosc).
|
|
||||||
- **WebSocket Connection**, which connects to a TDengine instance via the WebSocket interface of taosAdapter.
|
|
||||||
|
|
||||||
You can specify which connector to use through different "features (i.e., the Cargo keyword `features`)" (both are supported by default).
|
|
||||||
|
|
||||||
For a detailed introduction to connection methods, please refer to: [Connection Methods](../../../developer-guide/connecting-to-tdengine/)
|
|
||||||
|
|
||||||
## Supported Platforms
|
## Supported Platforms
|
||||||
|
|
||||||
The platforms supported by the native connection are consistent with those supported by the TDengine client driver.
|
- The platforms supported by the native connection are consistent with those supported by the TDengine client driver.
|
||||||
WebSocket connection supports all platforms that can run Rust.
|
- WebSocket connection supports all platforms that can run Rust.
|
||||||
|
|
||||||
## Version History
|
## Version History
|
||||||
|
|
||||||
| Rust Connector Version | TDengine Version | Main Features |
|
| Rust Connector Version | Major Changes | TDengine Version |
|
||||||
| :----------------: | :--------------: | :--------------------------------------------------: |
|
| ---------------------- | ----------------------------------------------------------------------------------------------------- | ------------------ |
|
||||||
| v0.12.3 | 3.3.0.0 or later | Optimized WebSocket query and insertion performance, support for VARBINARY and GEOMETRY types |
|
| v0.12.3 | 1. Optimized WebSocket query and insert performance. <br/> 2. Supported VARBINARY and GEOMETRY types. | 3.3.0.0 and higher |
|
||||||
| v0.12.0 | 3.2.3.0 or later | WS supports compression. |
|
| v0.12.0 | WebSocket supports compression. | 3.2.3.0 and higher |
|
||||||
| v0.11.0 | 3.2.0.0 | TMQ feature optimization. |
|
| v0.11.0 | TMQ feature optimization. | 3.2.0.0 and higher |
|
||||||
| v0.10.0 | 3.1.0.0 | WS endpoint change. |
|
| v0.10.0 | WebSocket endpoint change. | 3.1.0.0 and higher |
|
||||||
| v0.9.2 | 3.0.7.0 | STMT: ws to get tag_fields, col_fields. |
|
| v0.9.2 | STMT: WebSocket to get tag_fields, col_fields. | 3.0.7.0 and higher |
|
||||||
| v0.8.12 | 3.0.5.0 | Message subscription: get consumption progress and start consuming at a specified progress. |
|
| v0.8.12 | Message subscription: get consumption progress and start consuming at a specified progress. | 3.0.5.0 and higher |
|
||||||
| v0.8.0 | 3.0.4.0 | Supports schema-less writing. |
|
| v0.8.0 | Supports schema-less writing. | 3.0.4.0 and higher |
|
||||||
| v0.7.6 | 3.0.3.0 | Supports using req_id in requests. |
|
| v0.7.6 | Supports using req_id in requests. | 3.0.3.0 and higher |
|
||||||
| v0.6.0 | 3.0.0.0 | Basic functionality. |
|
| v0.6.0 | Basic functionality. | 3.0.0.0 and higher |
|
||||||
|
|
||||||
## Error Handling
|
## Exceptions and Error Codes
|
||||||
|
|
||||||
After an error occurs, you can obtain detailed information about the error:
|
After an error occurs, you can obtain detailed information about the error:
|
||||||
|
|
||||||
|
@ -81,14 +74,14 @@ TDengine currently supports timestamp, numeric, character, and boolean types, wi
|
||||||
| BINARY | Vec\<u8> |
|
| BINARY | Vec\<u8> |
|
||||||
| NCHAR | String |
|
| NCHAR | String |
|
||||||
| JSON | serde_json::Value |
|
| JSON | serde_json::Value |
|
||||||
| VARBINARY | Bytes |
|
| VARBINARY | Bytes |
|
||||||
| GEOMETRY | Bytes |
|
| GEOMETRY | Bytes |
|
||||||
|
|
||||||
**Note**: The JSON type is only supported in tags.
|
**Note**: The JSON type is only supported in tags.
|
||||||
|
|
||||||
## Summary of Example Programs
|
## Summary of Example Programs
|
||||||
|
|
||||||
For the source code of the example programs, please refer to: [rust example](https://github.com/taosdata/TDengine/tree/main/docs/examples/rust)
|
Please refer to: [rust example](https://github.com/taosdata/TDengine/tree/main/docs/examples/rust)
|
||||||
|
|
||||||
## Frequently Asked Questions
|
## Frequently Asked Questions
|
||||||
|
|
||||||
|
@ -132,26 +125,31 @@ A complete DSN description string example is as follows: `taos+ws://localhost:60
|
||||||
The TaosBuilder struct primarily provides methods for building Taos objects based on DSN, as well as features for checking connections and obtaining the client version number.
|
The TaosBuilder struct primarily provides methods for building Taos objects based on DSN, as well as features for checking connections and obtaining the client version number.
|
||||||
|
|
||||||
- `fn available_params() -> &'static [&'static str]`
|
- `fn available_params() -> &'static [&'static str]`
|
||||||
|
|
||||||
- **Interface Description**: Retrieves a list of available parameters in the DSN.
|
- **Interface Description**: Retrieves a list of available parameters in the DSN.
|
||||||
- **Return Value**: Returns a reference to a static slice of strings containing the names of available parameters.
|
- **Return Value**: Returns a reference to a static slice of strings containing the names of available parameters.
|
||||||
|
|
||||||
- `fn from_dsn<D: IntoDsn>(dsn: D) -> RawResult<Self>`
|
- `fn from_dsn<D: IntoDsn>(dsn: D) -> RawResult<Self>`
|
||||||
|
|
||||||
- **Interface Description**: Creates a connection using a DSN string without checking the connection.
|
- **Interface Description**: Creates a connection using a DSN string without checking the connection.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `dsn`: DSN string or a type that can be converted into a DSN.
|
- `dsn`: DSN string or a type that can be converted into a DSN.
|
||||||
- **Return Value**: On success, returns a `RawResult` of its own type; on failure, returns an error.
|
- **Return Value**: On success, returns a `RawResult` of its own type; on failure, returns an error.
|
||||||
|
|
||||||
- `fn client_version() -> &'static str`
|
- `fn client_version() -> &'static str`
|
||||||
|
|
||||||
- **Interface Description**: Gets the client version.
|
- **Interface Description**: Gets the client version.
|
||||||
- **Return Value**: Returns a static string of the client version.
|
- **Return Value**: Returns a static string of the client version.
|
||||||
|
|
||||||
- `fn ping(&self, _: &mut Self::Target) -> RawResult<()>`
|
- `fn ping(&self, _: &mut Self::Target) -> RawResult<()>`
|
||||||
|
|
||||||
- **Interface Description**: Checks if the connection is still alive.
|
- **Interface Description**: Checks if the connection is still alive.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `_`: Mutable reference to the target connection.
|
- `_`: Mutable reference to the target connection.
|
||||||
- **Return Value**: On success, returns an empty `RawResult`; on failure, returns an error.
|
- **Return Value**: On success, returns an empty `RawResult`; on failure, returns an error.
|
||||||
|
|
||||||
- `fn ready(&self) -> bool`
|
- `fn ready(&self) -> bool`
|
||||||
|
|
||||||
- **Interface Description**: Checks if it is ready to connect.
|
- **Interface Description**: Checks if it is ready to connect.
|
||||||
- **Return Value**: Mostly returns `true`, indicating the address is ready for connection.
|
- **Return Value**: Mostly returns `true`, indicating the address is ready for connection.
|
||||||
|
|
||||||
|
@ -168,20 +166,24 @@ Executing SQL primarily uses the Taos struct, and obtaining the result set and m
|
||||||
The Taos struct provides multiple database operation APIs, including: executing SQL, schema-less writing, and some common database query encapsulations (such as creating databases, fetching)
|
The Taos struct provides multiple database operation APIs, including: executing SQL, schema-less writing, and some common database query encapsulations (such as creating databases, fetching)
|
||||||
|
|
||||||
- `pub fn is_native(&self) -> bool`
|
- `pub fn is_native(&self) -> bool`
|
||||||
|
|
||||||
- **Interface Description**: Determines if the connection uses a native protocol.
|
- **Interface Description**: Determines if the connection uses a native protocol.
|
||||||
- **Return Value**: Returns `true` if using a native protocol, otherwise returns `false`.
|
- **Return Value**: Returns `true` if using a native protocol, otherwise returns `false`.
|
||||||
|
|
||||||
- `pub fn is_ws(&self) -> bool`
|
- `pub fn is_ws(&self) -> bool`
|
||||||
|
|
||||||
- **Interface Description**: Determines if the connection uses the WebSocket protocol.
|
- **Interface Description**: Determines if the connection uses the WebSocket protocol.
|
||||||
- **Return Value**: Returns `true` if using the WebSocket protocol, otherwise returns `false`.
|
- **Return Value**: Returns `true` if using the WebSocket protocol, otherwise returns `false`.
|
||||||
|
|
||||||
- `fn query<T: AsRef<str>>(&self, sql: T) -> RawResult<Self::ResultSet>`
|
- `fn query<T: AsRef<str>>(&self, sql: T) -> RawResult<Self::ResultSet>`
|
||||||
|
|
||||||
- **Interface Description**: Executes an SQL query.
|
- **Interface Description**: Executes an SQL query.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `sql`: The SQL statement to execute.
|
- `sql`: The SQL statement to execute.
|
||||||
- **Return Value**: On success, returns a `RawResult` of the `ResultSet`; on failure, returns an error.
|
- **Return Value**: On success, returns a `RawResult` of the `ResultSet`; on failure, returns an error.
|
||||||
|
|
||||||
- `fn query_with_req_id<T: AsRef<str>>(&self, sql: T, req_id: u64) -> RawResult<Self::ResultSet>`
|
- `fn query_with_req_id<T: AsRef<str>>(&self, sql: T, req_id: u64) -> RawResult<Self::ResultSet>`
|
||||||
|
|
||||||
- **Interface Description**: Executes an SQL query with a request ID.
|
- **Interface Description**: Executes an SQL query with a request ID.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `sql`: The SQL statement to execute.
|
- `sql`: The SQL statement to execute.
|
||||||
|
@ -189,28 +191,33 @@ The Taos struct provides multiple database operation APIs, including: executing
|
||||||
- **Return Value**: On success, returns a `RawResult` of the `ResultSet`; on failure, returns an error.
|
- **Return Value**: On success, returns a `RawResult` of the `ResultSet`; on failure, returns an error.
|
||||||
|
|
||||||
- `fn exec<T: AsRef<str>>(&self, sql: T) -> RawResult<usize>`
|
- `fn exec<T: AsRef<str>>(&self, sql: T) -> RawResult<usize>`
|
||||||
|
|
||||||
- **Interface Description**: Executes an SQL statement.
|
- **Interface Description**: Executes an SQL statement.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `sql`: The SQL statement to execute.
|
- `sql`: The SQL statement to execute.
|
||||||
- **Return Value**: On success, returns the number of affected rows; on failure, returns an error.
|
- **Return Value**: On success, returns the number of affected rows; on failure, returns an error.
|
||||||
|
|
||||||
- `fn exec_many<T: AsRef<str>, I: IntoIterator<Item = T>>(&self, input: I) -> RawResult<usize>`
|
- `fn exec_many<T: AsRef<str>, I: IntoIterator<Item = T>>(&self, input: I) -> RawResult<usize>`
|
||||||
|
|
||||||
- **Interface Description**: Executes multiple SQL statements in batch.
|
- **Interface Description**: Executes multiple SQL statements in batch.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `input`: Collection of SQL statements to execute.
|
- `input`: Collection of SQL statements to execute.
|
||||||
- **Return Value**: On success, returns the total number of affected rows; on failure, returns an error.
|
- **Return Value**: On success, returns the total number of affected rows; on failure, returns an error.
|
||||||
|
|
||||||
- `fn query_one<T: AsRef<str>, O: DeserializeOwned>(&self, sql: T) -> RawResult<Option<O>>`
|
- `fn query_one<T: AsRef<str>, O: DeserializeOwned>(&self, sql: T) -> RawResult<Option<O>>`
|
||||||
|
|
||||||
- **Interface Description**: Executes an SQL query and returns a single result.
|
- **Interface Description**: Executes an SQL query and returns a single result.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `sql`: The SQL statement to execute.
|
- `sql`: The SQL statement to execute.
|
||||||
- **Return Value**: On success, returns an optional result object; on failure, returns an error.
|
- **Return Value**: On success, returns an optional result object; on failure, returns an error.
|
||||||
|
|
||||||
- `fn server_version(&self) -> RawResult<Cow<str>>`
|
- `fn server_version(&self) -> RawResult<Cow<str>>`
|
||||||
|
|
||||||
- **Interface Description**: Gets the server version.
|
- **Interface Description**: Gets the server version.
|
||||||
- **Return Value**: On success, returns the server version string as a `RawResult`; on failure, returns an error.
|
- **Return Value**: On success, returns the server version string as a `RawResult`; on failure, returns an error.
|
||||||
|
|
||||||
- `fn create_topic(&self, name: impl AsRef<str>, sql: impl AsRef<str>) -> RawResult<()>`
|
- `fn create_topic(&self, name: impl AsRef<str>, sql: impl AsRef<str>) -> RawResult<()>`
|
||||||
|
|
||||||
- **Interface Description**: Creates a topic.
|
- **Interface Description**: Creates a topic.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `name`: The name of the topic.
|
- `name`: The name of the topic.
|
||||||
|
@ -218,20 +225,24 @@ The Taos struct provides multiple database operation APIs, including: executing
|
||||||
- **Return Value**: On success, returns an empty `RawResult`; on failure, returns an error.
|
- **Return Value**: On success, returns an empty `RawResult`; on failure, returns an error.
|
||||||
|
|
||||||
- `fn databases(&self) -> RawResult<Vec<ShowDatabase>>`
|
- `fn databases(&self) -> RawResult<Vec<ShowDatabase>>`
|
||||||
|
|
||||||
- **Interface Description**: Retrieves a list of databases.
|
- **Interface Description**: Retrieves a list of databases.
|
||||||
- **Return Value**: On success, returns a list of databases as a `RawResult`; on failure, returns an error.
|
- **Return Value**: On success, returns a list of databases as a `RawResult`; on failure, returns an error.
|
||||||
|
|
||||||
- `fn topics(&self) -> RawResult<Vec<Topic>>`
|
- `fn topics(&self) -> RawResult<Vec<Topic>>`
|
||||||
|
|
||||||
- **Interface Description**: Retrieves topic information.
|
- **Interface Description**: Retrieves topic information.
|
||||||
- **Return Value**: On success, returns a list of topics as a `RawResult`; on failure, returns an error.
|
- **Return Value**: On success, returns a list of topics as a `RawResult`; on failure, returns an error.
|
||||||
|
|
||||||
- `fn describe(&self, table: &str) -> RawResult<Describe>`
|
- `fn describe(&self, table: &str) -> RawResult<Describe>`
|
||||||
|
|
||||||
- **Interface Description**: Describes the table structure.
|
- **Interface Description**: Describes the table structure.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `table`: The name of the table.
|
- `table`: The name of the table.
|
||||||
- **Return Value**: On success, returns a description of the table structure as a `RawResult`; on failure, returns an error.
|
- **Return Value**: On success, returns a description of the table structure as a `RawResult`; on failure, returns an error.
|
||||||
|
|
||||||
- `fn database_exists(&self, name: &str) -> RawResult<bool>`
|
- `fn database_exists(&self, name: &str) -> RawResult<bool>`
|
||||||
|
|
||||||
- **Interface Description**: Checks if a database exists.
|
- **Interface Description**: Checks if a database exists.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `name`: The name of the database.
|
- `name`: The name of the database.
|
||||||
|
@ -248,6 +259,7 @@ The Taos struct provides multiple database operation APIs, including: executing
|
||||||
The SmlData structure provides a data structure for schema-less writing and methods for accessing properties.
|
The SmlData structure provides a data structure for schema-less writing and methods for accessing properties.
|
||||||
|
|
||||||
- `pub struct SmlData`
|
- `pub struct SmlData`
|
||||||
|
|
||||||
- **Structure Description**: The `SmlData` structure is used to store schema-less data and related information.
|
- **Structure Description**: The `SmlData` structure is used to store schema-less data and related information.
|
||||||
- **Field Description**:
|
- **Field Description**:
|
||||||
- `protocol`: Schema-less protocol, supports InfluxDB `Line`, OpenTSDB `Telnet`, OpenTSDB `Json`.
|
- `protocol`: Schema-less protocol, supports InfluxDB `Line`, OpenTSDB `Telnet`, OpenTSDB `Json`.
|
||||||
|
@ -257,18 +269,22 @@ The SmlData structure provides a data structure for schema-less writing and meth
|
||||||
- `req_id`: Request ID.
|
- `req_id`: Request ID.
|
||||||
|
|
||||||
- `pub fn protocol(&self) -> SchemalessProtocol`
|
- `pub fn protocol(&self) -> SchemalessProtocol`
|
||||||
|
|
||||||
- **Interface Description**: Gets the schema-less protocol.
|
- **Interface Description**: Gets the schema-less protocol.
|
||||||
- **Return Value**: Schema-less protocol type, supports InfluxDB `Line`, OpenTSDB `Telnet`, OpenTSDB `Json`.
|
- **Return Value**: Schema-less protocol type, supports InfluxDB `Line`, OpenTSDB `Telnet`, OpenTSDB `Json`.
|
||||||
|
|
||||||
- `pub fn precision(&self) -> SchemalessPrecision`
|
- `pub fn precision(&self) -> SchemalessPrecision`
|
||||||
|
|
||||||
- **Interface Description**: Gets the timestamp precision.
|
- **Interface Description**: Gets the timestamp precision.
|
||||||
- **Return Value**: Timestamp precision type, supports `Hours`, `Minutes`, `Seconds`, `Millisecond` (default), `Microsecond`, `Nanosecond`.
|
- **Return Value**: Timestamp precision type, supports `Hours`, `Minutes`, `Seconds`, `Millisecond` (default), `Microsecond`, `Nanosecond`.
|
||||||
|
|
||||||
- `pub fn data(&self) -> &Vec<String>`
|
- `pub fn data(&self) -> &Vec<String>`
|
||||||
|
|
||||||
- **Interface Description**: Retrieves the list of data.
|
- **Interface Description**: Retrieves the list of data.
|
||||||
- **Return Value**: Reference to the list of data.
|
- **Return Value**: Reference to the list of data.
|
||||||
|
|
||||||
- `pub fn ttl(&self) -> Option<i32>`
|
- `pub fn ttl(&self) -> Option<i32>`
|
||||||
|
|
||||||
- **Interface Description**: Get the data time-to-live.
|
- **Interface Description**: Get the data time-to-live.
|
||||||
- **Return Value**: Time-to-live of the data (optional), in seconds.
|
- **Return Value**: Time-to-live of the data (optional), in seconds.
|
||||||
|
|
||||||
|
@ -283,34 +299,42 @@ The SmlData structure provides a data structure for schema-less writing and meth
|
||||||
The ResultSet structure provides methods for accessing the data and metadata of the result set.
|
The ResultSet structure provides methods for accessing the data and metadata of the result set.
|
||||||
|
|
||||||
- `fn affected_rows(&self) -> i32`
|
- `fn affected_rows(&self) -> i32`
|
||||||
|
|
||||||
- **Interface Description**: Get the number of affected rows.
|
- **Interface Description**: Get the number of affected rows.
|
||||||
- **Return Value**: Number of affected rows, type `i32`.
|
- **Return Value**: Number of affected rows, type `i32`.
|
||||||
|
|
||||||
- `fn precision(&self) -> Precision`
|
- `fn precision(&self) -> Precision`
|
||||||
|
|
||||||
- **Interface Description**: Get precision information.
|
- **Interface Description**: Get precision information.
|
||||||
- **Return Value**: Precision information, type `Precision`.
|
- **Return Value**: Precision information, type `Precision`.
|
||||||
|
|
||||||
- `fn fields(&self) -> &[Field]`
|
- `fn fields(&self) -> &[Field]`
|
||||||
|
|
||||||
- **Interface Description**: Get field information. See the Field structure description below.
|
- **Interface Description**: Get field information. See the Field structure description below.
|
||||||
- **Return Value**: Reference to an array of field information.
|
- **Return Value**: Reference to an array of field information.
|
||||||
|
|
||||||
- `fn summary(&self) -> (usize, usize)`
|
- `fn summary(&self) -> (usize, usize)`
|
||||||
|
|
||||||
- **Interface Description**: Get summary information.
|
- **Interface Description**: Get summary information.
|
||||||
- **Return Value**: A tuple containing two `usize` types, representing some statistical information.
|
- **Return Value**: A tuple containing two `usize` types, representing some statistical information.
|
||||||
|
|
||||||
- `fn num_of_fields(&self) -> usize`
|
- `fn num_of_fields(&self) -> usize`
|
||||||
|
|
||||||
- **Interface Description**: Get the number of fields.
|
- **Interface Description**: Get the number of fields.
|
||||||
- **Return Value**: Number of fields, type `usize`.
|
- **Return Value**: Number of fields, type `usize`.
|
||||||
|
|
||||||
- `fn blocks(&mut self) -> IBlockIter<'_, Self>`
|
- `fn blocks(&mut self) -> IBlockIter<'_, Self>`
|
||||||
|
|
||||||
- **Interface Description**: Get an iterator for the raw data blocks.
|
- **Interface Description**: Get an iterator for the raw data blocks.
|
||||||
- **Return Value**: Iterator for the raw data blocks, type `IBlockIter<'_, Self>`.
|
- **Return Value**: Iterator for the raw data blocks, type `IBlockIter<'_, Self>`.
|
||||||
|
|
||||||
- `fn rows(&mut self) -> IRowsIter<'_, Self>`
|
- `fn rows(&mut self) -> IRowsIter<'_, Self>`
|
||||||
|
|
||||||
- **Interface Description**: Get an iterator for row-wise querying.
|
- **Interface Description**: Get an iterator for row-wise querying.
|
||||||
- **Return Value**: Iterator for row-wise querying, type `IRowsIter<'_, Self>`.
|
- **Return Value**: Iterator for row-wise querying, type `IRowsIter<'_, Self>`.
|
||||||
|
|
||||||
- `fn deserialize<T>(&mut self) -> Map<IRowsIter<'_, Self>, fn(_: Result<RowView<'_>, Error>) -> Result<T, Error>>`
|
- `fn deserialize<T>(&mut self) -> Map<IRowsIter<'_, Self>, fn(_: Result<RowView<'_>, Error>) -> Result<T, Error>>`
|
||||||
|
|
||||||
- **Interface Description**: Deserialize row data.
|
- **Interface Description**: Deserialize row data.
|
||||||
- **Generic Parameters**:
|
- **Generic Parameters**:
|
||||||
- `T`: Target type, must implement `DeserializeOwned`.
|
- `T`: Target type, must implement `DeserializeOwned`.
|
||||||
|
@ -325,10 +349,12 @@ The ResultSet structure provides methods for accessing the data and metadata of
|
||||||
The Field structure provides methods for accessing field information.
|
The Field structure provides methods for accessing field information.
|
||||||
|
|
||||||
- `pub const fn empty() -> Field`
|
- `pub const fn empty() -> Field`
|
||||||
|
|
||||||
- **Interface Description**: Create an empty `Field` instance.
|
- **Interface Description**: Create an empty `Field` instance.
|
||||||
- **Return Value**: Returns an empty `Field` instance.
|
- **Return Value**: Returns an empty `Field` instance.
|
||||||
|
|
||||||
- `pub fn new(name: impl Into<String>, ty: Ty, bytes: u32) -> Field`
|
- `pub fn new(name: impl Into<String>, ty: Ty, bytes: u32) -> Field`
|
||||||
|
|
||||||
- **Interface Description**: Create a new `Field` instance.
|
- **Interface Description**: Create a new `Field` instance.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `name`: Field name.
|
- `name`: Field name.
|
||||||
|
@ -337,22 +363,27 @@ The Field structure provides methods for accessing field information.
|
||||||
- **Return Value**: Returns a new `Field` instance.
|
- **Return Value**: Returns a new `Field` instance.
|
||||||
|
|
||||||
- `pub fn name(&self) -> &str`
|
- `pub fn name(&self) -> &str`
|
||||||
|
|
||||||
- **Interface Description**: Get the field name.
|
- **Interface Description**: Get the field name.
|
||||||
- **Return Value**: Returns the field name.
|
- **Return Value**: Returns the field name.
|
||||||
|
|
||||||
- `pub fn escaped_name(&self) -> String`
|
- `pub fn escaped_name(&self) -> String`
|
||||||
|
|
||||||
- **Interface Description**: Get the escaped field name.
|
- **Interface Description**: Get the escaped field name.
|
||||||
- **Return Value**: Returns the escaped field name.
|
- **Return Value**: Returns the escaped field name.
|
||||||
|
|
||||||
- `pub const fn ty(&self) -> Ty`
|
- `pub const fn ty(&self) -> Ty`
|
||||||
|
|
||||||
- **Interface Description**: Get the field type.
|
- **Interface Description**: Get the field type.
|
||||||
- **Return Value**: Returns the field type.
|
- **Return Value**: Returns the field type.
|
||||||
|
|
||||||
- `pub const fn bytes(&self) -> u32`
|
- `pub const fn bytes(&self) -> u32`
|
||||||
|
|
||||||
- **Interface Description**: Get the preset length of the field.
|
- **Interface Description**: Get the preset length of the field.
|
||||||
- **Return Value**: For variable-length data types, returns the preset length; for other types, returns the byte width.
|
- **Return Value**: For variable-length data types, returns the preset length; for other types, returns the byte width.
|
||||||
|
|
||||||
- `pub fn to_c_field(&self) -> c_field_t`
|
- `pub fn to_c_field(&self) -> c_field_t`
|
||||||
|
|
||||||
- **Interface Description**: Converts a `Field` instance into a C language structure.
|
- **Interface Description**: Converts a `Field` instance into a C language structure.
|
||||||
- **Return Value**: Returns the field represented by a C language structure.
|
- **Return Value**: Returns the field represented by a C language structure.
|
||||||
|
|
||||||
|
@ -369,12 +400,14 @@ Parameter binding functionality is mainly supported by the Stmt structure.
|
||||||
The Stmt structure provides functionality related to parameter binding, used for efficient writing.
|
The Stmt structure provides functionality related to parameter binding, used for efficient writing.
|
||||||
|
|
||||||
- `fn init(taos: &Q) -> RawResult<Self>`
|
- `fn init(taos: &Q) -> RawResult<Self>`
|
||||||
|
|
||||||
- **Interface Description**: Initialize the parameter binding instance.
|
- **Interface Description**: Initialize the parameter binding instance.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `taos`: Database connection instance.
|
- `taos`: Database connection instance.
|
||||||
- **Return Value**: On success, returns the initialized instance; on failure, returns an error.
|
- **Return Value**: On success, returns the initialized instance; on failure, returns an error.
|
||||||
|
|
||||||
- `fn init_with_req_id(taos: &Q, req_id: u64) -> RawResult<Self>`
|
- `fn init_with_req_id(taos: &Q, req_id: u64) -> RawResult<Self>`
|
||||||
|
|
||||||
- **Interface Description**: Initialize the parameter binding instance using a request ID.
|
- **Interface Description**: Initialize the parameter binding instance using a request ID.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `taos`: Database connection instance.
|
- `taos`: Database connection instance.
|
||||||
|
@ -382,24 +415,28 @@ The Stmt structure provides functionality related to parameter binding, used for
|
||||||
- **Return Value**: On success, returns the initialized instance; on failure, returns an error.
|
- **Return Value**: On success, returns the initialized instance; on failure, returns an error.
|
||||||
|
|
||||||
- `fn prepare<S: AsRef<str>>(&mut self, sql: S) -> RawResult<&mut Self>`
|
- `fn prepare<S: AsRef<str>>(&mut self, sql: S) -> RawResult<&mut Self>`
|
||||||
|
|
||||||
- **Interface Description**: Prepare the SQL statement to be bound.
|
- **Interface Description**: Prepare the SQL statement to be bound.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `sql`: SQL statement to prepare.
|
- `sql`: SQL statement to prepare.
|
||||||
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
||||||
|
|
||||||
- `fn set_tbname<S: AsRef<str>>(&mut self, name: S) -> RawResult<&mut Self>`
|
- `fn set_tbname<S: AsRef<str>>(&mut self, name: S) -> RawResult<&mut Self>`
|
||||||
|
|
||||||
- **Interface Description**: Set the table name.
|
- **Interface Description**: Set the table name.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `name`: Table name.
|
- `name`: Table name.
|
||||||
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
||||||
|
|
||||||
- `fn set_tags(&mut self, tags: &[Value]) -> RawResult<&mut Self>`
|
- `fn set_tags(&mut self, tags: &[Value]) -> RawResult<&mut Self>`
|
||||||
|
|
||||||
- **Interface Description**: Set tags.
|
- **Interface Description**: Set tags.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `tags`: Array of tags.
|
- `tags`: Array of tags.
|
||||||
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
||||||
|
|
||||||
- `fn set_tbname_tags<S: AsRef<str>>(&mut self, name: S, tags: &[Value]) -> RawResult<&mut Self>`
|
- `fn set_tbname_tags<S: AsRef<str>>(&mut self, name: S, tags: &[Value]) -> RawResult<&mut Self>`
|
||||||
|
|
||||||
- **Interface Description**: Set the table name and tags.
|
- **Interface Description**: Set the table name and tags.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `name`: Table name.
|
- `name`: Table name.
|
||||||
|
@ -407,16 +444,19 @@ The Stmt structure provides functionality related to parameter binding, used for
|
||||||
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
||||||
|
|
||||||
- `fn bind(&mut self, params: &[ColumnView]) -> RawResult<&mut Self>`
|
- `fn bind(&mut self, params: &[ColumnView]) -> RawResult<&mut Self>`
|
||||||
|
|
||||||
- **Interface Description**: Bind parameters.
|
- **Interface Description**: Bind parameters.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `params`: Array of parameters.
|
- `params`: Array of parameters.
|
||||||
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
||||||
|
|
||||||
- `fn add_batch(&mut self) -> RawResult<&mut Self>`
|
- `fn add_batch(&mut self) -> RawResult<&mut Self>`
|
||||||
|
|
||||||
- **Interface Description**: Add a batch.
|
- **Interface Description**: Add a batch.
|
||||||
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
- **Return Value**: On success, returns a mutable reference to itself; on failure, returns an error.
|
||||||
|
|
||||||
- `fn execute(&mut self) -> RawResult<usize>`
|
- `fn execute(&mut self) -> RawResult<usize>`
|
||||||
|
|
||||||
- **Interface Description**: Execute the statement.
|
- **Interface Description**: Execute the statement.
|
||||||
- **Return Value**: On success, returns the number of affected rows; on failure, returns an error.
|
- **Return Value**: On success, returns the number of affected rows; on failure, returns an error.
|
||||||
|
|
||||||
|
@ -433,26 +473,31 @@ Data subscription mainly involves three structures, providing connection establi
|
||||||
Similar to TaosBuilder, TmqBuilder provides the functionality to create consumer objects.
|
Similar to TaosBuilder, TmqBuilder provides the functionality to create consumer objects.
|
||||||
|
|
||||||
- `fn available_params() -> &'static [&'static str]`
|
- `fn available_params() -> &'static [&'static str]`
|
||||||
|
|
||||||
- **Interface Description**: Get the list of available parameters in the DSN.
|
- **Interface Description**: Get the list of available parameters in the DSN.
|
||||||
- **Return Value**: Returns a reference to a static slice of strings, containing the names of available parameters.
|
- **Return Value**: Returns a reference to a static slice of strings, containing the names of available parameters.
|
||||||
|
|
||||||
- `fn from_dsn<D: IntoDsn>(dsn: D) -> RawResult<Self>`
|
- `fn from_dsn<D: IntoDsn>(dsn: D) -> RawResult<Self>`
|
||||||
|
|
||||||
- **Interface Description**: Create a connection using a DSN string, without checking the connection.
|
- **Interface Description**: Create a connection using a DSN string, without checking the connection.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `dsn`: DSN string or a type that can be converted into DSN.
|
- `dsn`: DSN string or a type that can be converted into DSN.
|
||||||
- **Return Value**: On success, returns `RawResult` of its own type, on failure returns an error.
|
- **Return Value**: On success, returns `RawResult` of its own type, on failure returns an error.
|
||||||
|
|
||||||
- `fn client_version() -> &'static str`
|
- `fn client_version() -> &'static str`
|
||||||
|
|
||||||
- **Interface Description**: Get the client version.
|
- **Interface Description**: Get the client version.
|
||||||
- **Return Value**: Returns a static string of the client version.
|
- **Return Value**: Returns a static string of the client version.
|
||||||
|
|
||||||
- `fn ping(&self, conn: &mut Self::Target) -> RawResult<()>`
|
- `fn ping(&self, conn: &mut Self::Target) -> RawResult<()>`
|
||||||
|
|
||||||
- **Interface Description**: Check if the connection is still alive.
|
- **Interface Description**: Check if the connection is still alive.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `conn`: Mutable reference to the target connection.
|
- `conn`: Mutable reference to the target connection.
|
||||||
- **Return Value**: On success, returns an empty `RawResult`, on failure returns an error.
|
- **Return Value**: On success, returns an empty `RawResult`, on failure returns an error.
|
||||||
|
|
||||||
- `fn ready(&self) -> bool`
|
- `fn ready(&self) -> bool`
|
||||||
|
|
||||||
- **Interface Description**: Check if it is ready to connect.
|
- **Interface Description**: Check if it is ready to connect.
|
||||||
- **Return Value**: Typically returns `true`, indicating that the address is ready to connect.
|
- **Return Value**: Typically returns `true`, indicating that the address is ready to connect.
|
||||||
|
|
||||||
|
@ -465,24 +510,28 @@ Similar to TaosBuilder, TmqBuilder provides the functionality to create consumer
|
||||||
The Consumer structure provides subscription-related functionalities, including subscribing, fetching messages, committing offsets, setting offsets, etc.
|
The Consumer structure provides subscription-related functionalities, including subscribing, fetching messages, committing offsets, setting offsets, etc.
|
||||||
|
|
||||||
- `fn subscribe<T: Into<String>, I: IntoIterator<Item = T> + Send>(&mut self, topics: I) -> RawResult<()>`
|
- `fn subscribe<T: Into<String>, I: IntoIterator<Item = T> + Send>(&mut self, topics: I) -> RawResult<()>`
|
||||||
|
|
||||||
- **Interface Description**: Subscribe to a series of topics.
|
- **Interface Description**: Subscribe to a series of topics.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `topics`: List of topics to subscribe to.
|
- `topics`: List of topics to subscribe to.
|
||||||
- **Return Value**: On success, returns an empty `RawResult`, on failure returns an error.
|
- **Return Value**: On success, returns an empty `RawResult`, on failure returns an error.
|
||||||
|
|
||||||
- `fn recv_timeout(&self, timeout: Timeout) -> RawResult<Option<(Self::Offset, MessageSet<Self::Meta, Self::Data>)>>`
|
- `fn recv_timeout(&self, timeout: Timeout) -> RawResult<Option<(Self::Offset, MessageSet<Self::Meta, Self::Data>)>>`
|
||||||
|
|
||||||
- **Interface Description**: Receive messages within a specified timeout period.
|
- **Interface Description**: Receive messages within a specified timeout period.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `timeout`: Timeout period.
|
- `timeout`: Timeout period.
|
||||||
- **Return Value**: On success, returns messages, on failure returns an error.
|
- **Return Value**: On success, returns messages, on failure returns an error.
|
||||||
|
|
||||||
- `fn commit(&self, offset: Self::Offset) -> RawResult<()>`
|
- `fn commit(&self, offset: Self::Offset) -> RawResult<()>`
|
||||||
|
|
||||||
- **Interface Description**: Commit the given offset.
|
- **Interface Description**: Commit the given offset.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `offset`: The offset to commit, see the Offset structure below.
|
- `offset`: The offset to commit, see the Offset structure below.
|
||||||
- **Return Value**: On success, returns an empty `RawResult`, on failure returns an error.
|
- **Return Value**: On success, returns an empty `RawResult`, on failure returns an error.
|
||||||
|
|
||||||
- `fn commit_offset(&self, topic_name: &str, vgroup_id: VGroupId, offset: i64) -> RawResult<()>`
|
- `fn commit_offset(&self, topic_name: &str, vgroup_id: VGroupId, offset: i64) -> RawResult<()>`
|
||||||
|
|
||||||
- **Interface Description**: Commit offset for a specific topic and partition.
|
- **Interface Description**: Commit offset for a specific topic and partition.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `topic_name`: Topic name.
|
- `topic_name`: Topic name.
|
||||||
|
@ -491,14 +540,17 @@ The Consumer structure provides subscription-related functionalities, including
|
||||||
- **Return Value**: On success, returns an empty `RawResult`, on failure returns an error.
|
- **Return Value**: On success, returns an empty `RawResult`, on failure returns an error.
|
||||||
|
|
||||||
- `fn list_topics(&self) -> RawResult<Vec<String>>`
|
- `fn list_topics(&self) -> RawResult<Vec<String>>`
|
||||||
|
|
||||||
- **Interface Description**: List all available topics.
|
- **Interface Description**: List all available topics.
|
||||||
- **Return Value**: On success, returns a list of topics, on failure returns an error.
|
- **Return Value**: On success, returns a list of topics, on failure returns an error.
|
||||||
|
|
||||||
- `fn assignments(&self) -> Option<Vec<(String, Vec<Assignment>)>>`
|
- `fn assignments(&self) -> Option<Vec<(String, Vec<Assignment>)>>`
|
||||||
|
|
||||||
- **Interface Description**: Get the current assignments of topics and partitions.
|
- **Interface Description**: Get the current assignments of topics and partitions.
|
||||||
- **Return Value**: On success, returns assignment information, on failure returns `None`.
|
- **Return Value**: On success, returns assignment information, on failure returns `None`.
|
||||||
|
|
||||||
- `fn offset_seek(&mut self, topic: &str, vg_id: VGroupId, offset: i64) -> RawResult<()>`
|
- `fn offset_seek(&mut self, topic: &str, vg_id: VGroupId, offset: i64) -> RawResult<()>`
|
||||||
|
|
||||||
- **Interface Description**: Set the offset for a specific topic and partition.
|
- **Interface Description**: Set the offset for a specific topic and partition.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `topic`: Topic name.
|
- `topic`: Topic name.
|
||||||
|
@ -507,6 +559,7 @@ The Consumer structure provides subscription-related functionalities, including
|
||||||
- **Return Value**: On success, returns an empty `RawResult`, on failure returns an error.
|
- **Return Value**: On success, returns an empty `RawResult`, on failure returns an error.
|
||||||
|
|
||||||
- `fn committed(&self, topic: &str, vgroup_id: VGroupId) -> RawResult<i64>`
|
- `fn committed(&self, topic: &str, vgroup_id: VGroupId) -> RawResult<i64>`
|
||||||
|
|
||||||
- **Interface Description**: Get the committed offset for a specific topic and partition.
|
- **Interface Description**: Get the committed offset for a specific topic and partition.
|
||||||
- **Parameter Description**:
|
- **Parameter Description**:
|
||||||
- `topic`: Topic name.
|
- `topic`: Topic name.
|
||||||
|
@ -525,10 +578,12 @@ The Consumer structure provides subscription-related functionalities, including
|
||||||
The Offset structure provides information about the database, topic, and partition to which the current message belongs.
|
The Offset structure provides information about the database, topic, and partition to which the current message belongs.
|
||||||
|
|
||||||
- `fn database(&self) -> &str`
|
- `fn database(&self) -> &str`
|
||||||
|
|
||||||
- **Interface Description**: Get the database name of the current message.
|
- **Interface Description**: Get the database name of the current message.
|
||||||
- **Return Value**: Reference to the database name.
|
- **Return Value**: Reference to the database name.
|
||||||
|
|
||||||
- `fn topic(&self) -> &str`
|
- `fn topic(&self) -> &str`
|
||||||
|
|
||||||
- **Interface Description**: Get the topic name of the current message.
|
- **Interface Description**: Get the topic name of the current message.
|
||||||
- **Return Value**: Reference to the topic name.
|
- **Return Value**: Reference to the topic name.
|
||||||
|
|
||||||
|
|
|
@ -6,57 +6,73 @@ slug: /tdengine-reference/client-libraries/python
|
||||||
|
|
||||||
import Tabs from "@theme/Tabs";
|
import Tabs from "@theme/Tabs";
|
||||||
import TabItem from "@theme/TabItem";
|
import TabItem from "@theme/TabItem";
|
||||||
import RequestId from "./_request_id.mdx";
|
import RequestId from "../../assets/resources/_request_id.mdx";
|
||||||
|
|
||||||
`taospy` is the official Python connector for TDengine. `taospy` provides a rich API, making it convenient for Python applications to use TDengine.
|
`taospy` is the official Python connector provided by TDengine, which provides multiple access interfaces for database writing, querying, subscribing, etc.
|
||||||
|
|
||||||
The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python).
|
The installation command is as follows:
|
||||||
|
``` bash
|
||||||
|
# Native connection and REST connection
|
||||||
|
pip3 install taospy
|
||||||
|
|
||||||
|
# WebSocket connection, optional installation
|
||||||
|
pip3 install taos-ws-py
|
||||||
|
```
|
||||||
|
|
||||||
|
The connector code is open sourced and hosted on Github [Taos Connector Python](https://github.com/taosdata/taos-connector-python).
|
||||||
|
|
||||||
## Connection Methods
|
## Connection Methods
|
||||||
|
|
||||||
`taospy` mainly provides three types of connectors. We generally recommend using **WebSocket Connection**.
|
`taospy` provides three connection methods, and we recommend using WebSocket connection.
|
||||||
|
|
||||||
- **Native Connection**, corresponding to the `taos` module of the `taospy` package. Connects to a TDengine instance natively through the TDengine client driver (taosc), supporting data writing, querying, data subscription, schemaless interface, and parameter binding interface.
|
- **Native Connection**, Python connector loads TDengine client driver (libtaos.so/taos.dll), directly connects to TDengine instance, with high performance and fast speed.
|
||||||
- **REST Connection**, corresponding to the `taosrest` module of the `taospy` package. Connects to a TDengine instance through the HTTP interface provided by taosAdapter, does not support schemaless and data subscription features.
|
Functionally, it supports functions such as data writing, querying, data subscription, schemaless interface, and parameter binding interface.
|
||||||
- **WebSocket Connection**, corresponding to the `taos-ws-py` package, which is optional. Connects to a TDengine instance through the WebSocket interface provided by taosAdapter, with a feature set slightly different from the native connection.
|
- **REST Connection**, The Python connector connects to the TDengine instance through the HTTP interface provided by the taosAdapter, with minimal dependencies and no need to install the TDengine client driver.
|
||||||
|
Functionality does not support features such as schemaless and data subscription.
|
||||||
|
- **WebSocket Connection**, The Python connector connects to the TDengine instance through the WebSocket interface provided by the taosAdapter, which combines the advantages of the first two types of connections, namely high performance and low dependency.
|
||||||
|
In terms of functionality, there are slight differences between the WebSocket connection implementation feature set and native connections.
|
||||||
|
|
||||||
For a detailed introduction to connection methods, please refer to: [Connection Methods](../../../developer-guide/connecting-to-tdengine/)
|
For a detailed introduction of the connection method, please refer to: [Connection Method](../../../developer-guide/connecting-to-tdengine/)
|
||||||
|
|
||||||
In addition to encapsulating the native and REST interfaces, `taospy` also provides a programming interface compliant with [Python Data Access Standard (PEP 249)](https://peps.python.org/pep-0249/). This makes `taospy` easily integrated with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
|
In addition to encapsulating the Native and REST interfaces, `taospy` also provides a programming interface compliant with [the Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/).
|
||||||
|
This makes it easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
|
||||||
|
|
||||||
The method of establishing a connection directly with the server using the native interface provided by the client driver is referred to as "Native Connection"; the method of establishing a connection with the server using the REST interface or WebSocket interface provided by taosAdapter is referred to as "REST Connection" or "WebSocket Connection".
|
The method of establishing a connection directly with the server using the native interface provided by the client driver is referred to as "Native Connection" in the following text;
|
||||||
|
The method of establishing a connection with the server using the REST interface or WebSocket interface provided by the taosAdapter is referred to as a "REST Connection" or "WebSocket connection" in the following text.
|
||||||
|
|
||||||
|
## Python Version Compatibility
|
||||||
|
|
||||||
|
Supports Python 3.0 and above.
|
||||||
|
|
||||||
## Supported Platforms
|
## Supported Platforms
|
||||||
|
|
||||||
- Native Connection [Supported Platforms](../../supported-platforms/) are consistent with the platforms supported by the TDengine client.
|
- The platforms supported by native connections are consistent with those supported by the TDengine client driver.
|
||||||
- REST Connection supports all platforms that can run Python.
|
- WebSocket/REST connections support all platforms that can run Python.
|
||||||
|
|
||||||
### Supported Features
|
## Versions History
|
||||||
|
|
||||||
- Native Connection supports all core features of TDengine, including: connection management, executing SQL, parameter binding, subscription, schemaless writing.
|
Python Connector historical versions (it is recommended to use the latest version of `taospy`):
|
||||||
- REST Connection supports features including: connection management, executing SQL. (Through executing SQL, you can: manage databases, manage tables and supertables, write data, query data, create continuous queries, etc.)
|
|
||||||
|
|
||||||
## Version History
|
|Python Connector Version | Major Changes | TDengine Version|
|
||||||
|
| -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- |
|
||||||
|
|2.7.18 | Support Apache SuperSet BI Tools. | - |
|
||||||
|
|2.7.16 | Added subscription configuration (session.timeout.ms, max.poll.interval.ms). | - |
|
||||||
|
|2.7.15 | Added support for VARBINARY and GEOMETRY types. | - |
|
||||||
|
|2.7.14 | Fix Known Issues. | - |
|
||||||
|
|2.7.13 | Added tmq synchronous submission offset interface. | - |
|
||||||
|
|2.7.12 | 1. Added support for varbinary type (STMT currently does not support varbinary). <br/>2. Query performance improvement (thanks to contributor [hadrianl](https://github.com/taosdata/taos-connector-python/pull/209)). | 3.1.1.2 and higher|
|
||||||
|
|2.7.9 | Data subscription supports obtaining and resetting consumption progress. | 3.0.2.6 and higher|
|
||||||
|
|2.7.8 | Added `execute_many`. | 3.0.0.0 and higher|
|
||||||
|
|
||||||
Regardless of the version of TDengine used, it is recommended to use the latest version of `taospy`.
|
WebSocket Connector Historical Versions:
|
||||||
|
|
||||||
|Python Connector Version|Main Changes|
|
|WebSocket Connector Version | Major Changes | TDengine Version|
|
||||||
|:-------------------:|:----:|
|
| ----------------------- | -------------------------------------------------------------------------------------------------- | ----------------- |
|
||||||
|2.7.16|Added subscription configuration (session.timeout.ms, max.poll.interval.ms)|
|
|0.3.5 | Added support for VARBINARY and GEOMETRY types, fixed known issues. | 3.3.0.0 and higher|
|
||||||
|2.7.15|Added support for VARBINARY and GEOMETRY types|
|
|0.3.2 | Optimize WebSocket SQL query and insertion performance, modify readme and documentation, fix known issues. | 3.2.3.0 and higher|
|
||||||
|2.7.14|Fixed known issues|
|
|0.2.9 | Known issue fixes. | - |
|
||||||
|2.7.13|Added tmq synchronous commit offset interface|
|
|0.2.5 | 1. Data subscription supports obtaining and resetting consumption progress. <br/>2. Supports schemaless. <br/>3. Supports STMT. | - |
|
||||||
|2.7.12|1. Added support for varbinary type (STMT does not support varbinary yet) <br/> 2. Improved query performance (thanks to contributor [hadrianl](https://github.com/taosdata/taos-connector-python/pull/209))|
|
|0.2.4 | Data subscription adds unsubscribe method. | 3.0.5.0 and higher|
|
||||||
|2.7.9|Data subscription supports obtaining consumption progress and resetting consumption progress|
|
|
||||||
|2.7.8|Added `execute_many`|
|
|
||||||
|
|
||||||
|Python WebSocket Connector Version|Major Changes|
|
|
||||||
|:----------------------------:|:-----:|
|
|
||||||
|0.3.5|Added support for VARBINARY and GEOMETRY types, fixed known issues|
|
|
||||||
|0.3.2|Optimized WebSocket SQL query and insertion performance, updated readme and documentation, fixed known issues|
|
|
||||||
|0.2.9|Fixed known issues|
|
|
||||||
|0.2.5|1. Data subscription supports obtaining and resetting consumption progress <br/> 2. Supports schemaless <br/> 3. Supports STMT|
|
|
||||||
|0.2.4|Data subscription adds unsubscribe method|
|
|
||||||
|
|
||||||
## Exception Handling
|
## Exception Handling
|
||||||
|
|
||||||
|
@ -69,7 +85,7 @@ The Python connector may generate 4 types of exceptions:
|
||||||
- For other TDengine module errors, please refer to [Error Codes](../../error-codes/)
|
- For other TDengine module errors, please refer to [Error Codes](../../error-codes/)
|
||||||
|
|
||||||
|Error Type|Description|Suggested Actions|
|
|Error Type|Description|Suggested Actions|
|
||||||
|:--------:|:---------:|:---------------:|
|
|:---------|:----------|:----------------|
|
||||||
|InterfaceError|taosc version too low, does not support the used interface|Please check the TDengine client version|
|
|InterfaceError|taosc version too low, does not support the used interface|Please check the TDengine client version|
|
||||||
|ConnectionError|Database connection error|Please check the TDengine server status and connection parameters|
|
|ConnectionError|Database connection error|Please check the TDengine server status and connection parameters|
|
||||||
|DatabaseError|Database error|Please check the TDengine server version and upgrade the Python connector to the latest version|
|
|DatabaseError|Database error|Please check the TDengine server version and upgrade the Python connector to the latest version|
|
||||||
|
@ -94,7 +110,7 @@ All database operations in the Python Connector, if an exception occurs, will be
|
||||||
TDengine currently supports timestamp, numeric, character, boolean types, and the corresponding Python type conversions are as follows:
|
TDengine currently supports timestamp, numeric, character, boolean types, and the corresponding Python type conversions are as follows:
|
||||||
|
|
||||||
|TDengine DataType|Python DataType|
|
|TDengine DataType|Python DataType|
|
||||||
|:---------------:|:-------------:|
|
|:---------------|:--------------|
|
||||||
|TIMESTAMP|datetime|
|
|TIMESTAMP|datetime|
|
||||||
|INT|int|
|
|INT|int|
|
||||||
|BIGINT|int|
|
|BIGINT|int|
|
||||||
|
|
|
@ -6,29 +6,27 @@ slug: /tdengine-reference/client-libraries/node
|
||||||
|
|
||||||
import Tabs from "@theme/Tabs";
|
import Tabs from "@theme/Tabs";
|
||||||
import TabItem from "@theme/TabItem";
|
import TabItem from "@theme/TabItem";
|
||||||
import RequestId from "./_request_id.mdx";
|
import RequestId from "../../assets/resources/_request_id.mdx";
|
||||||
|
|
||||||
`@tdengine/websocket` is the official Node.js language connector for TDengine. Node.js developers can use it to develop applications that access the TDengine database.
|
`@tdengine/websocket` is the official Node.js language connector for TDengine. Node.js developers can use it to develop applications that access the TDengine database.
|
||||||
|
|
||||||
The source code for the Node.js connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-node/tree/main).
|
The source code for the Node.js connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-node/tree/main).
|
||||||
|
|
||||||
## Connection Method
|
## Node.js Version Compatibility
|
||||||
|
|
||||||
The Node.js connector currently only supports WebSocket connections, which connect to a TDengine instance through the WebSocket interface provided by taosAdapter.
|
Supports Node.js 14 and above.
|
||||||
|
|
||||||
For a detailed introduction to the connection method, please refer to: [Connection Method](../../../developer-guide/connecting-to-tdengine/)
|
|
||||||
|
|
||||||
## Supported Platforms
|
## Supported Platforms
|
||||||
|
|
||||||
Supports Node.js version 14 and above.
|
Support all platforms that can run Node.js.
|
||||||
|
|
||||||
## Version History
|
## Version History
|
||||||
|
|
||||||
| Node.js Connector Version | Major Changes | TDengine Version |
|
| Node.js Connector Version | Major Changes | TDengine Version |
|
||||||
| :------------------: | :----------------------: | :----------------: |
|
| ------------------------- | ------------------------------------------------------------------------ | --------------------------- |
|
||||||
| 3.1.2 | Optimized data protocol and parsing, significantly improved performance| 3.3.2.0 and higher versions |
|
| 3.1.2 | Optimized data protocol and parsing, significantly improved performance. | - |
|
||||||
| 3.1.1 | Optimized data transmission performance | 3.3.2.0 and higher versions |
|
| 3.1.1 | Optimized data transmission performance. | 3.3.2.0 and higher versions |
|
||||||
| 3.1.0 | New release, supports WebSocket connection | 3.2.0.0 and higher versions |
|
| 3.1.0 | New release, supports WebSocket connection. | 3.2.0.0 and higher versions |
|
||||||
|
|
||||||
## Exception Handling
|
## Exception Handling
|
||||||
|
|
||||||
|
@ -38,19 +36,19 @@ Error description: Node.js connector error codes range from 100 to 110, errors o
|
||||||
|
|
||||||
For specific connector error codes, please refer to:
|
For specific connector error codes, please refer to:
|
||||||
|
|
||||||
| Error Code | Description | Suggested Actions |
|
| Error Code | Description | Suggested Actions |
|
||||||
| ---------- | -------------------------------------------------------------| ----------------------------------------------------------------------------------------- |
|
| ---------- | --------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 100 | invalid variables | The parameters are illegal, please check the corresponding interface specifications and adjust the parameter types and sizes. |
|
| 100 | invalid variables | The parameters are illegal, please check the corresponding interface specifications and adjust the parameter types and sizes. |
|
||||||
| 101 | invalid url | URL error, please check if the URL is correctly filled. |
|
| 101 | invalid url | URL error, please check if the URL is correctly filled. |
|
||||||
| 102 | received server data but did not find a callback for processing | Received server data but no upper layer callback was found |
|
| 102 | received server data but did not find a callback for processing | Received server data but no upper layer callback was found |
|
||||||
| 103 | invalid message type | Received message type unrecognized, please check if the server is normal. |
|
| 103 | invalid message type | Received message type unrecognized, please check if the server is normal. |
|
||||||
| 104 | connection creation failed | Connection creation failed, please check if the network is normal. |
|
| 104 | connection creation failed | Connection creation failed, please check if the network is normal. |
|
||||||
| 105 | websocket request timeout | Request timed out |
|
| 105 | websocket request timeout | Request timed out |
|
||||||
| 106 | authentication fail | Authentication failed, please check if the username and password are correct. |
|
| 106 | authentication fail | Authentication failed, please check if the username and password are correct. |
|
||||||
| 107 | unknown sql type in tdengine | Please check the Data Type types supported by TDengine. |
|
| 107 | unknown sql type in tdengine | Please check the Data Type types supported by TDengine. |
|
||||||
| 108 | connection has been closed | The connection has been closed, please check if the Connection is used again after closing, or if the connection is normal. |
|
| 108 | connection has been closed | The connection has been closed, please check if the Connection is used again after closing, or if the connection is normal. |
|
||||||
| 109 | fetch block data parse fail | Failed to parse the fetched query data |
|
| 109 | fetch block data parse fail | Failed to parse the fetched query data |
|
||||||
| 110 | websocket connection has reached its maximum limit | WebSocket connection has reached its maximum limit |
|
| 110 | websocket connection has reached its maximum limit | WebSocket connection has reached its maximum limit |
|
||||||
|
|
||||||
- [TDengine Node.js Connector Error Code](https://github.com/taosdata/taos-connector-node/blob/main/nodejs/src/common/wsError.ts)
|
- [TDengine Node.js Connector Error Code](https://github.com/taosdata/taos-connector-node/blob/main/nodejs/src/common/wsError.ts)
|
||||||
- For errors from other TDengine modules, please refer to [Error Codes](../../error-codes/)
|
- For errors from other TDengine modules, please refer to [Error Codes](../../error-codes/)
|
||||||
|
@ -59,38 +57,38 @@ For specific connector error codes, please refer to:
|
||||||
|
|
||||||
The table below shows the mapping between TDengine DataType and Node.js DataType
|
The table below shows the mapping between TDengine DataType and Node.js DataType
|
||||||
|
|
||||||
| TDengine DataType | Node.js DataType|
|
| TDengine DataType | Node.js DataType |
|
||||||
|-------------------|-------------|
|
| ----------------- | ---------------- |
|
||||||
| TIMESTAMP | bigint |
|
| TIMESTAMP | bigint |
|
||||||
| TINYINT | number |
|
| TINYINT | number |
|
||||||
| SMALLINT | number |
|
| SMALLINT | number |
|
||||||
| INT | number |
|
| INT | number |
|
||||||
| BIGINT | bigint |
|
| BIGINT | bigint |
|
||||||
| TINYINT UNSIGNED | number |
|
| TINYINT UNSIGNED | number |
|
||||||
| SMALLINT UNSIGNED | number |
|
| SMALLINT UNSIGNED | number |
|
||||||
| INT UNSIGNED | number |
|
| INT UNSIGNED | number |
|
||||||
| BIGINT UNSIGNED | bigint |
|
| BIGINT UNSIGNED | bigint |
|
||||||
| FLOAT | number |
|
| FLOAT | number |
|
||||||
| DOUBLE | number |
|
| DOUBLE | number |
|
||||||
| BOOL | boolean |
|
| BOOL | boolean |
|
||||||
| BINARY | string |
|
| BINARY | string |
|
||||||
| NCHAR | string |
|
| NCHAR | string |
|
||||||
| JSON | string |
|
| JSON | string |
|
||||||
| VARBINARY | ArrayBuffer |
|
| VARBINARY | ArrayBuffer |
|
||||||
| GEOMETRY | ArrayBuffer |
|
| GEOMETRY | ArrayBuffer |
|
||||||
|
|
||||||
**Note**: JSON type is only supported in tags.
|
**Note**: JSON type is only supported in tags.
|
||||||
|
|
||||||
## More Example Programs
|
## More Example Programs
|
||||||
|
|
||||||
| Example Program | Description of Example Program |
|
| Example Program | Description of Example Program |
|
||||||
| ------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------- |
|
| ---------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
|
||||||
| [sql_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/sql_example.js) | Basic usage such as establishing connections, executing SQL, etc. |
|
| [sql_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/sql_example.js) | Basic usage such as establishing connections, executing SQL, etc. |
|
||||||
| [stmt_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/stmt_example.js) | Example of binding parameters for insertion. |
|
| [stmt_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/stmt_example.js) | Example of binding parameters for insertion. |
|
||||||
| [line_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/line_example.js) | Line protocol writing example. |
|
| [line_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/line_example.js) | Line protocol writing example. |
|
||||||
| [tmq_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/tmq_example.js) | Example of using subscriptions. |
|
| [tmq_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/tmq_example.js) | Example of using subscriptions. |
|
||||||
| [all_type_query](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/all_type_query.js) | Example supporting all types. |
|
| [all_type_query](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/all_type_query.js) | Example supporting all types. |
|
||||||
| [all_type_stmt](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/all_type_stmt.js) | Example of parameter binding supporting all types. |
|
| [all_type_stmt](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/all_type_stmt.js) | Example of parameter binding supporting all types. |
|
||||||
|
|
||||||
## Usage Restrictions
|
## Usage Restrictions
|
||||||
|
|
||||||
|
|
|
@ -6,42 +6,31 @@ slug: /tdengine-reference/client-libraries/csharp
|
||||||
|
|
||||||
import Tabs from '@theme/Tabs';
|
import Tabs from '@theme/Tabs';
|
||||||
import TabItem from '@theme/TabItem';
|
import TabItem from '@theme/TabItem';
|
||||||
import RequestId from "./_request_id.mdx";
|
import RequestId from "../../assets/resources/_request_id.mdx";
|
||||||
|
|
||||||
`TDengine.Connector` is the C# language connector provided by TDengine. C# developers can use it to develop C# applications that access data in the TDengine cluster.
|
`TDengine.Connector` is the C# language connector provided by TDengine. C# developers can use it to develop C# applications that access data in the TDengine cluster.
|
||||||
|
|
||||||
## Connection Methods
|
## .Net Version Compatibility
|
||||||
|
|
||||||
`TDengine.Connector` provides two types of connectors:
|
- Supports .NET Framework 4.6 and above.
|
||||||
|
- Supports .NET 5.0 and above.
|
||||||
* **Native Connection**, which connects to a TDengine instance natively through the TDengine client driver (taosc), supporting data writing, querying, data subscription, schemaless interfaces, and parameter binding interfaces.
|
|
||||||
* **WebSocket Connection**, which connects to a TDengine instance through the WebSocket interface provided by taosAdapter, with a slightly different set of features implemented compared to the native connection. (From v3.0.1 onwards)
|
|
||||||
|
|
||||||
For a detailed introduction to connection methods, please refer to: [Connection Methods](../../../developer-guide/connecting-to-tdengine/)
|
|
||||||
|
|
||||||
## Compatibility
|
|
||||||
|
|
||||||
* `TDengine.Connector` version 3.1.0 has been completely restructured and is no longer compatible with versions 3.0.2 and earlier. For documentation on version 3.0.2, please refer to [nuget](https://www.nuget.org/packages/TDengine.Connector/3.0.2)
|
|
||||||
* `TDengine.Connector` 3.x is not compatible with TDengine 2.x. If you need to use the C# connector in an environment running TDengine 2.x, please use version 1.x of TDengine.Connector.
|
|
||||||
|
|
||||||
## Supported Platforms
|
## Supported Platforms
|
||||||
|
|
||||||
The supported platforms are consistent with those supported by the TDengine client driver.
|
- Native connection supports the same platforms as the TDengine client driver.
|
||||||
|
- WebSocket connection supports all platforms that can run the .NET runtime.
|
||||||
|
|
||||||
:::warning
|
## Version History
|
||||||
TDengine no longer supports the 32-bit Windows platform.
|
|
||||||
:::
|
|
||||||
|
|
||||||
## Version Support
|
| Connector Version | Major Changes | TDengine Version |
|
||||||
|
|------------------|-------------------------------------------------|-------------------|
|
||||||
|
| 3.1.4 | Improved WebSocket query and insert performance. | 3.3.2.0 and higher |
|
||||||
|
| 3.1.3 | Supported WebSocket auto-reconnect. | - |
|
||||||
|
| 3.1.2 | Fixed schemaless resource release. | - |
|
||||||
|
| 3.1.1 | Supported varbinary and geometry types. | - |
|
||||||
|
| 3.1.0 | WebSocket uses a native C# implementation. | 3.2.1.0 and higher |
|
||||||
|
|
||||||
| **Connector Version** | **TDengine Version** | **Main Features** |
|
## Exceptions and Error Codes
|
||||||
|------------------|------------------|----------------------------|
|
|
||||||
| 3.1.3 | 3.2.1.0/3.1.1.18 | Supports WebSocket auto-reconnect |
|
|
||||||
| 3.1.2 | 3.2.1.0/3.1.1.18 | Fixes schemaless resource release |
|
|
||||||
| 3.1.1 | 3.2.1.0/3.1.1.18 | Supports varbinary and geometry types |
|
|
||||||
| 3.1.0 | 3.2.1.0/3.1.1.18 | Native implementation of WebSocket |
|
|
||||||
|
|
||||||
## Exception Handling
|
|
||||||
|
|
||||||
`TDengine.Connector` will throw exceptions, and applications need to handle these exceptions. The taosc exception type `TDengineError` includes an error code and error message, which applications can use to handle the error.
|
`TDengine.Connector` will throw exceptions, and applications need to handle these exceptions. The taosc exception type `TDengineError` includes an error code and error message, which applications can use to handle the error.
|
||||||
For error reporting in other TDengine modules, please refer to [Error Codes](../../error-codes/)
|
For error reporting in other TDengine modules, please refer to [Error Codes](../../error-codes/)
|
||||||
|
|
|
@ -6,7 +6,7 @@ slug: /tdengine-reference/client-libraries/r-lang
|
||||||
|
|
||||||
import Tabs from '@theme/Tabs';
|
import Tabs from '@theme/Tabs';
|
||||||
import TabItem from '@theme/TabItem';
|
import TabItem from '@theme/TabItem';
|
||||||
import Rdemo from "../../07-develop/_connect_r.mdx"
|
import Rdemo from "../../assets/resources/_connect_r.mdx"
|
||||||
|
|
||||||
The RJDBC library in R language can enable R language programs to access TDengine data. Here are the installation process, configuration process, and R language example code.
|
The RJDBC library in R language can enable R language programs to access TDengine data. Here are the installation process, configuration process, and R language example code.
|
||||||
|
|
||||||
|
|
|
@ -20,7 +20,7 @@ TDengine ODBC provides both 64-bit and 32-bit drivers. However, the 32-bit versi
|
||||||
|
|
||||||
## ODBC Version Compatibility
|
## ODBC Version Compatibility
|
||||||
|
|
||||||
- Supports ODBC 3.8 and all previous versions.
|
Supports all ODBC versions.
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
|
@ -119,12 +119,12 @@ In addition to this, the WebSocket connection method also supports 32-bit applic
|
||||||
|
|
||||||
## Version History
|
## Version History
|
||||||
|
|
||||||
| taos_odbc Version | Main Changes | TDengine Version |
|
| taos_odbc Version | Major Changes | TDengine Version |
|
||||||
| :----------- | :-------------------------------------------------------------------------------------------------- | :---------------- |
|
| ----------- | -------------------------------------------------------------------------------------------------- | ---------------- |
|
||||||
| v1.1.0 | 1. Supports view functionality;<br/>2. Supports VARBINARY/GEOMETRY data types;<br/>3. Supports ODBC 32-bit WebSocket connection method (Enterprise edition only);<br/>4. Supports ODBC data source configuration dialog settings for compatibility adaptation options for industrial software like KingSCADA, Kepware, etc. (Enterprise edition only); | 3.3.3.0 and higher |
|
| v1.1.0 | 1. Supports view functionality. <br/>2. Supports VARBINARY/GEOMETRY data types. <br/>3. Supports ODBC 32-bit WebSocket connection method (Enterprise edition only). <br/>4. Supports ODBC data source configuration dialog settings for compatibility adaptation options for industrial software like KingSCADA, Kepware, etc. (Enterprise edition only). | 3.3.3.0 and higher |
|
||||||
| v1.0.2 | Supports CP1252 character encoding; | 3.2.3.0 and higher |
|
| v1.0.2 | Supports CP1252 character encoding. | 3.2.3.0 and higher |
|
||||||
| v1.0.1 | 1. Supports DSN settings for BI mode, in BI mode TDengine database does not return system database and supertable subtable information;<br/>2. Refactored character set conversion module, improving read and write performance;<br/>3. Default connection method in ODBC data source configuration dialog changed to "WebSocket";<br/>4. Added "Test Connection" control in ODBC data source configuration dialog;<br/>5. ODBC data source configuration supports Chinese/English interface; | - |
|
| v1.0.1 | 1. Supports DSN settings for BI mode, in BI mode TDengine database does not return system database and supertable subtable information. <br/>2. Refactored character set conversion module, improving read and write performance. <br/> 3. Default connection method in ODBC data source configuration dialog changed to "WebSocket". <br/>4. Added "Test Connection" control in ODBC data source configuration dialog. <br/>5. ODBC data source configuration supports Chinese/English interface. | - |
|
||||||
| v1.0.0.0 | Initial release, supports interacting with Tdengine database to read and write data, refer to the "API Reference" section for details | 3.2.2.0 and higher |
|
| v1.0.0.0 | Initial release, supports interacting with TDengine database to read and write data, refer to the "API Reference" section for details. | 3.2.2.0 and higher |
|
||||||
|
|
||||||
## Data Type Mapping
|
## Data Type Mapping
|
||||||
|
|
||||||
|
|
|
@ -7,12 +7,12 @@ import Tabs from "@theme/Tabs";
|
||||||
import TabItem from "@theme/TabItem";
|
import TabItem from "@theme/TabItem";
|
||||||
import Image from '@theme/IdealImage';
|
import Image from '@theme/IdealImage';
|
||||||
import imgClientLib from '../../assets/client-libraries-01.png';
|
import imgClientLib from '../../assets/client-libraries-01.png';
|
||||||
import InstallOnLinux from "./_linux_install.mdx";
|
import InstallOnLinux from "../../assets/resources/_linux_install.mdx";
|
||||||
import InstallOnWindows from "./_windows_install.mdx";
|
import InstallOnWindows from "../../assets/resources/_windows_install.mdx";
|
||||||
import InstallOnMacOS from "./_macos_install.mdx";
|
import InstallOnMacOS from "../../assets/resources/_macos_install.mdx";
|
||||||
import VerifyWindows from "./_verify_windows.mdx";
|
import VerifyWindows from "../../assets/resources/_verify_windows.mdx";
|
||||||
import VerifyLinux from "./_verify_linux.mdx";
|
import VerifyLinux from "../../assets/resources/_verify_linux.mdx";
|
||||||
import VerifyMacOS from "./_verify_macos.mdx";
|
import VerifyMacOS from "../../assets/resources/_verify_macos.mdx";
|
||||||
|
|
||||||
TDengine provides a rich set of application development interfaces. To facilitate users in quickly developing their own applications, TDengine supports connectors for multiple programming languages, including official connectors for C/C++, Java, Python, Go, Node.js, C#, and Rust. These connectors support connecting to the TDengine cluster using the native interface (taosc) and WebSocket interface. Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, Lua connector, and PHP connector.
|
TDengine provides a rich set of application development interfaces. To facilitate users in quickly developing their own applications, TDengine supports connectors for multiple programming languages, including official connectors for C/C++, Java, Python, Go, Node.js, C#, and Rust. These connectors support connecting to the TDengine cluster using the native interface (taosc) and WebSocket interface. Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, Lua connector, and PHP connector.
|
||||||
|
|
||||||
|
|
|
@ -15,10 +15,20 @@ This document details the server error codes that may be encountered when using
|
||||||
| 0x80000015 | Unable to resolve FQDN | Invalid fqdn set | Check fqdn settings |
|
| 0x80000015 | Unable to resolve FQDN | Invalid fqdn set | Check fqdn settings |
|
||||||
| 0x80000017 | Port already in use | The port is already occupied by some service, and the newly started service still tries to bind to that port | 1. Change the server port of the new service 2. Kill the service that previously occupied the port |
|
| 0x80000017 | Port already in use | The port is already occupied by some service, and the newly started service still tries to bind to that port | 1. Change the server port of the new service 2. Kill the service that previously occupied the port |
|
||||||
| 0x80000018 | Conn is broken | Due to network jitter or request time being too long (over 900 seconds), the system actively disconnects | 1. Set the system's maximum timeout duration 2. Check request duration |
|
| 0x80000018 | Conn is broken | Due to network jitter or request time being too long (over 900 seconds), the system actively disconnects | 1. Set the system's maximum timeout duration 2. Check request duration |
|
||||||
| 0x80000019 | Conn read timeout | Not enabled | |
|
| 0x80000019 | Conn read timeout | 1. The request processing time is too long 2. The server is overwhelmed 3. The server is deadlocked | 1. Explicitly configure the readTimeout parameter 2. Analyze the stack on taos |
|
||||||
| 0x80000020 | some vnode/qnode/mnode(s) out of service | After multiple retries, still unable to connect to the cluster, possibly all nodes have crashed, or the surviving nodes are not Leader nodes | 1. Check the status of taosd, analyze the reasons for taosd crash 2. Analyze why the surviving taosd cannot elect a Leader |
|
| 0x80000020 | some vnode/qnode/mnode(s) out of service | After multiple retries, still unable to connect to the cluster, possibly all nodes have crashed, or the surviving nodes are not Leader nodes | 1. Check the status of taosd, analyze the reasons for taosd crash 2. Analyze why the surviving taosd cannot elect a Leader |
|
||||||
| 0x80000021 | some vnode/qnode/mnode(s) conn is broken | After multiple retries, still unable to connect to the cluster, possibly due to network issues, request time too long, server deadlock, etc. | 1. Check network 2. Request execution time |
|
| 0x80000021 | some vnode/qnode/mnode(s) conn is broken | After multiple retries, still unable to connect to the cluster, possibly due to network issues, request time too long, server deadlock, etc. | 1. Check network 2. Request execution time |
|
||||||
| 0x80000022 | rpc open too many session | 1. High concurrency causing the number of occupied connections to reach the limit 2. Server BUG, causing connections not to be released | 1. Adjust configuration parameter numOfRpcSessions 2. Adjust configuration parameter timeToGetAvailableConn 3. Analyze reasons for server not releasing connections |
|
| 0x80000022 | rpc open too many session | 1. High concurrency causing the number of occupied connections to reach the limit 2. Server BUG, causing connections not to be released | 1. Adjust configuration parameter numOfRpcSessions 2. Adjust configuration parameter timeToGetAvailableConn 3. Analyze reasons for server not releasing connections |
|
||||||
|
| 0x80000023 | RPC network error | 1. Network issues, possibly intermittent 2. Server crash | 1. Check the network 2. Check if the server has restarted |
|
||||||
|
| 0x80000024 | RPC network bus | 1. When pulling data between clusters, no available connections are obtained, or the number of connections has reached the limit | 1. Check if the concurrency is too high 2. Check if there are any anomalies in the cluster nodes, such as deadlocks |
|
||||||
|
| 0x80000025 | HTTP-report already quit | 1. Issues with HTTP reporting | Internal issue, can be ignored |
|
||||||
|
| 0x80000026 | RPC module already quit | 1. The client instance has already exited, but still uses the instance for queries | Check the business code to see if there is a mistake in usage |
|
||||||
|
| 0x80000027 | RPC async module already quit | 1. Engine error, can be ignored, this error code will not be returned to the user side | If returned to the user side, the engine side needs to investigate the issue |
|
||||||
|
| 0x80000028 | RPC async in process | 1. Engine error, can be ignored, this error code will not be returned to the user side | If returned to the user side, the engine side needs to investigate the issue |
|
||||||
|
| 0x80000029 | RPC no state | 1. Engine error, can be ignored, this error code will not be returned to the user side | If returned to the user side, the engine side needs to investigate the issue |
|
||||||
|
| 0x8000002A | RPC state already dropped | 1. Engine error, can be ignored, this error code will not be returned to the user side | If returned to the user side, the engine side needs to investigate the issue |
|
||||||
|
| 0x8000002B | RPC msg exceed limit | 1. Single RPC message exceeds the limit, this error code will not be returned to the user side | If returned to the user side, the engine side needs to investigate the issue |
|
||||||
|
|
||||||
|
|
||||||
## common
|
## common
|
||||||
|
|
||||||
|
@ -62,6 +72,8 @@ This document details the server error codes that may be encountered when using
|
||||||
| 0x80000133 | Invalid operation | Invalid or unsupported operation | 1. Modify to confirm the current operation is legal and supported, check parameter validity 2. If the problem persists, preserve the scene and logs, report issue on github |
|
| 0x80000133 | Invalid operation | Invalid or unsupported operation | 1. Modify to confirm the current operation is legal and supported, check parameter validity 2. If the problem persists, preserve the scene and logs, report issue on github |
|
||||||
| 0x80000134 | Invalid value | Invalid value | Preserve the scene and logs, report issue on github |
|
| 0x80000134 | Invalid value | Invalid value | Preserve the scene and logs, report issue on github |
|
||||||
| 0x80000135 | Invalid fqdn | Invalid FQDN | Check if the configured or input FQDN value is correct |
|
| 0x80000135 | Invalid fqdn | Invalid FQDN | Check if the configured or input FQDN value is correct |
|
||||||
|
| 0x8000013C | Invalid disk id | Invalid disk id | Check users whether the mounted disk is invalid or use the parameter diskIDCheckEnabled to skip the disk check. |
|
||||||
|
|
||||||
|
|
||||||
## tsc
|
## tsc
|
||||||
|
|
||||||
|
@ -241,6 +253,7 @@ This document details the server error codes that may be encountered when using
|
||||||
| 0x80000529 | Vnode is stopped | Vnode is closed | Report issue |
|
| 0x80000529 | Vnode is stopped | Vnode is closed | Report issue |
|
||||||
| 0x80000530 | Duplicate write request | Duplicate write request, internal error | Report issue |
|
| 0x80000530 | Duplicate write request | Duplicate write request, internal error | Report issue |
|
||||||
| 0x80000531 | Vnode query is busy | Query is busy | Report issue |
|
| 0x80000531 | Vnode query is busy | Query is busy | Report issue |
|
||||||
|
| 0x80000540 | Vnode already exist but Dbid not match | Internal error | Report issue |
|
||||||
|
|
||||||
## tsdb
|
## tsdb
|
||||||
|
|
||||||
|
@ -273,6 +286,9 @@ This document details the server error codes that may be encountered when using
|
||||||
| 0x80000729 | Task message error | Query message error | Preserve the scene and logs, report issue on GitHub |
|
| 0x80000729 | Task message error | Query message error | Preserve the scene and logs, report issue on GitHub |
|
||||||
| 0x8000072B | Task status error | Subquery status error | Preserve the scene and logs, report issue on GitHub |
|
| 0x8000072B | Task status error | Subquery status error | Preserve the scene and logs, report issue on GitHub |
|
||||||
| 0x8000072F | Job not exist | Query JOB no longer exists | Preserve the scene and logs, report issue on GitHub |
|
| 0x8000072F | Job not exist | Query JOB no longer exists | Preserve the scene and logs, report issue on GitHub |
|
||||||
|
| 0x80000739 | Query memory upper limit is reached | Single query memory upper limit is reached | Modify memory upper limit size or optimize SQL |
|
||||||
|
| 0x8000073A | Query memory exhausted | Query memory in dnode is exhausted | Limit concurrent queries or add more physical memory |
|
||||||
|
| 0x8000073B | Timeout for long time no fetch | Query without fetch for a long time | Correct application to fetch data asap |
|
||||||
|
|
||||||
## grant
|
## grant
|
||||||
|
|
||||||
|
@ -493,6 +509,7 @@ This document details the server error codes that may be encountered when using
|
||||||
| 0x80003103 | Invalid tsma state | The vgroup of the stream computing result is inconsistent with the vgroup that created the TSMA index | Check error logs, contact development for handling |
|
| 0x80003103 | Invalid tsma state | The vgroup of the stream computing result is inconsistent with the vgroup that created the TSMA index | Check error logs, contact development for handling |
|
||||||
| 0x80003104 | Invalid tsma pointer | Processing the results issued by stream computing, the message body is a null pointer. | Check error logs, contact development for handling |
|
| 0x80003104 | Invalid tsma pointer | Processing the results issued by stream computing, the message body is a null pointer. | Check error logs, contact development for handling |
|
||||||
| 0x80003105 | Invalid tsma parameters | Processing the results issued by stream computing, the result count is 0. | Check error logs, contact development for handling |
|
| 0x80003105 | Invalid tsma parameters | Processing the results issued by stream computing, the result count is 0. | Check error logs, contact development for handling |
|
||||||
|
| 0x80003113 | Tsma optimization cannot be applied with INTERVAL AUTO offset. | Tsma optimization cannot be enabled with INTERVAL AUTO OFFSET under the current query conditions. | Use SKIP_TSMA Hint or specify a manual INTERVAL OFFSET. |
|
||||||
| 0x80003150 | Invalid rsma env | Rsma execution environment is abnormal. | Check error logs, contact development for handling |
|
| 0x80003150 | Invalid rsma env | Rsma execution environment is abnormal. | Check error logs, contact development for handling |
|
||||||
| 0x80003151 | Invalid rsma state | Rsma execution state is abnormal. | Check error logs, contact development for handling |
|
| 0x80003151 | Invalid rsma state | Rsma execution state is abnormal. | Check error logs, contact development for handling |
|
||||||
| 0x80003152 | Rsma qtaskinfo creation error | Creating stream computing environment failed. | Check error logs, contact development for handling |
|
| 0x80003152 | Rsma qtaskinfo creation error | Creating stream computing environment failed. | Check error logs, contact development for handling |
|
||||||
|
|
|
@ -287,3 +287,13 @@ This feature is an optional configuration item, which is enabled by default in t
|
||||||
You can disable this parameter at any time by modifying telemetryReporting to 0 in taos.cfg, then restarting the database service.
|
You can disable this parameter at any time by modifying telemetryReporting to 0 in taos.cfg, then restarting the database service.
|
||||||
Code located at: [https://github.com/taosdata/TDengine/blob/62e609c558deb764a37d1a01ba84bc35115a85a4/source/dnode/mnode/impl/src/mndTelem.c](https://github.com/taosdata/TDengine/blob/62e609c558deb764a37d1a01ba84bc35115a85a4/source/dnode/mnode/impl/src/mndTelem.c).
|
Code located at: [https://github.com/taosdata/TDengine/blob/62e609c558deb764a37d1a01ba84bc35115a85a4/source/dnode/mnode/impl/src/mndTelem.c](https://github.com/taosdata/TDengine/blob/62e609c558deb764a37d1a01ba84bc35115a85a4/source/dnode/mnode/impl/src/mndTelem.c).
|
||||||
Additionally, for the highly secure enterprise version, TDengine Enterprise, this parameter will not be operational.
|
Additionally, for the highly secure enterprise version, TDengine Enterprise, this parameter will not be operational.
|
||||||
|
|
||||||
|
### 31 What should I do if I encounter 'Sync leader is unreachable' when connecting to the cluster for the first time?
|
||||||
|
|
||||||
|
Reporting this error indicates that the first connection to the cluster was successful, but the IP address accessed for the first time was not the leader of mnode. An error occurred when the client attempted to establish a connection with the leader. The client searches for the leader node through EP, which specifies the fqdn and port number. There are two common reasons for this error:
|
||||||
|
|
||||||
|
- The ports of other dnodes in the cluster are not open
|
||||||
|
- The client's hosts file is not configured correctly
|
||||||
|
|
||||||
|
Therefore, first, check whether all ports on the server and cluster (default 6030 for native connections and 6041 for HTTP connections) are open; Next, check if the client's hosts file has configured the fqdn and IP information for all dnodes in the cluster.
|
||||||
|
If the issue still cannot be resolved, it is necessary to contact Taos technical personnel for support.
|
||||||
|
|
|
@ -22,19 +22,19 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.springframework</groupId>
|
<groupId>org.springframework</groupId>
|
||||||
<artifactId>spring-context</artifactId>
|
<artifactId>spring-context</artifactId>
|
||||||
<version>5.2.8.RELEASE</version>
|
<version>5.3.39</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.springframework</groupId>
|
<groupId>org.springframework</groupId>
|
||||||
<artifactId>spring-jdbc</artifactId>
|
<artifactId>spring-jdbc</artifactId>
|
||||||
<version>5.1.9.RELEASE</version>
|
<version>5.3.39</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.springframework</groupId>
|
<groupId>org.springframework</groupId>
|
||||||
<artifactId>spring-test</artifactId>
|
<artifactId>spring-test</artifactId>
|
||||||
<version>5.1.9.RELEASE</version>
|
<version>5.3.39</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
|
@ -47,7 +47,7 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.0.0</version>
|
<version>3.4.0</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
</dependencies>
|
</dependencies>
|
||||||
|
|
|
@ -5,7 +5,7 @@
|
||||||
<parent>
|
<parent>
|
||||||
<groupId>org.springframework.boot</groupId>
|
<groupId>org.springframework.boot</groupId>
|
||||||
<artifactId>spring-boot-starter-parent</artifactId>
|
<artifactId>spring-boot-starter-parent</artifactId>
|
||||||
<version>2.2.1.RELEASE</version>
|
<version>2.6.15</version>
|
||||||
<relativePath/> <!-- lookup parent from repository -->
|
<relativePath/> <!-- lookup parent from repository -->
|
||||||
</parent>
|
</parent>
|
||||||
<groupId>com.taosdata.example</groupId>
|
<groupId>com.taosdata.example</groupId>
|
||||||
|
@ -65,6 +65,8 @@
|
||||||
<artifactId>spring-boot-starter-aop</artifactId>
|
<artifactId>spring-boot-starter-aop</artifactId>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
|
|
|
@ -3,9 +3,10 @@ package com.taosdata.example.springbootdemo;
|
||||||
import org.mybatis.spring.annotation.MapperScan;
|
import org.mybatis.spring.annotation.MapperScan;
|
||||||
import org.springframework.boot.SpringApplication;
|
import org.springframework.boot.SpringApplication;
|
||||||
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||||
|
import org.springframework.boot.autoconfigure.data.jdbc.JdbcRepositoriesAutoConfiguration;
|
||||||
|
|
||||||
@MapperScan(basePackages = {"com.taosdata.example.springbootdemo"})
|
@MapperScan(basePackages = {"com.taosdata.example.springbootdemo"})
|
||||||
@SpringBootApplication
|
@SpringBootApplication(exclude = {JdbcRepositoriesAutoConfiguration.class})
|
||||||
public class SpringbootdemoApplication {
|
public class SpringbootdemoApplication {
|
||||||
|
|
||||||
public static void main(String[] args) {
|
public static void main(String[] args) {
|
||||||
|
|
|
@ -15,6 +15,8 @@ spring.datasource.druid.max-wait=30000
|
||||||
spring.datasource.druid.validation-query=select SERVER_VERSION();
|
spring.datasource.druid.validation-query=select SERVER_VERSION();
|
||||||
spring.aop.auto=true
|
spring.aop.auto=true
|
||||||
spring.aop.proxy-target-class=true
|
spring.aop.proxy-target-class=true
|
||||||
|
|
||||||
|
spring.jooq.sql-dialect=
|
||||||
#mybatis
|
#mybatis
|
||||||
mybatis.mapper-locations=classpath:mapper/*.xml
|
mybatis.mapper-locations=classpath:mapper/*.xml
|
||||||
logging.level.com.taosdata.jdbc.springbootdemo.dao=debug
|
logging.level.com.taosdata.jdbc.springbootdemo.dao=debug
|
||||||
|
|
|
@ -10,7 +10,7 @@
|
||||||
<description>Demo project for TDengine</description>
|
<description>Demo project for TDengine</description>
|
||||||
|
|
||||||
<properties>
|
<properties>
|
||||||
<spring.version>5.3.27</spring.version>
|
<spring.version>5.3.39</spring.version>
|
||||||
</properties>
|
</properties>
|
||||||
|
|
||||||
<dependencies>
|
<dependencies>
|
||||||
|
@ -130,6 +130,7 @@
|
||||||
<plugin>
|
<plugin>
|
||||||
<groupId>org.apache.maven.plugins</groupId>
|
<groupId>org.apache.maven.plugins</groupId>
|
||||||
<artifactId>maven-compiler-plugin</artifactId>
|
<artifactId>maven-compiler-plugin</artifactId>
|
||||||
|
<version>3.13.0</version>
|
||||||
<configuration>
|
<configuration>
|
||||||
<source>8</source>
|
<source>8</source>
|
||||||
<target>8</target>
|
<target>8</target>
|
||||||
|
|
|
@ -37,7 +37,7 @@ public class QueryService {
|
||||||
stmt.execute("use " + dbName);
|
stmt.execute("use " + dbName);
|
||||||
ResultSet rs = stmt.executeQuery("show stables");
|
ResultSet rs = stmt.executeQuery("show stables");
|
||||||
while (rs.next()) {
|
while (rs.next()) {
|
||||||
String name = rs.getString("name");
|
String name = rs.getString("stable_name");
|
||||||
sqls.add("select count(*) from " + dbName + "." + name);
|
sqls.add("select count(*) from " + dbName + "." + name);
|
||||||
sqls.add("select first(*) from " + dbName + "." + name);
|
sqls.add("select first(*) from " + dbName + "." + name);
|
||||||
sqls.add("select last(*) from " + dbName + "." + name);
|
sqls.add("select last(*) from " + dbName + "." + name);
|
||||||
|
|
|
@ -1,10 +1,14 @@
|
||||||
package com.taosdata.taosdemo.service;
|
package com.taosdata.taosdemo.service;
|
||||||
|
|
||||||
|
import com.zaxxer.hikari.HikariConfig;
|
||||||
|
import com.zaxxer.hikari.HikariDataSource;
|
||||||
|
import org.junit.BeforeClass;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
import org.springframework.beans.factory.annotation.Autowired;
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
|
|
||||||
public class DatabaseServiceTest {
|
public class DatabaseServiceTest {
|
||||||
private DatabaseService service;
|
|
||||||
|
private static DatabaseService service;
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testCreateDatabase1() {
|
public void testCreateDatabase1() {
|
||||||
|
@ -20,4 +24,16 @@ public class DatabaseServiceTest {
|
||||||
public void useDatabase() {
|
public void useDatabase() {
|
||||||
service.useDatabase("test");
|
service.useDatabase("test");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void beforeClass() throws ClassNotFoundException {
|
||||||
|
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||||
|
HikariConfig config = new HikariConfig();
|
||||||
|
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8");
|
||||||
|
config.setUsername("root");
|
||||||
|
config.setPassword("taosdata");
|
||||||
|
HikariDataSource dataSource = new HikariDataSource(config);
|
||||||
|
service = new DatabaseService(dataSource);
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
|
@ -15,7 +15,7 @@ public class QueryServiceTest {
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void generateSuperTableQueries() {
|
public void generateSuperTableQueries() {
|
||||||
String[] sqls = queryService.generateSuperTableQueries("restful_test");
|
String[] sqls = queryService.generateSuperTableQueries("test");
|
||||||
for (String sql : sqls) {
|
for (String sql : sqls) {
|
||||||
System.out.println(sql);
|
System.out.println(sql);
|
||||||
}
|
}
|
||||||
|
@ -23,8 +23,8 @@ public class QueryServiceTest {
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void querySuperTable() {
|
public void querySuperTable() {
|
||||||
String[] sqls = queryService.generateSuperTableQueries("restful_test");
|
String[] sqls = queryService.generateSuperTableQueries("test");
|
||||||
queryService.querySuperTable(sqls, 1000, 10, 10);
|
queryService.querySuperTable(sqls, 100, 3, 3);
|
||||||
}
|
}
|
||||||
|
|
||||||
@BeforeClass
|
@BeforeClass
|
||||||
|
|
|
@ -3,6 +3,9 @@ package com.taosdata.taosdemo.service;
|
||||||
import com.taosdata.taosdemo.domain.FieldMeta;
|
import com.taosdata.taosdemo.domain.FieldMeta;
|
||||||
import com.taosdata.taosdemo.domain.SuperTableMeta;
|
import com.taosdata.taosdemo.domain.SuperTableMeta;
|
||||||
import com.taosdata.taosdemo.domain.TagMeta;
|
import com.taosdata.taosdemo.domain.TagMeta;
|
||||||
|
import com.zaxxer.hikari.HikariConfig;
|
||||||
|
import com.zaxxer.hikari.HikariDataSource;
|
||||||
|
import org.junit.BeforeClass;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
|
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
|
@ -10,7 +13,7 @@ import java.util.List;
|
||||||
|
|
||||||
public class SuperTableServiceTest {
|
public class SuperTableServiceTest {
|
||||||
|
|
||||||
private SuperTableService service;
|
private static SuperTableService service;
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testCreate() {
|
public void testCreate() {
|
||||||
|
@ -29,4 +32,15 @@ public class SuperTableServiceTest {
|
||||||
service.create(superTableMeta);
|
service.create(superTableMeta);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@BeforeClass
|
||||||
|
public static void beforeClass() throws ClassNotFoundException {
|
||||||
|
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||||
|
HikariConfig config = new HikariConfig();
|
||||||
|
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8");
|
||||||
|
config.setUsername("root");
|
||||||
|
config.setPassword("taosdata");
|
||||||
|
HikariDataSource dataSource = new HikariDataSource(config);
|
||||||
|
service = new SuperTableService(dataSource);
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
|
@ -1,4 +1,4 @@
|
||||||
package com.taosdata.example;
|
package com.taos.example;
|
||||||
|
|
||||||
import com.alibaba.druid.pool.DruidDataSource;
|
import com.alibaba.druid.pool.DruidDataSource;
|
||||||
|
|
||||||
|
@ -8,11 +8,11 @@ import java.sql.Statement;
|
||||||
public class DruidDemo {
|
public class DruidDemo {
|
||||||
// ANCHOR: connection_pool
|
// ANCHOR: connection_pool
|
||||||
public static void main(String[] args) throws Exception {
|
public static void main(String[] args) throws Exception {
|
||||||
String url = "jdbc:TAOS://127.0.0.1:6030/log";
|
String url = "jdbc:TAOS-WS://127.0.0.1:6041/log";
|
||||||
|
|
||||||
DruidDataSource dataSource = new DruidDataSource();
|
DruidDataSource dataSource = new DruidDataSource();
|
||||||
// jdbc properties
|
// jdbc properties
|
||||||
dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
|
dataSource.setDriverClassName("com.taosdata.jdbc.ws.WebSocketDriver");
|
||||||
dataSource.setUrl(url);
|
dataSource.setUrl(url);
|
||||||
dataSource.setUsername("root");
|
dataSource.setUsername("root");
|
||||||
dataSource.setPassword("taosdata");
|
dataSource.setPassword("taosdata");
|
||||||
|
|
|
@ -144,8 +144,9 @@ public class GeometryDemo {
|
||||||
|
|
||||||
private void executeQuery(String sql) {
|
private void executeQuery(String sql) {
|
||||||
long start = System.currentTimeMillis();
|
long start = System.currentTimeMillis();
|
||||||
try (Statement statement = connection.createStatement()) {
|
try (Statement statement = connection.createStatement();
|
||||||
ResultSet resultSet = statement.executeQuery(sql);
|
ResultSet resultSet = statement.executeQuery(sql)) {
|
||||||
|
|
||||||
long end = System.currentTimeMillis();
|
long end = System.currentTimeMillis();
|
||||||
printSql(sql, true, (end - start));
|
printSql(sql, true, (end - start));
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
package com.taosdata.example;
|
package com.taos.example;
|
||||||
|
|
||||||
import com.zaxxer.hikari.HikariConfig;
|
import com.zaxxer.hikari.HikariConfig;
|
||||||
import com.zaxxer.hikari.HikariDataSource;
|
import com.zaxxer.hikari.HikariDataSource;
|
||||||
|
@ -11,7 +11,7 @@ public class HikariDemo {
|
||||||
public static void main(String[] args) throws Exception {
|
public static void main(String[] args) throws Exception {
|
||||||
HikariConfig config = new HikariConfig();
|
HikariConfig config = new HikariConfig();
|
||||||
// jdbc properties
|
// jdbc properties
|
||||||
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
|
config.setJdbcUrl("jdbc:TAOS-WS://127.0.0.1:6041/log");
|
||||||
config.setUsername("root");
|
config.setUsername("root");
|
||||||
config.setPassword("taosdata");
|
config.setPassword("taosdata");
|
||||||
// connection pool configurations
|
// connection pool configurations
|
||||||
|
|
|
@ -39,6 +39,7 @@ public class TelnetLineProtocolExample {
|
||||||
createDatabase(conn);
|
createDatabase(conn);
|
||||||
SchemalessWriter writer = new SchemalessWriter(conn);
|
SchemalessWriter writer = new SchemalessWriter(conn);
|
||||||
writer.write(lines, SchemalessProtocolType.TELNET, SchemalessTimestampType.NOT_CONFIGURED);
|
writer.write(lines, SchemalessProtocolType.TELNET, SchemalessTimestampType.NOT_CONFIGURED);
|
||||||
|
writer.close();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -95,8 +95,8 @@ async function all_type_example() {
|
||||||
tagParams.setBoolean([true]);
|
tagParams.setBoolean([true]);
|
||||||
tagParams.setVarchar(["hello"]);
|
tagParams.setVarchar(["hello"]);
|
||||||
tagParams.setNchar(["stmt"]);
|
tagParams.setNchar(["stmt"]);
|
||||||
tagParams.setGeometry([geometryData]);
|
|
||||||
tagParams.setVarBinary([vbData]);
|
tagParams.setVarBinary([vbData]);
|
||||||
|
tagParams.setGeometry([geometryData]);
|
||||||
await stmt.setTags(tagParams);
|
await stmt.setTags(tagParams);
|
||||||
|
|
||||||
|
|
||||||
|
@ -108,8 +108,8 @@ async function all_type_example() {
|
||||||
bindParams.setBoolean([true]);
|
bindParams.setBoolean([true]);
|
||||||
bindParams.setVarchar(["hello"]);
|
bindParams.setVarchar(["hello"]);
|
||||||
bindParams.setNchar(["stmt"]);
|
bindParams.setNchar(["stmt"]);
|
||||||
bindParams.setGeometry([geometryData]);
|
|
||||||
bindParams.setVarBinary([vbData]);
|
bindParams.setVarBinary([vbData]);
|
||||||
|
bindParams.setGeometry([geometryData]);
|
||||||
|
|
||||||
await stmt.bind(bindParams);
|
await stmt.bind(bindParams);
|
||||||
await stmt.batch();
|
await stmt.batch();
|
||||||
|
|
|
@ -26,7 +26,6 @@ async function createDbAndTable() {
|
||||||
let conf = new taos.WSConfig(dsn);
|
let conf = new taos.WSConfig(dsn);
|
||||||
conf.setUser('root');
|
conf.setUser('root');
|
||||||
conf.setPwd('taosdata');
|
conf.setPwd('taosdata');
|
||||||
conf.setDb('power');
|
|
||||||
wsSql = await taos.sqlConnect(conf);
|
wsSql = await taos.sqlConnect(conf);
|
||||||
console.log("Connected to " + dsn + " successfully.");
|
console.log("Connected to " + dsn + " successfully.");
|
||||||
// create database
|
// create database
|
||||||
|
|
|
@ -40,7 +40,6 @@ async function prepare() {
|
||||||
let conf = new taos.WSConfig('ws://localhost:6041');
|
let conf = new taos.WSConfig('ws://localhost:6041');
|
||||||
conf.setUser('root');
|
conf.setUser('root');
|
||||||
conf.setPwd('taosdata');
|
conf.setPwd('taosdata');
|
||||||
conf.setDb('power');
|
|
||||||
const createDB = `CREATE DATABASE IF NOT EXISTS ${db}`;
|
const createDB = `CREATE DATABASE IF NOT EXISTS ${db}`;
|
||||||
const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`;
|
const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`;
|
||||||
|
|
||||||
|
|
|
@ -34,10 +34,10 @@ async function createConsumer() {
|
||||||
}
|
}
|
||||||
|
|
||||||
async function prepare() {
|
async function prepare() {
|
||||||
let conf = new taos.WSConfig('ws://192.168.1.98:6041');
|
let conf = new taos.WSConfig('ws://localhost:6041');
|
||||||
conf.setUser('root');
|
conf.setUser('root');
|
||||||
conf.setPwd('taosdata');
|
conf.setPwd('taosdata');
|
||||||
conf.setDb('power');
|
|
||||||
const createDB = `CREATE DATABASE IF NOT EXISTS ${db}`;
|
const createDB = `CREATE DATABASE IF NOT EXISTS ${db}`;
|
||||||
const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`;
|
const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`;
|
||||||
|
|
||||||
|
|
|
@ -303,13 +303,12 @@ Query OK, 5 row(s) in set (0.016812s)
|
||||||
|
|
||||||
#### FILL 子句
|
#### FILL 子句
|
||||||
|
|
||||||
FILL 子句,用于指定某一窗口区间数据缺失的情况下的填充模式。填充模式包括以下几种:
|
|
||||||
1. 不进行填充:NONE(默认填充模式)。
|
1. 不进行填充:NONE(默认填充模式)。
|
||||||
2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。这里需要注意,最终填充的值受由相应列的类型决定,如 FILL(VALUE, 1.23),相应列为 INT 类型,则填充值为 1。
|
2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。这里需要注意,最终填充的值受由相应列的类型决定,如 FILL(VALUE, 1.23),相应列为 INT 类型,则填充值为 1, 若查询列表中有多列需要 FILL, 则需要给每一个 FILL 列指定 VALUE, 如 `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`, 注意, SELECT 表达式中只有包含普通列时才需要指定 FILL VALUE, 如 `_wstart`, `_wstart+1a`, `now`, `1+1` 以及使用 partition by 时的 partition key (如 tbname)都不需要指定 VALUE, 如 `timediff(last(ts), _wstart)` 则需要指定VALUE。
|
||||||
3. PREV 填充:使用前一个非 NULL 值填充数据。例如:FILL(PREV)。
|
3. PREV 填充:使用前一个非 NULL 值填充数据。例如:FILL(PREV)。
|
||||||
4. NULL 填充:使用 NULL 填充数据。例如:FILL(NULL)。
|
4. NULL 填充:使用 NULL 填充数据。例如:FILL(NULL)。
|
||||||
5. LINEAR 填充:根据前后距离最近的非 NULL 值做线性插值填充。例如:FILL(LINEAR)。
|
5. LINEAR 填充:根据前后距离最近的非 NULL 值做线性插值填充。例如:FILL(LINEAR)。
|
||||||
6. NEXT 填充:使用下一个非 NULL 值填充数据。例如:FILL(NEXT)。
|
6. NEXT 填充:使用下一个非 NULL 值填充数据。例如:FILL(NEXT)。
|
||||||
|
|
||||||
以上填充模式中,除了 NONE 模式默认不填充值之外,其他模式在查询的整个时间范围内如果没有数据 FILL 子句将被忽略,即不产生填充数据,查询结果为空。这种行为在部分模式(PREV、NEXT、LINEAR)下具有合理性,因为在这些模式下没有数据意味着无法产生填充数值。
|
以上填充模式中,除了 NONE 模式默认不填充值之外,其他模式在查询的整个时间范围内如果没有数据 FILL 子句将被忽略,即不产生填充数据,查询结果为空。这种行为在部分模式(PREV、NEXT、LINEAR)下具有合理性,因为在这些模式下没有数据意味着无法产生填充数值。
|
||||||
|
|
||||||
|
|
|
@ -150,7 +150,7 @@ CSV 文件中的每个 Row 配置一个 OPC 数据点位。Row 的规则如下
|
||||||
|
|
||||||
#### 5.2. 选择数据点位
|
#### 5.2. 选择数据点位
|
||||||
|
|
||||||
可以通过配置 **根节点ID**、**命名空间**、**正则匹配** 等条件,对点位进行筛选。
|
可以通过配置 **根节点ID**、**命名空间**、**节点ID**、**节点名称** 等条件,对点位进行筛选。
|
||||||
|
|
||||||
通过配置 **超级表名**、**表名称**,指定数据要写入的超级表、子表。
|
通过配置 **超级表名**、**表名称**,指定数据要写入的超级表、子表。
|
||||||
|
|
||||||
|
|
|
@ -126,7 +126,7 @@ CSV 文件中的每个 Row 配置一个 OPC 数据点位。Row 的规则如下
|
||||||
|
|
||||||
#### 4.2. 选择数据点位
|
#### 4.2. 选择数据点位
|
||||||
|
|
||||||
可以通过配置 **根节点ID** 和 **正则匹配** 作为过滤条件,对点位进行筛选。
|
可以通过配置 **根节点ID**、**节点ID**、**节点名称** 作为过滤条件,对点位进行筛选。
|
||||||
|
|
||||||
通过配置 **超级表名**、**表名称**,指定数据要写入的超级表、子表。
|
通过配置 **超级表名**、**表名称**,指定数据要写入的超级表、子表。
|
||||||
|
|
||||||
|
|
|
@ -63,7 +63,11 @@ TDengine 可以通过 MQTT 连接器从 MQTT 代理订阅数据并将其写入 T
|
||||||
|
|
||||||
在 **Clean Session** 中,选择是否清除会话。默认值为 true。
|
在 **Clean Session** 中,选择是否清除会话。默认值为 true。
|
||||||
|
|
||||||
在 **订阅主题及 QoS 配置** 中填写要消费的 Topic 名称。使用如下格式设置: `topic1::0,topic2::1`。
|
在 **订阅主题及 QoS 配置** 中填写要消费的 Topic 名称和 QoS。使用如下格式设置: `{topic_name}::{qos}`(如:`my_topic::0`)。MQTT 协议 5.0 支持共享订阅,可以通过多个客户端订阅同一个 Topic 实现负载均衡,使用如下格式: `$share/{group_name}/{topic_name}::{qos}`,其中,`$share` 是固定前缀,表示启用共享订阅,`group_name` 是分组名称,类似 kafka 的消费者组。
|
||||||
|
|
||||||
|
在 **数据压缩** 中,配置消息体压缩算法,taosX 在接收到消息后,使用对应的压缩算法对消息体进行解压缩获取原始数据。可选项 none(不压缩), gzip, snappy, lz4 和 zstd,默认为 none。
|
||||||
|
|
||||||
|
在 **字符编码** 中,配置消息体编码格式,taosX 在接收到消息后,使用对应的编码格式对消息体进行解码获取原始数据。可选项 UTF_8, GBK, GB18030, BIG5,默认为 UTF_8
|
||||||
|
|
||||||
点击 **检查连通性** 按钮,检查数据源是否可用。
|
点击 **检查连通性** 按钮,检查数据源是否可用。
|
||||||
|
|
||||||
|
@ -146,7 +150,13 @@ json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解
|
||||||
|
|
||||||
### 7. 高级选项
|
### 7. 高级选项
|
||||||
|
|
||||||
在 **日志级别** 下拉列表中选择日志级别。有五个选项:`TRACE`、`DEBUG`、`INFO`、`WARN`、`ERROR`。 默认值为 INFO。
|
在 **消息等待队列大小** 中填写接收 MQTT 消息的缓存队列大小,当队列满时,新到达的数据会直接丢弃。可设置为 0,即不缓存。
|
||||||
|
|
||||||
|
在 **处理批次上限** 中填写可以同时进行数据处理流程的批次数量,当到达此上限后,不再从消息缓存队列中获取消息,会导致缓存队列的消息积压,最小值为 1。
|
||||||
|
|
||||||
|
在 **批次大小** 中填写每次发送给数据处理流程的消息数量,和 **批次延时** 配合使用,当读取的 MQTT 消息数量达到批次大小时,就算 **批次延时** 没有到达也立即向数据处理流程发送数据,最小值为 1。
|
||||||
|
|
||||||
|
在 **批次延时** 中填写每次生成批次消息的超时时间(单位:毫秒),从每批次接收到的第一个消息开始算起,和 **批次大小** 配合使用,当读取消息到达超时时间时,就算 **批次大小** 不满足数量也立即向数据处理流程发送数据,最小值为 1。
|
||||||
|
|
||||||
当 **保存原始数据时**,以下2个参数配置生效。
|
当 **保存原始数据时**,以下2个参数配置生效。
|
||||||
|
|
||||||
|
|
|
@ -113,6 +113,8 @@ kcat <topic> \
|
||||||
|
|
||||||
在 **获取数据的最大时长** 中设置获取消息时等待数据不足的最长时间(以毫秒为单位),默认值为 100ms。
|
在 **获取数据的最大时长** 中设置获取消息时等待数据不足的最长时间(以毫秒为单位),默认值为 100ms。
|
||||||
|
|
||||||
|
在 **字符编码** 中,配置消息体编码格式,taosX 在接收到消息后,使用对应的编码格式对消息体进行解码获取原始数据。可选项 UTF_8, GBK, GB18030, BIG5,默认为 UTF_8
|
||||||
|
|
||||||
点击 **连通性检查** 按钮,检查数据源是否可用。
|
点击 **连通性检查** 按钮,检查数据源是否可用。
|
||||||
|
|
||||||

|

|
||||||
|
|
|
@ -2,86 +2,123 @@
|
||||||
title: "CSV"
|
title: "CSV"
|
||||||
sidebar_label: "CSV"
|
sidebar_label: "CSV"
|
||||||
---
|
---
|
||||||
本节讲述如何通过 Explorer 界面创建数据迁移任务, 从 CSV 迁移数据到当前 TDengine 集群。
|
本节讲述如何通过 Explorer 界面创建数据迁移任务,从 CSV 迁移数据到当前 TDengine 集群。
|
||||||
|
|
||||||
## 功能概述
|
## 功能概述
|
||||||
导入一个或多个 CSV 文件数据到 TDengine。
|
导入一个或多个 CSV 文件数据到 TDengine。
|
||||||
|
|
||||||
## 创建任务
|
## 创建任务
|
||||||
### 1. 新增数据源
|
### 1. 新增数据源
|
||||||
在数据写入页面中,点击 **+新增数据源** 按钮,进入新增数据源页面。
|
在数据写入任务列表页面中,点击 **+新建任务** 按钮,进入新建任务页面。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 2. 配置基本信息
|
### 2. 配置基本信息
|
||||||
在 **名称** 中输入任务名称,如:“test_csv”;
|
在 **名称** 中输入任务名称,如:“test_csv”。
|
||||||
|
|
||||||
在 **类型** 下拉列表中选择 **CSV**。
|
在 **类型** 下拉列表中选择 **CSV**。
|
||||||
|
|
||||||
在 **目标数据库** 下拉列表中选择一个目标数据库,也可以先点击右侧的 **+创建数据库** 按钮
|
在 **目标数据库** 下拉列表中选择一个目标数据库,也可以先点击右侧的 **+创建数据库** 按钮。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 3. 配置 CSV 选项
|
### 3. 配置 CSV 选项
|
||||||
在 **包含表头** 区域点击开启或关闭,如果包含表头,则第一行将被视为列信息。
|
在 **包含表头** 区域点击开启或关闭,如果包含表头,则 CSV 文件内容第一行将被视为列信息。
|
||||||
|
|
||||||
在 **忽略前 N 行** 区域填写 N,表示忽略 CSV 文件的前 N 行。
|
在 **忽略前 N 行** 区域填写数字 N,表示忽略 CSV 文件的前 N 行。
|
||||||
|
|
||||||
在 **字段分隔符** 区域进行选择,CSV 字段之间的分隔符,默认是 “,” 。
|
在 **字段分隔符** 区域选择 CSV 字段分隔符,用于分隔行内容为多个字段,默认是 `,`。
|
||||||
|
|
||||||
在 **字段引用符** 区域进行选择,当 CSV 字段中包含分隔符或换行符时,用于包围字段内容,以确保整个字段被正确识别,默认是 "“"。
|
在 **字段引用符** 区域选择 CSV 字段引用符,当 CSV 字段中包含分隔符或换行符时,用于包围字段内容,以确保整个字段被正确识别,默认是 `"`。
|
||||||
|
|
||||||
在 **注释前缀符** 区域进行选择,当 CSV 文件中某行以此处指定的字符开头,则忽略该行默认是 “#”。
|
在 **注释前缀符** 区域选择 CSV 行注释前缀符,当 CSV 文件中某行以此处指定的字符开头,则忽略该行,默认是 `#`。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 4. 配置解析 CSV 文件
|
### 4. 配置解析 CSV 文件
|
||||||
在本地上传 CSV 文件,例如:test-json.csv,之后会使用这条示例 csv 文件来配置提取和过滤条件。
|
|
||||||
|
|
||||||
#### 4.1 解析
|
#### 4.1 配置数据源
|
||||||
|
|
||||||
点击 **选取文件** 后,选择 test-json.csv,点击 **解析** 预览识别的列。
|
包含“上传 CSV 文件”与“监听文件目录”两种方式,“上传 CSV 文件”是指将本地文件通过浏览器上传到 taosx 所在服务器作为数据源,“监听文件目录”是指配置一个 taosx 所在服务器的绝对路径作为数据源,以下将分别进行介绍:
|
||||||
|
|
||||||
|
##### 4.1.1 上传 CSV 文件
|
||||||
|
|
||||||
|
在“上传 CSV 文件”标签页中:
|
||||||
|
|
||||||
|
点击 **选取文件** 按钮,选取一个或多个本地文件,上传到服务器作为数据源。
|
||||||
|
|
||||||
|
在 **保留已处理文件** 区域点击开启或关闭,如果开启,则文件被处理完成后仍会保留在服务器中,如果关闭,则将被删除。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
**预览解析结果**
|
##### 4.1.2 监听文件目录
|
||||||
|
|
||||||
|
在“监听文件目录”标签页中:
|
||||||
|
|
||||||
|
在 **文件监听目录** 中输入一个 taosx 所在服务器的绝对路径,路径中包含的文件及子目录文件将作为数据源。
|
||||||
|
|
||||||
|
在 **匹配模式** 中输入一个正则表达式,用于筛选过滤目录中的文件。
|
||||||
|
|
||||||
|
在 **监听新文件** 区域点击开启或关闭,如果开启,则任务永不停止,且持续处理目录中新增的文件,如果关闭,则不处理新增文件,且初始文件处理结束后任务变为完成状态。
|
||||||
|
|
||||||
|
在 **监听间隔** 中输入一个数字,用于配置监听新文件的时间间隔。
|
||||||
|
|
||||||
|
在 **文件处理顺序** 区域选择“正序”或“倒序”,用于指定文件列表的处理先后顺序,“正序”将按照文件名的字典序正序处理,“倒序”将按照文件名的字典序倒序处理,与此同时,程序总是保持先处理文件后处理同级子目录的顺序。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
#### 4.2 字段拆分
|
#### 4.2 解析
|
||||||
|
|
||||||
在 **从列中提取或拆分** 中填写从消息体中提取或拆分的字段,例如:将 message 字段拆分成 `text_0` 和 `text_1` 这2个字段,选择 split 提取器,seperator 填写 -, number 填写2。
|
上传文件或配置监听目录后,点击解析按钮,页面将获取文件中的示例数据,同时得到识别的列与示例数据解析结果:
|
||||||
点击 **删除**,可以删除当前提取规则。
|
|
||||||
点击 **新增**,可以添加更多提取规则。
|
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
点击 **放大镜图标** 可预览提取或拆分结果。
|
#### 4.2 从列中提取或拆分
|
||||||
|
|
||||||
|
在 **从列中提取或拆分** 中填写从消息体中提取或拆分规则,例如:将 `desc` 字段拆分为 `desc_0` 与 `desc_1` 两个字段,可以选择 split 规则,separator 填写 `,`,number 填写 2 即可。
|
||||||
|
|
||||||
|
点击 **删除** 可以删除当前提取规则。
|
||||||
|
|
||||||
|
点击 **预览** 可以预览提取或拆分结果。
|
||||||
|
|
||||||
|
点击 **新增提取/拆分** 可以添加更多提取规则。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
<!-- 在 **过滤** 中,填写过滤条件,例如:填写 `id != 1`,则只有 id 不为 1 的数据才会被写入 TDengine。
|
#### 4.3 过滤
|
||||||
点击 **删除**,可以删除当前过滤规则。
|
|
||||||
|
在 **过滤** 中填写过滤条件,例如:填写 `id != "1"`,则只有 id 不为 1 的数据才会被处理。
|
||||||
|
|
||||||
|
点击 **删除** 可以删除当前过滤规则。
|
||||||
|
|
||||||
|
点击 **预览** 可以预览过滤结果。
|
||||||
|
|
||||||
|
点击 **新增过滤** 可以添加更多过滤规则。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
点击 **放大镜图标** 可查看预览过滤结果。
|
#### 4.4 映射
|
||||||
|
|
||||||
 -->
|
|
||||||
|
|
||||||
#### 4.3 表映射
|
|
||||||
|
|
||||||
在 **目标超级表** 的下拉列表中选择一个目标超级表,也可以先点击右侧的 **创建超级表** 按钮
|
在 **目标超级表** 的下拉列表中选择一个目标超级表,也可以先点击右侧的 **创建超级表** 按钮
|
||||||
|
|
||||||
在 **映射** 中,填写目标超级表中的子表名称,例如:`t_${groupid}`。
|
在映射规则中,填写目标超级表中的子表名称,例如:`csv_meter_${id}`,同时配置映射到超级表的列。
|
||||||
|
|
||||||

|
点击 **预览** 可以预览映射的结果。
|
||||||
|
|
||||||
点击 **预览**,可以预览映射的结果。
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
### 5. 创建完成
|
### 5. 创建完成
|
||||||
|
|
||||||
点击 **提交** 按钮,完成创建 CSV 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
|
点击 **提交** 按钮,完成创建 CSV 到 TDengine 的数据同步任务,回到数据写入任务列表页面,可查看任务执行情况,也可以进行任务的“启动/停止”操作与“查看/编辑/删除/复制”操作。
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
### 6. 查看运行指标
|
||||||
|
|
||||||
|
点击 **查看** 按钮,查看任务的运行指标,同时也可以查看任务中所有文件的处理情况。
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|

|
|
@ -17,7 +17,7 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
在数据写入页面中,点击 **+新增数据源** 按钮,进入新增数据源页面。
|
在数据写入页面中,点击 **+新增数据源** 按钮,进入新增数据源页面。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 2. 配置基本信息
|
### 2. 配置基本信息
|
||||||
|
|
||||||
|
@ -29,7 +29,7 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
在 **目标数据库** 下拉列表中选择一个目标数据库,也可以先点击右侧的 **+创建数据库** 按钮
|
在 **目标数据库** 下拉列表中选择一个目标数据库,也可以先点击右侧的 **+创建数据库** 按钮
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 3. 配置连接信息
|
### 3. 配置连接信息
|
||||||
|
|
||||||
|
@ -39,7 +39,7 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
点击 **连通性检查** 按钮,检查数据源是否可用。
|
点击 **连通性检查** 按钮,检查数据源是否可用。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 4. 配置采集信息
|
### 4. 配置采集信息
|
||||||
|
|
||||||
|
@ -61,7 +61,7 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
在 **查询的时间窗口** 中,填写一个时间间隔,数据迁移任务将按照这个时间间隔划分时间窗口。
|
在 **查询的时间窗口** 中,填写一个时间间隔,数据迁移任务将按照这个时间间隔划分时间窗口。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
#### 4.2. 同步 History 表的数据
|
#### 4.2. 同步 History 表的数据
|
||||||
|
|
||||||
|
@ -83,7 +83,7 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
在 **乱序时间上限** 中,填写一个时间间隔,实时数据同步过程中,超过这个时间才入库的数据可能会丢失。
|
在 **乱序时间上限** 中,填写一个时间间隔,实时数据同步过程中,超过这个时间才入库的数据可能会丢失。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
#### 4.3. 同步 Live 表的数据
|
#### 4.3. 同步 Live 表的数据
|
||||||
|
|
||||||
|
@ -97,7 +97,7 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
在 **实时同步的时间间隔** 中,填写一个时间间隔,实时数据部分将按照这个时间间隔轮询数据。
|
在 **实时同步的时间间隔** 中,填写一个时间间隔,实时数据部分将按照这个时间间隔轮询数据。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 5. 配置数据映射
|
### 5. 配置数据映射
|
||||||
|
|
||||||
|
@ -105,7 +105,8 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
点击 **从服务器检索** 按钮,从 AVEVA Historian 服务器获取示例数据。
|
点击 **从服务器检索** 按钮,从 AVEVA Historian 服务器获取示例数据。
|
||||||
|
|
||||||
在 **从列中提取或拆分** 中填写从消息体中提取或拆分的字段,例如:将 vValue 字段拆分成 `vValue_0` 和 `vValue_1` 这 2 个字段,选择 split 提取器,seperator 填写分割符 `,`, number 填写 2。
|
在 **从列中提取或拆分** 中填写从消息体中提取或拆分的字段,例如:将 vValue 字段拆分成 `vValue_0` 和 `vValue_1` 这 2 个字段,选择
|
||||||
|
split 提取器,seperator 填写分割符 `,`, number 填写 2。
|
||||||
|
|
||||||
在 **过滤** 中,填写过滤条件,例如:填写`Value > 0`,则只有 Value 大于 0 的数据才会被写入 TDengine。
|
在 **过滤** 中,填写过滤条件,例如:填写`Value > 0`,则只有 Value 大于 0 的数据才会被写入 TDengine。
|
||||||
|
|
||||||
|
@ -113,7 +114,7 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
点击 **预览**,可以查看映射的结果。
|
点击 **预览**,可以查看映射的结果。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 6. 配置高级选项
|
### 6. 配置高级选项
|
||||||
|
|
||||||
|
@ -131,7 +132,7 @@ TDengine 可以高效地从 AVEVA Historian 读取数据并将其写入 TDengine
|
||||||
|
|
||||||
在 **原始数据存储目录** 中设置原始数据保存路径。
|
在 **原始数据存储目录** 中设置原始数据保存路径。
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### 7. 创建完成
|
### 7. 创建完成
|
||||||
|
|
Before Width: | Height: | Size: 36 KiB After Width: | Height: | Size: 23 KiB |
Before Width: | Height: | Size: 18 KiB After Width: | Height: | Size: 8.9 KiB |
Before Width: | Height: | Size: 79 KiB After Width: | Height: | Size: 17 KiB |
Before Width: | Height: | Size: 57 KiB After Width: | Height: | Size: 12 KiB |
Before Width: | Height: | Size: 29 KiB After Width: | Height: | Size: 17 KiB |
Before Width: | Height: | Size: 22 KiB After Width: | Height: | Size: 34 KiB |