Merge branch '3.0' into fix-udfd-env-var-cfg

laiyongtao 2024-07-24 16:42:52 +08:00
commit dd467c23e9
1079 changed files with 91185 additions and 47816 deletions

View File

@ -393,7 +393,7 @@ pipeline {
agent{label " Mac_catalina "}
steps {
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 30, unit: 'MINUTES'){
timeout(time: 60, unit: 'MINUTES'){
pre_test()
pre_test_build_mac()
}
@ -401,7 +401,7 @@ pipeline {
}
}
stage('linux test') {
agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 "}
agent{label "slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 "}
options { skipDefaultCheckout() }
when {
changeRequest()
@ -416,7 +416,7 @@ pipeline {
echo "${WKDIR}/restore.sh -p ${BRANCH_NAME} -n ${BUILD_ID} -c {container name}"
}
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 150, unit: 'MINUTES'){
timeout(time: 200, unit: 'MINUTES'){
pre_test()
script {
sh '''
@ -454,7 +454,7 @@ pipeline {
cd ${WKC}/tests/parallel_test
export DEFAULT_RETRY_TIME=2
date
''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 900 ''' + extra_param + '''
''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 1200 ''' + extra_param + '''
'''
}
}

View File

@ -180,18 +180,20 @@ ELSE ()
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx2")
ENDIF()
MESSAGE(STATUS "SIMD instructions (FMA/AVX/AVX2) is ACTIVATED")
ENDIF()
# IF (COMPILER_SUPPORT_AVX512F AND COMPILER_SUPPORT_AVX512BMI)
# SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512f -mavx512vbmi")
# SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mavx512vbmi")
# MESSAGE(STATUS "avx512f/avx512bmi supported by compiler")
# ENDIF()
#
# IF (COMPILER_SUPPORT_AVX512VL)
# SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vl")
# SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512vl")
# MESSAGE(STATUS "avx512vl supported by compiler")
# ENDIF()
IF ("${SIMD_AVX512_SUPPORT}" MATCHES "true")
IF (COMPILER_SUPPORT_AVX512F AND COMPILER_SUPPORT_AVX512BMI)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512f -mavx512vbmi")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mavx512vbmi")
MESSAGE(STATUS "avx512f/avx512bmi enabled by compiler")
ENDIF()
IF (COMPILER_SUPPORT_AVX512VL)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vl")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512vl")
MESSAGE(STATUS "avx512vl enabled by compiler")
ENDIF()
ENDIF()
# build mode

View File

@ -47,27 +47,20 @@ IF(${TD_WINDOWS})
MESSAGE("build wingetopt Win32")
option(
BUILD_WINGETOPT
BUILD_WINGETOPT
"If build wingetopt on Windows"
ON
)
option(
TDENGINE_3
"TDengine 3.x for taos-tools"
ON
)
option(
BUILD_CRASHDUMP
"If build crashdump on Windows"
ON
)
MESSAGE("build geos Win32")
option(
BUILD_GEOS
"If build geos on Windows"
"If build crashdump on Windows"
ON
)
@ -79,7 +72,7 @@ ENDIF ()
option(
BUILD_GEOS
"If build geos on Windows"
"If build with geos"
ON
)
@ -95,6 +88,12 @@ option(
ON
)
option(
BUILD_PCRE2
"If build with pcre2"
ON
)
option(
JEMALLOC_ENABLED
"If build with jemalloc"
@ -114,14 +113,14 @@ option(
)
option(
BUILD_WITH_LEVELDB
"If build with leveldb"
BUILD_WITH_LEVELDB
"If build with leveldb"
OFF
)
option(
BUILD_WITH_ROCKSDB
"If build with rocksdb"
BUILD_WITH_ROCKSDB
"If build with rocksdb"
ON
)
@ -170,46 +169,46 @@ ENDIF ()
option(
BUILD_WITH_SQLITE
"If build with sqlite"
"If build with sqlite"
OFF
)
option(
BUILD_WITH_BDB
"If build with BDB"
"If build with BDB"
OFF
)
option(
BUILD_WITH_LUCENE
"If build with lucene"
off
BUILD_WITH_LUCENE
"If build with lucene"
off
)
option(
BUILD_WITH_NURAFT
"If build with NuRaft"
"If build with NuRaft"
OFF
)
option(
BUILD_WITH_UV
"If build with libuv"
ON
"If build with libuv"
ON
)
option(
BUILD_WITH_UV_TRANS
"If build with libuv_trans "
ON
"If build with libuv_trans "
ON
)
IF(${TD_LINUX} MATCHES TRUE)
option(
BUILD_DEPENDENCY_TESTS
"If build dependency tests"
BUILD_DEPENDENCY_TESTS
"If build dependency tests"
ON
)
@ -217,14 +216,14 @@ ENDIF ()
option(
BUILD_DOCS
"If use doxygen build documents"
"If use doxygen build documents"
OFF
)
option(
BUILD_WITH_INVERTEDINDEX
"If use invertedIndex"
ON
"If use invertedIndex"
ON
)
option(

View File

@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.2.4.0.alpha")
SET(TD_VER_NUMBER "3.3.3.0.alpha")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
@ -11,6 +11,10 @@ ELSE ()
SET(TD_VER_COMPATIBLE "3.0.0.0")
ENDIF ()
IF (TD_PRODUCT_NAME)
ADD_DEFINITIONS(-DTD_PRODUCT_NAME="${TD_PRODUCT_NAME}")
ENDIF ()
find_program(HAVE_GIT NAMES git)
IF (DEFINED GITINFO)

View File

@ -12,7 +12,7 @@ ExternalProject_Add(curl2
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 #--enable-debug
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl #--enable-debug
BUILD_COMMAND make -j
INSTALL_COMMAND make install
TEST_COMMAND ""

View File

@ -2,7 +2,7 @@
# libuv
ExternalProject_Add(libuv
GIT_REPOSITORY https://github.com/libuv/libuv.git
GIT_TAG v1.44.2
GIT_TAG v1.48.0
SOURCE_DIR "${TD_CONTRIB_DIR}/libuv"
BINARY_DIR "${TD_CONTRIB_DIR}/libuv"
CONFIGURE_COMMAND ""

View File

@ -0,0 +1,13 @@
# pcre2
ExternalProject_Add(pcre2
GIT_REPOSITORY https://github.com/PCRE2Project/pcre2.git
GIT_TAG pcre2-10.43
SOURCE_DIR "${TD_CONTRIB_DIR}/pcre2"
#BINARY_DIR "${TD_CONTRIB_DIR}/pcre2"
#BUILD_IN_SOURCE TRUE
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)

View File

@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG main
GIT_TAG 3.0
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -1,7 +1,5 @@
# xz
if (${TD_LINUX})
# xz
ExternalProject_Add(xz
GIT_REPOSITORY https://github.com/xz-mirror/xz.git
GIT_TAG v5.4.4
@ -13,5 +11,4 @@ ExternalProject_Add(xz
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
endif()
)

View File

@ -2,7 +2,6 @@
# zlib
if (${TD_LINUX})
ExternalProject_Add(zlib
GIT_REPOSITORY https://github.com/taosdata-contrib/zlib.git
GIT_TAG v1.2.11
@ -14,4 +13,3 @@ ExternalProject_Add(zlib
INSTALL_COMMAND ""
TEST_COMMAND ""
)
endif()

View File

@ -121,7 +121,7 @@ if (${BUILD_CONTRIB})
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_ROCKSDB)
endif()
else()
else()
if (NOT ${TD_LINUX})
if(${BUILD_WITH_ROCKSDB})
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@ -183,6 +183,11 @@ if(${BUILD_GEOS})
cat("${TD_SUPPORT_DIR}/geos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()
#
if(${BUILD_PCRE2})
cat("${TD_SUPPORT_DIR}/pcre2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()
# download dependencies
configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
@ -250,6 +255,10 @@ target_include_directories(
# zlib
set(CMAKE_PROJECT_INCLUDE_BEFORE "${TD_SUPPORT_DIR}/EnableCMP0048.txt.in")
if(${TD_DARWIN})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=deprecated-non-prototype")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=deprecated-non-prototype")
endif(${TD_DARWIN})
add_subdirectory(zlib EXCLUDE_FROM_ALL)
target_include_directories(
zlibstatic
@ -266,8 +275,8 @@ unset(CMAKE_PROJECT_INCLUDE_BEFORE)
# add_subdirectory(xz EXCLUDE_FROM_ALL)
# target_include_directories(
# xz
# PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/xz
# PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/xz
# PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/xz
# PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/xz
# )
# leveldb
@ -347,7 +356,7 @@ if (${BUILD_WITH_ROCKSDB})
rocksdb
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
)
else()
else()
if (NOT ${TD_LINUX})
MESSAGE(STATUS "ROCKSDB CXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
MESSAGE(STATUS "ROCKSDB C STATUS CONFIG: " ${CMAKE_C_FLAGS})
@ -396,8 +405,8 @@ if (${BUILD_WITH_ROCKSDB})
rocksdb
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
)
endif()
endif()
endif()
endif()
@ -601,6 +610,10 @@ if(${BUILD_GEOS})
)
endif(${BUILD_GEOS})
if (${BUILD_PCRE2})
add_subdirectory(pcre2 EXCLUDE_FROM_ALL)
endif(${BUILD_PCRE2})
# ================================================================================================
# Build test
# ================================================================================================

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -13,7 +13,7 @@ TDengine greatly improves the efficiency of data ingestion, querying, and storag
If you are a developer, please read the [Developer Guide](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, and make a few changes to accommodate your application, and it will work.
We live in the era of big data, and scale-up is unable to meet the growing needs of the business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to [Cluster Deployment](../deployment).
We live in the era of big data, and scale-up is unable to meet the growing needs of the business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but they also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster, please refer to [Cluster Deployment](./deployment).
TDengine uses ubiquitous SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll-up, interpolation, and time-weighted average, among many others. The [SQL Reference](./taos-sql) chapter describes the SQL syntax in detail and lists the various supported commands and functions.

View File

@ -27,7 +27,7 @@ docker pull tdengine/tdengine:3.0.1.4
And then run the following command:
```shell
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6060:6043-6060 -p 6043-6060:6043-6060/udp tdengine/tdengine
```
Note that TDengine Server 3.0 uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connections. You can open these ports as needed.
@ -36,7 +36,7 @@ If you need to persist data to a specific directory on your local machine, pleas
```shell
docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \
-v ~/data/taos/dnode/log:/var/log/taos \
-p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
-p 6030:6030 -p 6041:6041 -p 6043-6060:6043-6060 -p 6043-6060:6043-6060/udp tdengine/tdengine
```
:::note
@ -62,7 +62,7 @@ You can now access TDengine or run other Linux commands.
Note: For information about installing docker, see the [official documentation](https://docs.docker.com/get-docker/).
## Open the TDengine CLI
## TDengine Command Line Interface
On the container, run the following command to open the TDengine CLI:
@ -73,6 +73,12 @@ taos>
```
## TDengine Graphical User Interface
From TDengine 3.3.0.0, a new component called `taos-explorer` is included in the TDengine Docker image. You can use it to manage the databases, super tables, child tables, and data in your TDengine system. Some features are only available in TDengine Enterprise Edition; please contact the TDengine sales team if you need these features.
To use taos-explorer in the container, you need to access the host port mapped from container port 6060. Assuming the host name is abc.com and the port used on the host is 6060, you need to access `http://abc.com:6060`. taos-explorer uses port 6060 by default in the container. When you use it for the first time, you need to register with your enterprise email; you can then log on with your user name and password to the TDengine database management system.
## Test data insert performance
After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
@ -125,6 +131,7 @@ SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(
In the query above, you are selecting the first timestamp (ts) in the interval; another way of selecting this would be `\_wstart`, which gives the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
## Additional Information
For more information about deploying TDengine in a Docker environment, see [Deploying TDengine with Docker](../../deployment/docker).

View File

@ -35,6 +35,10 @@ gcc version - 9.3.1 or above;
## Installation
**Note**
Since TDengine 3.0.6.0, we no longer provide a standalone taosTools package for download. However, all the tools included in the taosTools package can be found in the TDengine server package.
<Tabs>
<TabItem label=".deb" value="debinst">
@ -119,11 +123,18 @@ This installation method is supported only for Debian and Ubuntu.
</TabItem>
<TabItem label="Windows" value="windows">
Note: TDengine only supports Windows Server 2016/2019 and Windows 10/11 on the Windows platform.
**Note**
- TDengine only supports Windows Server 2016/2019 and Windows 10/11 on the Windows platform.
- Since TDengine 3.1.0.0, we only provide a client package for Windows. If you need to run the TDengine server on Windows, please contact the TDengine sales team to upgrade to TDengine Enterprise.
- To run on Windows, the Microsoft Visual C++ Runtime library is required. If the Microsoft Visual C++ Runtime Library is missing on your platform, you can download and install it from [VC Runtime Library](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170).
Follow the steps below:
1. Download the Windows installation package.
<PkgListV3 type={3}/>
2. Run the downloaded package to install TDengine.
Note: From version 3.0.1.7, only the TDengine client package can be downloaded for the Windows platform. If you want to run TDengine servers on Windows, please contact our sales team to upgrade to TDengine Enterprise.
</TabItem>
<TabItem label="macOS" value="macos">
@ -153,38 +164,26 @@ After the installation is complete, run the following command to start the TDeng
```bash
systemctl start taosd
systemctl start taosadapter
systemctl start taoskeeper
systemctl start taos-explorer
```
Run the following command to confirm that TDengine is running normally:
Or you can run a script to start all of the above services together:
```bash
start-all.sh
```
systemctl can also be used to stop or restart a specific service, or to check its status, as shown below using `taosd` as an example:
```bash
systemctl start taosd
systemctl stop taosd
systemctl restart taosd
systemctl status taosd
```
Output similar to the following indicates that TDengine is running normally:
```
Active: active (running)
```
Output similar to the following indicates that TDengine has not started successfully:
```
Active: inactive (dead)
```
After confirming that TDengine is running, run the `taos` command to access the TDengine CLI.
The following `systemctl` commands can help you manage TDengine service:
- Start TDengine Server: `systemctl start taosd`
- Stop TDengine Server: `systemctl stop taosd`
- Restart TDengine Server: `systemctl restart taosd`
- Check TDengine Server status: `systemctl status taosd`
:::info
- The `systemctl` command requires _root_ privileges. If you are not logged in as the _root_ user, use the `sudo` command.
@ -193,35 +192,38 @@ The following `systemctl` commands can help you manage TDengine service:
:::
## Command Line Interface (CLI)
You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, you can execute `taos` in terminal.
</TabItem>
<TabItem label="Windows" value="windows">
After the installation is complete, please run `sc start taosd` or run `C:\TDengine\taosd.exe` with administrator privileges to start TDengine Server. Please run `sc start taosadapter` or run `C:\TDengine\taosadapter.exe` with administrator privileges to start taosAdapter to provide the http/REST service.
## Command Line Interface (CLI)
You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, run `taos.exe` from the `C:\TDengine` directory in a Windows terminal.
</TabItem>
<TabItem label="macOS" value="macos">
After the installation is complete, double-click the /applications/TDengine to start the program, or run `launchctl start com.tdengine.taosd` to start TDengine Server.
After the installation is complete, double-click /applications/TDengine to start the program, or run the `sudo launchctl start` commands shown below to start the TDengine services.
The following `launchctl` commands can help you manage the TDengine services:
```bash
sudo launchctl start com.tdengine.taosd
sudo launchctl start com.tdengine.taosadapter
sudo launchctl start com.tdengine.taoskeeper
sudo launchctl start com.tdengine.taos-explorer
```
- Start TDengine Server: `sudo launchctl start com.tdengine.taosd`
Or you can run a script to start all of the above services together:
```bash
start-all.sh
```
- Stop TDengine Server: `sudo launchctl stop com.tdengine.taosd`
The following `launchctl` commands can help you manage TDengine services, using the `taosd` service as an example:
- Check TDengine Server status: `sudo launchctl list | grep taosd`
- Check TDengine Server status details: `launchctl print system/com.tdengine.taosd`
```bash
sudo launchctl start com.tdengine.taosd
sudo launchctl stop com.tdengine.taosd
sudo launchctl list | grep taosd
sudo launchctl print system/com.tdengine.taosd
```
:::info
- Please use `sudo` to run `launchctl` to manage _com.tdengine.taosd_ with administrator privileges.
@ -232,24 +234,20 @@ The following `launchctl` commands can help you manage TDengine service:
:::
## Command Line Interface (CLI)
You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, you can execute `taos` in terminal.
</TabItem>
</Tabs>
```bash
taos
```
The TDengine CLI displays a welcome message and version information to indicate that its connection to the TDengine service was successful. If an error message is displayed, see the [FAQ](../../train-faq/faq) for troubleshooting information. At the following prompt, you can execute SQL commands.
## TDengine Command Line Interface
You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, execute `taos` (Linux/Mac) or `taos.exe` (Windows) in a terminal. The TDengine CLI prompt looks like the following:
```cmd
taos>
```
For example, you can create and delete databases and tables and run all types of queries. Each SQL command must end with a semicolon (;). For example:
Using the TDengine CLI, you can create and delete databases and tables and run all types of queries. Each SQL command must end with a semicolon (;). For example:
```sql
CREATE DATABASE demo;
@ -269,6 +267,12 @@ Query OK, 2 row(s) in set (0.003128s)
You can also monitor the deployment status, add and remove user accounts, and manage running instances. You can run the TDengine CLI on either machine. For more information, see [TDengine CLI](../../reference/taos-shell/).
## TDengine Graphical User Interface
From TDengine 3.3.0.0, a new component called `taos-explorer` is included in the TDengine Docker image. You can use it to manage the databases, super tables, child tables, and data in your TDengine system. Some features are only available in TDengine Enterprise Edition; please contact the TDengine sales team if you need these features.
To use taos-explorer in the container, you need to access the host port mapped from container port 6060. Assuming the host name is abc.com and the port used on the host is 6060, you need to access `http://abc.com:6060`. taos-explorer uses port 6060 by default in the container. When you use it for the first time, you need to register with your enterprise email; you can then log on with your user name and password to the TDengine database management system.
## Test data insert performance
After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:

View File

@ -1,8 +1,8 @@
```rust title="Native Connection/REST Connection"
```rust title="Native Connection"
{{#include docs/examples/rust/nativeexample/examples/connect.rs}}
```
:::note
For Rust client library, the connection depends on the feature being used. If "rest" feature is enabled, then only the implementation for "rest" is compiled and packaged.
For the Rust client library, the connection depends on the feature being used. If the "ws" feature is enabled, then only the WebSocket implementation is compiled and packaged.
:::

Binary file not shown.

Binary image file added (14 KiB), not shown.

View File

@ -26,17 +26,24 @@ Any application running on any platform can access TDengine through the REST API
## Establish Connection
There are two ways for a client library to establish connections to TDengine:
There are three ways for a client library to establish connections to TDengine:
1. REST connection through the REST API provided by the taosAdapter component.
2. Native connection through the TDengine client driver (taosc).
1. Native connection through the TDengine client driver (taosc).
2. REST connection through the REST API provided by the taosAdapter component.
3. Websocket connection provided by the taosAdapter component.
For REST and native connections, client libraries provide similar APIs for performing operations and running SQL statements on your databases. The main difference is the method of establishing the connection, which is not visible to users.
![TDengine connection type](connection-type-en.webp)
For all of these connection types, client libraries provide similar APIs for performing operations and running SQL statements on your databases. The main difference is the method of establishing the connection, which is not visible to users.
Key differences:
3. The REST connection is more accessible with cross-platform support; however, it results in a 30% performance downgrade.
1. The TDengine client driver (taosc) has the highest performance and supports all the features of TDengine, such as [Parameter Binding](../../client-libraries/cpp#parameter-binding-api) and [Subscription](../../client-libraries/cpp#subscription-and-consumption-api).
1. For a Native connection, the client driver taosc and the server TDengine version must be compatible.
2. For a REST connection, users do not need to install the client driver taosc, providing the advantage of cross-platform ease of use. However, functions such as data subscription and binary data types are not available. Additionally, compared to Native and WebSocket connections, a REST connection has the worst performance.
3. For a WebSocket connection, users also do not need to install the client driver taosc.
4. To connect to a cloud service instance, you must use a REST connection or a WebSocket connection.
Normally we recommend using the **WebSocket connection**.
## Install Client Driver taosc
@ -83,7 +90,7 @@ If `maven` is used to manage the projects, what needs to be done is only adding
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.2.4</version>
<version>3.3.0</version>
</dependency>
```
@ -123,18 +130,18 @@ require github.com/taosdata/driver-go/v3 latest
</TabItem>
<TabItem label="Rust" value="rust">
Just need to add `libtaos` dependency in `Cargo.toml`.
You only need to add the `taos` dependency in `Cargo.toml`.
```toml title=Cargo.toml
[dependencies]
libtaos = { version = "0.4.2"}
taos = { version = "*"}
```
:::info
Rust client library uses different features to distinguish the way to establish connection. To establish REST connection, please enable `rest` feature.
The Rust client library uses different features to distinguish the way a connection is established. To establish a WebSocket connection, please enable the `ws` feature.
```toml
libtaos = { version = "*", features = ["rest"] }
taos = { version = "*", default-features = false, features = ["ws"] }
```
:::

View File

@ -41,7 +41,7 @@ Without the current database specified, table name must be preceded with the cor
## Create STable
In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](../../concept/#model_table1), the SQL statement below can be used to create the super table.
In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](../../concept/), the SQL statement below can be used to create the super table.
```sql
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);

View File

@ -46,7 +46,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
:::
For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](../../../reference/schemaless/#Schemaless-Line-Protocol)
For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](../../../reference/schemaless/)
## Examples

View File

@ -887,4 +887,4 @@ The `pycumsum` function finds the cumulative sum for all data in the input colum
</details>
## Manage and Use UDF
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../taos-sql/udf/).
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../../taos-sql/udf/).

View File

@ -32,7 +32,7 @@ The version number of the TDengine client driver and the version number of the T
## Installation Steps
Please refer to the [Installation Steps](../#installation-steps) for TDengine client driver installation
Please refer to [Install Client Driver](../#install-client-driver) for TDengine client driver installation
## Establishing a connection
@ -372,7 +372,7 @@ The specific functions related to the interface are as follows (see also the [pr
Execute the prepared statement. Currently, a statement can only be executed once.
- `int taos_stmt_affected_rows(TAOS_STMT *stmt)`
Gets the number of rows affected by executing bind statements multiple times.
- `int taos_stmt_affected_rows_once(TAOS_STMT *stmt)`
@ -451,6 +451,101 @@ In addition to writing data using the SQL method or the parameter binding API, w
- Interfaces with the `_reqid` suffix can track the entire call chain by passing the reqid parameter.
### Subscription API
- `const char *tmq_err2str(int32_t code)`
**Description**
- This interface is used to convert error codes for data subscriptions into error messages
**Parameter description**
- code: error code
**Return value**
- non-NULL: returns the error message (the message may be empty)
- `tmq_conf_t *tmq_conf_new()`
- `tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value)`
- `void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param)`
- `void tmq_conf_destroy(tmq_conf_t *conf)`
tmq_conf_res_t is defined as follows:
```
typedef enum tmq_conf_res_t {
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
} tmq_conf_res_t;
```
The commit callback function is defined as follows:
```
typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param))
```
**Description**
- tmq_conf_new : create a tmq_conf_t structure to configure consumption parameters
- tmq_conf_set : set a configuration item; each configuration is a key-value pair
- tmq_conf_set_auto_commit_cb : set the auto commit callback function
- tmq_conf_destroy : destroy the tmq_conf_t structure
**Parameter description**
- tmq_conf_set : key is the parameter name, value is the parameter value
- tmq_conf_set_auto_commit_cb : cb is the callback function, param is the callback function parameter
**Return value**
- tmq_conf_new: a tmq_conf_t structure; NULL on failure
- tmq_conf_set: a tmq_conf_res_t; TMQ_CONF_OK means success, other values mean failure
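The following is a minimal sketch of how these configuration interfaces fit together; the group id, the configuration values, and the `on_commit` callback name are illustrative assumptions, not part of the API:

```c
#include <stdio.h>
#include "taos.h"

// Illustrative auto-commit callback: just report the commit result.
static void on_commit(tmq_t *tmq, int32_t code, void *param) {
  (void)tmq; (void)param;
  if (code != 0) {
    fprintf(stderr, "auto commit failed: %s\n", tmq_err2str(code));
  }
}

static tmq_conf_t *build_conf(void) {
  tmq_conf_t *conf = tmq_conf_new();
  if (conf == NULL) return NULL;
  // Every setting is a key-value pair; check the tmq_conf_res_t result.
  if (tmq_conf_set(conf, "group.id", "example_group") != TMQ_CONF_OK ||
      tmq_conf_set(conf, "enable.auto.commit", "true") != TMQ_CONF_OK) {
    tmq_conf_destroy(conf);
    return NULL;
  }
  tmq_conf_set_auto_commit_cb(conf, on_commit, NULL);
  return conf;  // pass to tmq_consumer_new(), then release with tmq_conf_destroy()
}
```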
- `tmq_list_t *tmq_list_new()`
- `int32_t tmq_list_append(tmq_list_t *, const char *)`
- `void tmq_list_destroy(tmq_list_t *)`
- `int32_t tmq_list_get_size(const tmq_list_t *)`
- `char **tmq_list_to_c_array(const tmq_list_t *)`
**Description**
- tmq_list_new : build a tmq_list_t structure, used to store topics
- tmq_list_append : append a topic to the tmq_list_t
- tmq_list_destroy : destroy the tmq_list_t
- tmq_list_get_size : get the size of the tmq_list_t
- tmq_list_to_c_array : convert the tmq_list_t to a C array whose elements are string pointers
**Return value**
- tmq_list_new : a tmq_list_t structure (a list of strings); NULL on failure
- tmq_list_append : zero on success, non-zero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
- tmq_list_get_size : the size of the tmq_list_t; -1 on failure
- tmq_list_to_c_array : a C array whose elements are string pointers; NULL on failure
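As a short sketch of the list helpers (the topic name is illustrative, and the array returned by `tmq_list_to_c_array` is assumed here to be a view owned by the list, so it must not be used after `tmq_list_destroy`):

```c
#include <stdio.h>
#include "taos.h"

static void list_demo(void) {
  tmq_list_t *topics = tmq_list_new();
  int32_t code = tmq_list_append(topics, "example_topic");
  if (code != 0) {
    fprintf(stderr, "append failed: %s\n", tmq_err2str(code));
  }
  int32_t n = tmq_list_get_size(topics);
  char **arr = tmq_list_to_c_array(topics);
  for (int32_t i = 0; i < n; i++) {
    printf("topic[%d] = %s\n", i, arr[i]);
  }
  tmq_list_destroy(topics);  // frees the list; arr is no longer valid after this
}
```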
- `tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen)`
- `int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list)`
- `int32_t tmq_unsubscribe(tmq_t *tmq)`
- `int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topic_list)`
- `TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout)`
- `int32_t tmq_consumer_close(tmq_t *tmq)`
**Description**
- tmq_consumer_new : build a tmq_t structure; must be paired with tmq_consumer_close
- tmq_subscribe : subscribe to topics; must be paired with tmq_unsubscribe
- tmq_unsubscribe : unsubscribe from topics; must be paired with tmq_subscribe
- tmq_subscription : obtain the list of topics subscribed to by the consumer
- tmq_consumer_poll : used to consume data
- tmq_consumer_close : close the tmq_t; must be paired with tmq_consumer_new
**Parameter description**
- conf: used to configure the consumption parameters
- errstr: the error information is stored in this string; allocation and release of the memory are the responsibility of the caller
- errstrLen: the length of errstr
- tmq: the tmq_t structure returned by tmq_consumer_new
- topic_list: the list of topics subscribed to by the consumer; needs to be freed with tmq_list_destroy
- timeout: the timeout in milliseconds to wait for data when polling; if negative, it defaults to 1 second
**Return value**
- tmq_consumer_new: a tmq_t structure; NULL on failure
- tmq_subscribe: zero on success, non-zero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
- tmq_unsubscribe: zero on success, non-zero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
- tmq_subscription: zero on success, non-zero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
- tmq_consumer_poll: a TAOS_RES structure (same as taos_query); NULL if there is no data
- tmq_consumer_close: zero on success, non-zero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
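Putting the lifecycle interfaces together, a hedged sketch of a simple poll loop; the topic name, the loop count, and the `conf` argument (for example built as in the configuration sketch above) are assumptions, and row processing is elided:

```c
#include <stdio.h>
#include "taos.h"

static int consume_demo(tmq_conf_t *conf) {
  char errstr[256] = {0};
  tmq_t *consumer = tmq_consumer_new(conf, errstr, (int32_t)sizeof(errstr));
  if (consumer == NULL) {
    fprintf(stderr, "tmq_consumer_new failed: %s\n", errstr);
    return -1;
  }

  tmq_list_t *topics = tmq_list_new();
  tmq_list_append(topics, "example_topic");
  int32_t code = tmq_subscribe(consumer, topics);
  tmq_list_destroy(topics);  // the list is no longer needed after subscribing
  if (code != 0) {
    fprintf(stderr, "subscribe failed: %s\n", tmq_err2str(code));
    tmq_consumer_close(consumer);
    return -1;
  }

  for (int i = 0; i < 10; i++) {
    TAOS_RES *msg = tmq_consumer_poll(consumer, 500);  // timeout in milliseconds
    if (msg == NULL) continue;  // no data arrived within the timeout
    // ... process the result set here, e.g. with taos_fetch_row() ...
    taos_free_result(msg);
  }

  tmq_unsubscribe(consumer);
  return tmq_consumer_close(consumer);
}
```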
- `int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment)`
- `void tmq_free_assignment(tmq_topic_assignment* pAssignment)`
@ -474,6 +569,7 @@ In addition to writing data using the SQL method or the parameter binding API, w
**Return value**
- zero on success, non-zero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
- `int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
**Function description**
@ -482,6 +578,7 @@ In addition to writing data using the SQL method or the parameter binding API, w
**Return value**
- the committed offset; -2147467247 means there is no committed value; other values less than 0 indicate failure
- `int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg)`
- `void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param)`
- `int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
@ -499,6 +596,7 @@ In addition to writing data using the SQL method or the parameter binding API, w
**Return value**
- zero on success, non-zero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
- `int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
**Function description**
@ -507,6 +605,7 @@ In addition to writing data using the SQL method or the parameter binding API, w
**Return value**
- the current consumption position; values less than 0 indicate failure, and the error message can be obtained through `char *tmq_err2str(int32_t code)`
- `int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
**Function description**
@ -514,25 +613,52 @@ In addition to writing data using the SQL method or the parameter binding API, w
**Return value**
- zero on success, non-zero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
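A sketch tying the offset interfaces together; the `vgId` and `begin` fields of `tmq_topic_assignment` are assumed to follow the public `taos.h` header, and the topic name is an assumption:

```c
#include <inttypes.h>
#include <stdio.h>
#include "taos.h"

static void rewind_topic(tmq_t *consumer, const char *topic) {
  tmq_topic_assignment *assignment = NULL;
  int32_t numOfAssignment = 0;
  int32_t code = tmq_get_topic_assignment(consumer, topic, &assignment, &numOfAssignment);
  if (code != 0) {
    fprintf(stderr, "get assignment failed: %s\n", tmq_err2str(code));
    return;
  }
  for (int32_t i = 0; i < numOfAssignment; i++) {
    int64_t committed = tmq_committed(consumer, topic, assignment[i].vgId);
    int64_t position  = tmq_position(consumer, topic, assignment[i].vgId);
    printf("vgId=%d committed=%" PRId64 " position=%" PRId64 "\n",
           assignment[i].vgId, committed, position);
    // Seek back to the earliest offset still available in this vgroup.
    tmq_offset_seek(consumer, topic, assignment[i].vgId, assignment[i].begin);
  }
  tmq_free_assignment(assignment);
}
```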
- `int64_t tmq_get_vgroup_offset(TAOS_RES* res)`
- `int32_t tmq_get_vgroup_id(TAOS_RES *res)`
**Description**
- tmq_get_vgroup_offset : Obtain the starting offset of the consumed data
- tmq_get_vgroup_id : Obtain the vgroup id of the consumed data
- `int64_t tmq_get_vgroup_offset(TAOS_RES* res)`
**Function description**
- Obtain the starting offset of the consumed data
**Parameter description**
- msg: the message consumed
**Return value**
- the starting offset of the consumed data; values less than 0 indicate failure, and the error message can be obtained through `char *tmq_err2str(int32_t code)`
- msg : Message consumed
- `int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics)`
**Function description**
- Obtain a list of topics subscribed by consumers
**Parameter description**
- topics: the list of topics subscribed to by the consumer; needs to be freed with tmq_list_destroy
**Return value**
- zero on success, non-zero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
- tmq_get_vgroup_offset : the starting offset of the consumed data; values less than 0 indicate failure, and the error message can be obtained through `char *tmq_err2str(int32_t code)`
- tmq_get_vgroup_id : the vgroup id of the result; values less than 0 indicate failure, and the error message can be obtained through `char *tmq_err2str(int32_t code)`
- `TAOS *tmq_get_connect(tmq_t *tmq)`
- `const char *tmq_get_table_name(TAOS_RES *res)`
- `tmq_res_t tmq_get_res_type(TAOS_RES *res)`
- `const char *tmq_get_topic_name(TAOS_RES *res)`
- `const char *tmq_get_db_name(TAOS_RES *res)`
tmq_res_t, the type of the consumed result, is defined as follows:
```
typedef enum tmq_res_t {
TMQ_RES_INVALID = -1, // invalid
TMQ_RES_DATA = 1, // data
TMQ_RES_TABLE_META = 2, // meta
TMQ_RES_METADATA = 3 // data & meta
} tmq_res_t;
```
**Description**
- tmq_get_connect : when a consumer is created, a connection is automatically established and saved in the tmq_t structure. This interface allows users to obtain the connection information (same as taos_connect) from the tmq_t structure
- tmq_get_table_name : get the table name of the result
- tmq_get_res_type : get the type of the result
- tmq_get_topic_name : get the topic name of the result
- tmq_get_db_name : get the db name of the result
**Parameter description**
- tmq : tmq_t structure created by tmq_consumer_new
- res : TAOS_RES structure returned by tmq_consumer_poll
**Return value**
- tmq_get_connect : the connection info in the tmq; NULL on failure
- tmq_get_table_name : the table name of the result; NULL on failure
- tmq_get_res_type : the result type, a tmq_res_t
- tmq_get_topic_name : the topic name of the result; NULL on failure
- tmq_get_db_name : the db name of the result; NULL on failure
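Finally, a sketch of the metadata getters applied to a message returned by `tmq_consumer_poll`; note that `tmq_get_table_name` can return NULL (for example when `msg.with.table.name` is not enabled), which the sketch guards against:

```c
#include <inttypes.h>
#include <stdio.h>
#include "taos.h"

static void describe_message(TAOS_RES *msg) {
  if (tmq_get_res_type(msg) != TMQ_RES_DATA) return;  // only describe plain data messages
  const char *table = tmq_get_table_name(msg);        // may be NULL
  printf("db=%s topic=%s table=%s vgId=%d offset=%" PRId64 "\n",
         tmq_get_db_name(msg),
         tmq_get_topic_name(msg),
         table ? table : "(unknown)",
         tmq_get_vgroup_id(msg),
         tmq_get_vgroup_offset(msg));
}
```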

File diff suppressed because it is too large

View File

@ -13,14 +13,23 @@ import GoInfluxLine from "../07-develop/03-insert-data/_go_line.mdx"
import GoOpenTSDBTelnet from "../07-develop/03-insert-data/_go_opts_telnet.mdx"
import GoOpenTSDBJson from "../07-develop/03-insert-data/_go_opts_json.mdx"
import GoQuery from "../07-develop/04-query-data/_go.mdx"
import RequestId from "./_request_id.mdx";
`driver-go` is the official Go language client library for TDengine. It implements the [database/sql](https://golang.org/pkg/database/sql/) package, the generic Go language interface to SQL databases. Go developers can use it to develop applications that access TDengine cluster data.
`driver-go` provides two ways to establish connections. One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The set of features implemented by the REST connection differs slightly from those implemented by the native connection.
## Connection types
This article describes how to install `driver-go` and connect to TDengine clusters and perform basic operations such as data query and data writing through `driver-go`.
`driver-go` provides three connection types.
The source code of `driver-go` is hosted on [GitHub](https://github.com/taosdata/driver-go).
* **Native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface.
* **REST connection**, which is implemented through taosAdapter. Some features like schemaless and subscriptions are not supported.
* **WebSocket connection**, which is implemented through taosAdapter. The set of features implemented by the WebSocket connection differs slightly from those implemented by the native connection.
For a detailed introduction of the connection types, please refer to: [Establish Connection](../../develop/connect/#establish-connection)
## Compatibility
Supports Go version 1.14 at minimum; it is recommended to use the latest Go version.
## Supported platforms
@ -129,7 +138,7 @@ username:password@protocol(address)/dbname?param=value
_taosSql_ implements Go's `database/sql/driver` interface via cgo. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply introducing the driver.
Use `taosSql` as `driverName` and use a correct [DSN](#DSN) as `dataSourceName`, DSN supports the following parameters.
Use `taosSql` as `driverName` and use a correct DSN as `dataSourceName`. The DSN supports the following parameters.
* cfg specifies the `taos.cfg` directory
@ -160,7 +169,7 @@ func main() {
_taosRestful_ implements Go's `database/sql/driver` interface via `http client`. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply introducing the driver.
Use `taosRestful` as `driverName` and use a correct [DSN](#DSN) as `dataSourceName` with the following parameters supported by the DSN.
Use `taosRestful` as `driverName` and use a correct DSN as `dataSourceName` with the following parameters supported by the DSN.
* `disableCompression` whether to accept compressed data; the default is true (do not accept compressed data). Set it to false if transferring data using gzip compression.
* `readBufferSize` The default size of the buffer for reading data is 4K (4096), which can be adjusted upwards when the query result has a lot of data.
@ -191,7 +200,7 @@ func main() {
_taosWS_ implements Go's `database/sql/driver` interface via WebSocket. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply introducing the driver (driver-go minimum version 3.0.2).
Use `taosWS` as `driverName` and use a correct [DSN](#DSN) as `dataSourceName` with the following parameters supported by the DSN.
Use `taosWS` as `driverName` and use a correct DSN as `dataSourceName` with the following parameters supported by the DSN.
* `writeTimeout` The timeout to send data via WebSocket.
* `readTimeout` The timeout to receive response data via WebSocket.
@ -233,45 +242,27 @@ The Go client library does not support this feature
### Create database and tables
```go
var taosDSN = "root:taosdata@tcp(localhost:6030)/"
taos, err := sql.Open("taosSql", taosDSN)
if err != nil {
log.Fatalln("failed to connect TDengine, err:", err)
}
defer taos.Close()
_, err := taos.Exec("CREATE DATABASE power")
if err != nil {
log.Fatalln("failed to create database, err:", err)
}
_, err = taos.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")
if err != nil {
log.Fatalln("failed to create stable, err:", err)
}
{{#include docs/examples/go/demo/query/main.go:create_db_and_table}}
```
### Insert data
<GoInsert />
```go
{{#include docs/examples/go/demo/query/main.go:insert_data}}
```
### Querying data
<GoQuery />
```go
{{#include docs/examples/go/demo/query/main.go:query_data}}
```
### Execute SQL with reqId
The reqId can be used for request chain tracing.
<RequestId />
```go
db, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/")
if err != nil {
panic(err)
}
defer db.Close()
ctx := context.WithValue(context.Background(), common.ReqIDKey, common.GetReqID())
_, err = db.ExecContext(ctx, "create database if not exists example_taos_sql")
if err != nil {
panic(err)
}
{{#include docs/examples/go/demo/query/main.go:with_reqid}}
```
### Writing data via parameter binding
@ -280,375 +271,14 @@ if err != nil {
<TabItem value="native" label="native connection">
```go
package main
import (
"time"
"github.com/taosdata/driver-go/v3/af"
"github.com/taosdata/driver-go/v3/common"
"github.com/taosdata/driver-go/v3/common/param"
)
func main() {
db, err := af.Open("", "root", "taosdata", "", 0)
if err != nil {
panic(err)
}
defer db.Close()
_, err = db.Exec("create database if not exists example_stmt")
if err != nil {
panic(err)
}
_, err = db.Exec("create table if not exists example_stmt.tb1(ts timestamp," +
"c1 bool," +
"c2 tinyint," +
"c3 smallint," +
"c4 int," +
"c5 bigint," +
"c6 tinyint unsigned," +
"c7 smallint unsigned," +
"c8 int unsigned," +
"c9 bigint unsigned," +
"c10 float," +
"c11 double," +
"c12 binary(20)," +
"c13 nchar(20)" +
")")
if err != nil {
panic(err)
}
stmt := db.InsertStmt()
err = stmt.Prepare("insert into example_stmt.tb1 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
if err != nil {
panic(err)
}
now := time.Now()
params := make([]*param.Param, 14)
params[0] = param.NewParam(2).
AddTimestamp(now, common.PrecisionMilliSecond).
AddTimestamp(now.Add(time.Second), common.PrecisionMilliSecond)
params[1] = param.NewParam(2).AddBool(true).AddNull()
params[2] = param.NewParam(2).AddTinyint(2).AddNull()
params[3] = param.NewParam(2).AddSmallint(3).AddNull()
params[4] = param.NewParam(2).AddInt(4).AddNull()
params[5] = param.NewParam(2).AddBigint(5).AddNull()
params[6] = param.NewParam(2).AddUTinyint(6).AddNull()
params[7] = param.NewParam(2).AddUSmallint(7).AddNull()
params[8] = param.NewParam(2).AddUInt(8).AddNull()
params[9] = param.NewParam(2).AddUBigint(9).AddNull()
params[10] = param.NewParam(2).AddFloat(10).AddNull()
params[11] = param.NewParam(2).AddDouble(11).AddNull()
params[12] = param.NewParam(2).AddBinary([]byte("binary")).AddNull()
params[13] = param.NewParam(2).AddNchar("nchar").AddNull()
paramTypes := param.NewColumnType(14).
AddTimestamp().
AddBool().
AddTinyint().
AddSmallint().
AddInt().
AddBigint().
AddUTinyint().
AddUSmallint().
AddUInt().
AddUBigint().
AddFloat().
AddDouble().
AddBinary(6).
AddNchar(5)
err = stmt.BindParam(params, paramTypes)
if err != nil {
panic(err)
}
err = stmt.AddBatch()
if err != nil {
panic(err)
}
err = stmt.Execute()
if err != nil {
panic(err)
}
err = stmt.Close()
if err != nil {
panic(err)
}
// select * from example_stmt.tb1
}
{{#include docs/examples/go/demo/stmt/main.go}}
```
</TabItem>
<TabItem value="WebSocket" label="WebSocket connection">
```go
package main
import (
"database/sql"
"fmt"
"time"
"github.com/taosdata/driver-go/v3/common"
"github.com/taosdata/driver-go/v3/common/param"
_ "github.com/taosdata/driver-go/v3/taosRestful"
"github.com/taosdata/driver-go/v3/ws/stmt"
)
func main() {
db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/")
if err != nil {
panic(err)
}
defer db.Close()
prepareEnv(db)
config := stmt.NewConfig("ws://127.0.0.1:6041/rest/stmt", 0)
config.SetConnectUser("root")
config.SetConnectPass("taosdata")
config.SetConnectDB("example_ws_stmt")
config.SetMessageTimeout(common.DefaultMessageTimeout)
config.SetWriteWait(common.DefaultWriteWait)
config.SetErrorHandler(func(connector *stmt.Connector, err error) {
panic(err)
})
config.SetCloseHandler(func() {
fmt.Println("stmt connector closed")
})
connector, err := stmt.NewConnector(config)
if err != nil {
panic(err)
}
now := time.Now()
{
stmt, err := connector.Init()
if err != nil {
panic(err)
}
err = stmt.Prepare("insert into ? using all_json tags(?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
if err != nil {
panic(err)
}
err = stmt.SetTableName("tb1")
if err != nil {
panic(err)
}
err = stmt.SetTags(param.NewParam(1).AddJson([]byte(`{"tb":1}`)), param.NewColumnType(1).AddJson(0))
if err != nil {
panic(err)
}
params := []*param.Param{
param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0),
param.NewParam(3).AddBool(true).AddNull().AddBool(true),
param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1),
param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1),
param.NewParam(3).AddInt(1).AddNull().AddInt(1),
param.NewParam(3).AddBigint(1).AddNull().AddBigint(1),
param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1),
param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1),
param.NewParam(3).AddUInt(1).AddNull().AddUInt(1),
param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1),
param.NewParam(3).AddFloat(1).AddNull().AddFloat(1),
param.NewParam(3).AddDouble(1).AddNull().AddDouble(1),
param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")),
param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"),
}
paramTypes := param.NewColumnType(14).
AddTimestamp().
AddBool().
AddTinyint().
AddSmallint().
AddInt().
AddBigint().
AddUTinyint().
AddUSmallint().
AddUInt().
AddUBigint().
AddFloat().
AddDouble().
AddBinary(0).
AddNchar(0)
err = stmt.BindParam(params, paramTypes)
if err != nil {
panic(err)
}
err = stmt.AddBatch()
if err != nil {
panic(err)
}
err = stmt.Exec()
if err != nil {
panic(err)
}
affected := stmt.GetAffectedRows()
fmt.Println("all_json affected rows:", affected)
err = stmt.Close()
if err != nil {
panic(err)
}
}
{
stmt, err := connector.Init()
if err != nil {
panic(err)
}
err = stmt.Prepare("insert into ? using all_all tags(?,?,?,?,?,?,?,?,?,?,?,?,?,?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
err = stmt.SetTableName("tb1")
if err != nil {
panic(err)
}
err = stmt.SetTableName("tb2")
if err != nil {
panic(err)
}
err = stmt.SetTags(
param.NewParam(14).
AddTimestamp(now, 0).
AddBool(true).
AddTinyint(2).
AddSmallint(2).
AddInt(2).
AddBigint(2).
AddUTinyint(2).
AddUSmallint(2).
AddUInt(2).
AddUBigint(2).
AddFloat(2).
AddDouble(2).
AddBinary([]byte("tb2")).
AddNchar("tb2"),
param.NewColumnType(14).
AddTimestamp().
AddBool().
AddTinyint().
AddSmallint().
AddInt().
AddBigint().
AddUTinyint().
AddUSmallint().
AddUInt().
AddUBigint().
AddFloat().
AddDouble().
AddBinary(0).
AddNchar(0),
)
if err != nil {
panic(err)
}
params := []*param.Param{
param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0),
param.NewParam(3).AddBool(true).AddNull().AddBool(true),
param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1),
param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1),
param.NewParam(3).AddInt(1).AddNull().AddInt(1),
param.NewParam(3).AddBigint(1).AddNull().AddBigint(1),
param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1),
param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1),
param.NewParam(3).AddUInt(1).AddNull().AddUInt(1),
param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1),
param.NewParam(3).AddFloat(1).AddNull().AddFloat(1),
param.NewParam(3).AddDouble(1).AddNull().AddDouble(1),
param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")),
param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"),
}
paramTypes := param.NewColumnType(14).
AddTimestamp().
AddBool().
AddTinyint().
AddSmallint().
AddInt().
AddBigint().
AddUTinyint().
AddUSmallint().
AddUInt().
AddUBigint().
AddFloat().
AddDouble().
AddBinary(0).
AddNchar(0)
err = stmt.BindParam(params, paramTypes)
if err != nil {
panic(err)
}
err = stmt.AddBatch()
if err != nil {
panic(err)
}
err = stmt.Exec()
if err != nil {
panic(err)
}
affected := stmt.GetAffectedRows()
fmt.Println("all_all affected rows:", affected)
err = stmt.Close()
if err != nil {
panic(err)
}
}
}
func prepareEnv(db *sql.DB) {
steps := []string{
"create database example_ws_stmt",
"create table example_ws_stmt.all_json(ts timestamp," +
"c1 bool," +
"c2 tinyint," +
"c3 smallint," +
"c4 int," +
"c5 bigint," +
"c6 tinyint unsigned," +
"c7 smallint unsigned," +
"c8 int unsigned," +
"c9 bigint unsigned," +
"c10 float," +
"c11 double," +
"c12 binary(20)," +
"c13 nchar(20)" +
")" +
"tags(t json)",
"create table example_ws_stmt.all_all(" +
"ts timestamp," +
"c1 bool," +
"c2 tinyint," +
"c3 smallint," +
"c4 int," +
"c5 bigint," +
"c6 tinyint unsigned," +
"c7 smallint unsigned," +
"c8 int unsigned," +
"c9 bigint unsigned," +
"c10 float," +
"c11 double," +
"c12 binary(20)," +
"c13 nchar(20)" +
")" +
"tags(" +
"tts timestamp," +
"tc1 bool," +
"tc2 tinyint," +
"tc3 smallint," +
"tc4 int," +
"tc5 bigint," +
"tc6 tinyint unsigned," +
"tc7 smallint unsigned," +
"tc8 int unsigned," +
"tc9 bigint unsigned," +
"tc10 float," +
"tc11 double," +
"tc12 binary(20)," +
"tc13 nchar(20))",
}
for _, step := range steps {
_, err := db.Exec(step)
if err != nil {
panic(err)
}
}
}
{{#include docs/examples/go/demo/stmtws/main.go}}
```
</TabItem>
@ -661,98 +291,14 @@ func prepareEnv(db *sql.DB) {
<TabItem value="native" label="native connection">
```go
import (
"fmt"
"github.com/taosdata/driver-go/v3/af"
)
func main() {
conn, err := af.Open("localhost", "root", "taosdata", "", 6030)
if err != nil {
fmt.Println("fail to connect, err:", err)
}
defer conn.Close()
_, err = conn.Exec("create database if not exists example")
if err != nil {
panic(err)
}
_, err = conn.Exec("use example")
if err != nil {
panic(err)
}
influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000"
err = conn.InfluxDBInsertLines([]string{influxdbData}, "ns")
if err != nil {
panic(err)
}
telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0"
err = conn.OpenTSDBInsertTelnetLines([]string{telnetData})
if err != nil {
panic(err)
}
jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"
err = conn.OpenTSDBInsertJsonPayload(jsonData)
if err != nil {
panic(err)
}
}
{{#include docs/examples/go/demo/sml/main.go}}
```
</TabItem>
<TabItem value="WebSocket" label="WebSocket connection">
```go
import (
"database/sql"
"log"
"time"
"github.com/taosdata/driver-go/v3/common"
_ "github.com/taosdata/driver-go/v3/taosWS"
"github.com/taosdata/driver-go/v3/ws/schemaless"
)
func main() {
db, err := sql.Open("taosWS", "root:taosdata@ws(localhost:6041)/")
if err != nil {
log.Fatal(err)
}
defer db.Close()
_, err = db.Exec("create database if not exists schemaless_ws")
if err != nil {
log.Fatal(err)
}
s, err := schemaless.NewSchemaless(schemaless.NewConfig("ws://localhost:6041/rest/schemaless", 1,
schemaless.SetDb("schemaless_ws"),
schemaless.SetReadTimeout(10*time.Second),
schemaless.SetWriteTimeout(10*time.Second),
schemaless.SetUser("root"),
schemaless.SetPassword("taosdata"),
schemaless.SetErrorHandler(func(err error) {
log.Fatal(err)
}),
))
if err != nil {
panic(err)
}
influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000"
telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0"
jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"
err = s.Insert(influxdbData, schemaless.InfluxDBLineProtocol, "ns", 0, common.GetReqID())
if err != nil {
panic(err)
}
err = s.Insert(telnetData, schemaless.OpenTSDBTelnetLineProtocol, "ms", 0, common.GetReqID())
if err != nil {
panic(err)
}
err = s.Insert(jsonData, schemaless.OpenTSDBJsonFormatProtocol, "ms", 0, common.GetReqID())
if err != nil {
panic(err)
}
}
{{#include docs/examples/go/demo/smlws/main.go}}
```
</TabItem>
@ -774,89 +320,31 @@ The TDengine Go client library supports subscription functionality with the foll
#### Create a Topic
```go
db, err := af.Open("", "root", "taosdata", "", 0)
if err != nil {
panic(err)
}
defer db.Close()
_, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400")
if err != nil {
panic(err)
}
_, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq")
if err != nil {
panic(err)
}
{{#include docs/examples/go/demo/consumer/main.go:create_topic}}
```
#### Create a Consumer
```go
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"group.id": "test",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"msg.with.table.name": "true",
})
if err != nil {
panic(err)
}
{{#include docs/examples/go/demo/consumer/main.go:create_consumer}}
```
#### Subscribe to consume data
```go
err = consumer.Subscribe("example_tmq_topic", nil)
if err != nil {
panic(err)
}
for i := 0; i < 5; i++ {
ev := consumer.Poll(500)
if ev != nil {
switch e := ev.(type) {
case *tmqcommon.DataMessage:
fmt.Printf("get message:%v\n", e)
case tmqcommon.Error:
fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e)
panic(e)
}
consumer.Commit()
}
}
{{#include docs/examples/go/demo/consumer/main.go:poll_data}}
```
#### Assignment subscription Offset
```go
partitions, err := consumer.Assignment()
if err != nil {
panic(err)
}
for i := 0; i < len(partitions); i++ {
fmt.Println(partitions[i])
err = consumer.Seek(tmqcommon.TopicPartition{
Topic: partitions[i].Topic,
Partition: partitions[i].Partition,
Offset: 0,
}, 0)
if err != nil {
panic(err)
}
}
{{#include docs/examples/go/demo/consumer/main.go:consumer_seek}}
```
#### Close subscriptions
```go
err = consumer.Close()
if err != nil {
panic(err)
}
{{#include docs/examples/go/demo/consumer/main.go:consumer_close}}
```
#### Full Sample Code
@ -865,232 +353,14 @@ The TDengine Go client library supports subscription functionality with the foll
<TabItem value="native" label="native connection">
```go
package main
import (
"fmt"
"os"
"time"
"github.com/taosdata/driver-go/v3/af"
"github.com/taosdata/driver-go/v3/af/tmq"
tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
)
func main() {
db, err := af.Open("", "root", "taosdata", "", 0)
if err != nil {
panic(err)
}
defer db.Close()
_, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400")
if err != nil {
panic(err)
}
_, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq")
if err != nil {
panic(err)
}
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"group.id": "test",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"msg.with.table.name": "true",
})
if err != nil {
panic(err)
}
err = consumer.Subscribe("example_tmq_topic", nil)
if err != nil {
panic(err)
}
_, err = db.Exec("create table example_tmq.t1 (ts timestamp,v int)")
if err != nil {
panic(err)
}
go func() {
for {
_, err = db.Exec("insert into example_tmq.t1 values(now,1)")
if err != nil {
panic(err)
}
time.Sleep(time.Millisecond * 100)
}
}()
for i := 0; i < 5; i++ {
ev := consumer.Poll(500)
if ev != nil {
switch e := ev.(type) {
case *tmqcommon.DataMessage:
fmt.Printf("get message:%v\n", e)
case tmqcommon.Error:
fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e)
panic(e)
}
consumer.Commit()
}
}
partitions, err := consumer.Assignment()
if err != nil {
panic(err)
}
for i := 0; i < len(partitions); i++ {
fmt.Println(partitions[i])
err = consumer.Seek(tmqcommon.TopicPartition{
Topic: partitions[i].Topic,
Partition: partitions[i].Partition,
Offset: 0,
}, 0)
if err != nil {
panic(err)
}
}
partitions, err = consumer.Assignment()
if err != nil {
panic(err)
}
for i := 0; i < len(partitions); i++ {
fmt.Println(partitions[i])
}
err = consumer.Close()
if err != nil {
panic(err)
}
}
{{#include docs/examples/go/demo/consumer/main.go}}
```
</TabItem>
<TabItem value="WebSocket" label="WebSocket connection">
```go
package main
import (
"database/sql"
"fmt"
"time"
"github.com/taosdata/driver-go/v3/common"
tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
_ "github.com/taosdata/driver-go/v3/taosRestful"
"github.com/taosdata/driver-go/v3/ws/tmq"
)
func main() {
db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/")
if err != nil {
panic(err)
}
defer db.Close()
prepareEnv(db)
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"ws.url": "ws://127.0.0.1:6041/rest/tmq",
"ws.message.channelLen": uint(0),
"ws.message.timeout": common.DefaultMessageTimeout,
"ws.message.writeWait": common.DefaultWriteWait,
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"group.id": "example",
"client.id": "example_consumer",
"auto.offset.reset": "latest",
})
if err != nil {
panic(err)
}
err = consumer.Subscribe("example_ws_tmq_topic", nil)
if err != nil {
panic(err)
}
_, err = db.Exec("create table example_ws_tmq.t_all(ts timestamp," +
"c1 bool," +
"c2 tinyint," +
"c3 smallint," +
"c4 int," +
"c5 bigint," +
"c6 tinyint unsigned," +
"c7 smallint unsigned," +
"c8 int unsigned," +
"c9 bigint unsigned," +
"c10 float," +
"c11 double," +
"c12 binary(20)," +
"c13 nchar(20)" +
")")
if err != nil {
panic(err)
}
go func() {
for {
_, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')")
if err != nil {
panic(err)
}
time.Sleep(time.Millisecond * 100)
}
}()
for i := 0; i < 5; i++ {
ev := consumer.Poll(500)
if ev != nil {
switch e := ev.(type) {
case *tmqcommon.DataMessage:
fmt.Printf("get message:%v\n", e)
case tmqcommon.Error:
fmt.Printf("%% Error: %v: %v\n", e.Code(), e)
panic(e)
}
consumer.Commit()
}
}
partitions, err := consumer.Assignment()
if err != nil {
panic(err)
}
for i := 0; i < len(partitions); i++ {
fmt.Println(partitions[i])
err = consumer.Seek(tmqcommon.TopicPartition{
Topic: partitions[i].Topic,
Partition: partitions[i].Partition,
Offset: 0,
}, 0)
if err != nil {
panic(err)
}
}
partitions, err = consumer.Assignment()
if err != nil {
panic(err)
}
for i := 0; i < len(partitions); i++ {
fmt.Println(partitions[i])
}
err = consumer.Close()
if err != nil {
panic(err)
}
}
func prepareEnv(db *sql.DB) {
_, err := db.Exec("create database example_ws_tmq WAL_RETENTION_PERIOD 86400")
if err != nil {
panic(err)
}
_, err = db.Exec("create topic example_ws_tmq_topic as database example_ws_tmq")
if err != nil {
panic(err)
}
}
{{#include docs/examples/go/demo/consumerws/main.go}}
```
</TabItem>
View File
@ -13,15 +13,24 @@ import RustInsert from "../07-develop/03-insert-data/_rust_sql.mdx"
import RustBind from "../07-develop/03-insert-data/_rust_stmt.mdx"
import RustSml from "../07-develop/03-insert-data/_rust_schemaless.mdx"
import RustQuery from "../07-develop/04-query-data/_rust.mdx"
import RequestId from "./_request_id.mdx";
[![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos)
`taos` is the official Rust client library for TDengine. Rust developers can develop applications to access the TDengine instance data.
`taos` provides two ways to establish connections. One is the **Native Connection**, which connects to TDengine instances via the TDengine client driver (taosc). The other is the **WebSocket connection**, which connects to TDengine instances via the WebSocket interface provided by taosAdapter. You can specify a connection type with Cargo features. By default, both types are supported. The Websocket connection can be used on any platform. The native connection can be used on any platform that the TDengine Client supports.
The source code for the Rust client library is located on [GitHub](https://github.com/taosdata/taos-connector-rust).
## Connection types
`taos` provides two ways to establish connections, of which we recommend the **WebSocket connection**.
- **Native Connection**, which connects to TDengine instances via the TDengine client driver (taosc).
- **WebSocket connection**, which connects to TDengine instances via the WebSocket interface provided by taosAdapter.
You can specify a connection type with Cargo features. By default, both types are supported.
For a detailed introduction of the connection types, please refer to: [Establish Connection](../../develop/connect/#establish-connection)
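As a minimal sketch of the two styles (assumptions: the `ws://` DSN scheme and port 6041 for the taosAdapter WebSocket endpoint, and both Cargo features enabled):

```rust
use taos::*;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Native connection through the client driver (taosc), default port 6030
    let _native = TaosBuilder::from_dsn("taos://localhost:6030")?.build()?;
    // WebSocket connection through taosAdapter; the `ws://` scheme and
    // port 6041 are assumptions for illustration
    let _ws = TaosBuilder::from_dsn("ws://localhost:6041")?.build()?;
    Ok(())
}
```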
## Supported platforms
Native connections are supported on the same platforms as the TDengine client driver.
@ -31,8 +40,11 @@ WebSocket connections are supported on all platforms that can run Rust.
| connector-rust version | TDengine version | major features |
| :----------------: | :--------------: | :--------------------------------------------------: |
| v0.9.2 | 3.0.7.0 or later | STMT: Get tag_fields and col_fields under ws. |
| v0.8.12 | 3.0.5.0 | TMQ: Get consuming progress and seek offset to consume. |
| v0.12.0 | 3.2.3.0 or later | WS supports compression |
| v0.11.0 | 3.2.0.0 | TMQ feature optimization |
| v0.10.0 | 3.1.0.0 | WS endpoint changes |
| v0.9.2 | 3.0.7.0 | STMT: Get tag_fields and col_fields under ws. |
| v0.8.12 | 3.0.5.0 | TMQ: Get consuming progress and seek offset to consume. |
| v0.8.0 | 3.0.4.0 | Support schemaless insert. |
| v0.7.6 | 3.0.3.0 | Support req_id in query. |
| v0.6.0 | 3.0.0.0 | Base features. |
@ -269,67 +281,47 @@ There are two ways to query data: Using built-in types or the [serde](https://se
### Create database and tables
```rust
use taos::*;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let dsn = "taos://localhost:6030";
let builder = TaosBuilder::from_dsn(dsn)?;
let taos = builder.build()?;
let db = "query";
// create database
taos.exec_many([
format!("DROP DATABASE IF EXISTS `{db}`"),
format!("CREATE DATABASE `{db}`"),
format!("USE `{db}`"),
])
.await?;
// create table
taos.exec_many([
// create super table
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
TAGS (`groupid` INT, `location` BINARY(16))",
// create child table
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
]).await?;
}
{{#include docs/examples/rust/nativeexample/examples/query.rs:create_db_and_table}}
```
> Querying works as in a relational database. When using subscripts to access the returned fields, numbering starts from 1. However, we recommend using field names to get values from the result set.
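For illustration, a minimal sketch of reading rows into a named struct with serde instead of positional access (assumes the `meters` table created above and the `serde` crate with the `derive` feature; `ts` is deserialized as a string for simplicity):

```rust
use taos::*;

#[derive(Debug, serde::Deserialize)]
struct Record {
    ts: String,
    current: f32,
    voltage: i32,
    phase: f32,
}

async fn dump(taos: &Taos) -> anyhow::Result<()> {
    // deserialize rows by field name rather than by subscript
    let records: Vec<Record> = taos
        .query("SELECT ts, current, voltage, phase FROM `meters`")
        .await?
        .deserialize()
        .try_collect()
        .await?;
    println!("{records:#?}");
    Ok(())
}
```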
### Insert data
<RustInsert />
```rust
{{#include docs/examples/rust/nativeexample/examples/query.rs:insert_data}}
```
### Query data
<RustQuery />
```rust
{{#include docs/examples/rust/nativeexample/examples/query.rs:query_data}}
```
### Execute SQL with req_id
This `req_id` can be used for request link tracing.
<RequestId />
```rust
let rs = taos.query_with_req_id("select * from stable where tag1 is null", 1)?;
{{#include docs/examples/rust/nativeexample/examples/query.rs:query_with_req_id}}
```
### Writing data via parameter binding
TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.
Parameter binding details see [API Reference](#stmt-api)
Parameter binding details see [API Reference](#bind-interface)
<RustBind />
```rust
{{#include docs/examples/rust/nativeexample/examples/stmt.rs}}
```
### Schemaless Writing
TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../reference/schemaless/).
<RustSml />
```rust
{{#include docs/examples/rust/nativeexample/examples/schemaless.rs}}
```
### Schemaless with req_id
@ -352,25 +344,15 @@ TDengine starts subscriptions through [TMQ](../../taos-sql/tmq/).
#### Create a Topic
```rust
taos.exec_many([
// create topic for subscription
format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
])
.await?;
{{#include docs/examples/rust/nativeexample/examples/tmq.rs:create_topic}}
```
#### Create a Consumer
You create a TMQ connection by using a DSN.
```rust
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
```
Create a consumer:
```rust
let mut consumer = tmq.build()?;
{{#include docs/examples/rust/nativeexample/examples/tmq.rs:create_consumer}}
```
#### Subscribe to consume data
@ -378,40 +360,13 @@ let mut consumer = tmq.build()?;
A single consumer can subscribe to one or more topics.
```rust
consumer.subscribe(["tmq_meters"]).await?;
{{#include docs/examples/rust/nativeexample/examples/tmq.rs:subscribe}}
```
The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed.
```rust
{
let mut stream = consumer.stream();
while let Some((offset, message)) = stream.try_next().await? {
// get information from offset
// the topic
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
// one block for one table, get table name if needed
let name = block.table_name();
let records: Vec<Record> = block.deserialize().try_collect()?;
println!(
"** table: {}, got {} records: {:#?}\n",
name.unwrap(),
records.len(),
records
);
}
}
consumer.commit(offset).await?;
}
}
{{#include docs/examples/rust/nativeexample/examples/tmq.rs:consume}}
```
Get assignments
@ -419,7 +374,7 @@ Get assignments
Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
let assignments = consumer.assignments().await.unwrap();
{{#include docs/examples/rust/nativeexample/examples/tmq.rs:assignments}}
```
#### Assignment subscription Offset
@ -429,13 +384,13 @@ Seek offset
Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
consumer.offset_seek(topic, vgroup_id, offset).await;
{{#include docs/examples/rust/nativeexample/examples/tmq.rs:seek_offset}}
```
#### Close subscriptions
```rust
consumer.unsubscribe().await;
{{#include docs/examples/rust/nativeexample/examples/tmq.rs:unsubscribe}}
```
The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory; an example DSN follows the list.
@ -447,7 +402,7 @@ The following parameters can be configured for the TMQ DSN. Only `group.id` is m
- `auto.commit.interval.ms`: Interval for automatic commits.
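For example, a complete DSN carrying these options might look like the following sketch (all values are illustrative, and option names other than `group.id` and `auto.commit.interval.ms` should be checked against the list above):

```rust
// group.id is the only mandatory option; the rest are illustrative
let dsn = "taos://localhost:6030/?group.id=example_group&auto.offset.reset=latest&enable.auto.commit=true&auto.commit.interval.ms=5000";
let tmq = TmqBuilder::from_dsn(dsn)?;
let mut consumer = tmq.build()?;
```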
#### Full Sample Code
For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
### Use with connection pool
@ -559,7 +514,7 @@ The [Taos][struct.Taos] object provides an API to perform operations on multiple
// binary/varchar to String
location: String,
}
let records: Vec<Record> = taos
.query("select * from `meters`")
.await?
@ -578,11 +533,7 @@ Note that Rust asynchronous functions and an asynchronous runtime are required.
In addition, this structure is also the entry point for Parameter Binding and Line Protocol Interface. Please refer to the specific API descriptions for usage.
<p>
<a id="stmt-api" style={{color:'#141414'}}>
Bind Interface
</a>
</p>
### Bind Interface
Similar to the C interface, Rust provides the bind interface's wrapping. First, the [Taos][struct.taos] object creates a parameter binding object [Stmt] for an SQL statement.
View File
@ -6,15 +6,25 @@ description: This document describes taospy, the TDengine Python client library.
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import RequestId from "./_request_id.mdx";
`taospy` is the official Python client library for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine.
The source code for the Python client library is hosted on [GitHub](https://github.com/taosdata/taos-connector-python).
## Connection types
`taospy` provides three connection types, of which we recommend the **WebSocket connection**.
- **Native connection**, which corresponds to the `taos` module of the `taospy` package, connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and the bind interface.
- **REST connection**, which corresponds to the `taosrest` module of the `taospy` package and is implemented through taosAdapter. Some features like schemaless writing and subscriptions are not supported.
- **WebSocket connection**, provided by the optional `taos-ws-py` package and implemented through taosAdapter. The set of features implemented by the WebSocket connection differs slightly from that of the native connection.
For a detailed introduction of the connection types, please refer to: [Establish Connection](../../develop/connect/#establish-connection)
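For orientation, the three entry points look like this minimal sketch (the endpoints are the defaults used in the examples later in this document):

```python
import taos      # native connection through taosc
import taosrest  # REST connection through taosAdapter
import taosws    # WebSocket connection through taosAdapter (taos-ws-py)

conn_native = taos.connect()
conn_rest = taosrest.connect(url="http://localhost:6041")
conn_ws = taosws.connect("taosws://localhost:6041")
```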
`taospy` is the official Python client library for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](../cpp) and [REST interface](../../reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
`taos-ws-py` is an optional package to enable using WebSocket to connect TDengine.
The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST or WebSocket interface provided by taosAdapter is referred to hereinafter as a "REST connection" or "WebSocket connection".
The source code for the Python client library is hosted on [GitHub](https://github.com/taosdata/taos-connector-python).
## Supported platforms
- The [supported platforms](../#supported-platforms) for the native connection are the same as the ones supported by the TDengine client.
@ -95,7 +105,7 @@ TDengine currently supports timestamp, number, character, Boolean type, and the
1. Install Python. The recent taospy package requires Python 3.6.2+. The earlier versions of taospy require Python 3.7+. The taos-ws-py package requires Python 3.7+. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.
If you use a native connection, you will also need to [Install Client Driver](../#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.
If you use a native connection, you will also need to [Install Client Driver](../#install-client-driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.
### Install via pip
@ -348,13 +358,7 @@ If the configuration parameters are duplicated in the parameters or client confi
<TabItem value="native" label="native connection">
```python
conn = taos.connect()
# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
conn.execute("DROP DATABASE IF EXISTS test")
conn.execute("CREATE DATABASE test")
# change database. same as execute "USE db"
conn.select_db("test")
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
{{#include docs/examples/python/create_db_native.py}}
```
</TabItem>
@ -362,12 +366,7 @@ conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (locat
<TabItem value="rest" label="REST connection">
```python
conn = taosrest.connect(url="http://localhost:6041")
# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
conn.execute("DROP DATABASE IF EXISTS test")
conn.execute("CREATE DATABASE test")
conn.execute("USE test")
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
{{#include docs/examples/python/create_db_rest.py}}
```
</TabItem>
@ -375,12 +374,7 @@ conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (locat
<TabItem value="websocket" label="WebSocket connection">
```python
conn = taosws.connect("taosws://localhost:6041")
# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
conn.execute("DROP DATABASE IF EXISTS test")
conn.execute("CREATE DATABASE test")
conn.execute("USE test")
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
{{#include docs/examples/python/create_db_ws.py}}
```
</TabItem>
@ -388,100 +382,35 @@ conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (locat
### Insert data
```python
conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)")
```
:::
`now` is an internal function whose default value is the current time of the client's computer. `now + 1s` represents the client's current time plus 1 second; the number is followed by a time unit: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months), y (years).
:::
### Basic Usage
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
##### TaosConnection class
The `TaosConnection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods).
```python title="execute method"
{{#include docs/examples/python/connection_usage_native_reference.py:insert}}
```python
{{#include docs/examples/python/insert_native.py:insert}}
```
```python title="query method"
{{#include docs/examples/python/connection_usage_native_reference.py:query}}
```
:::tip
The queried results can only be fetched once. For example, only one of `fetch_all()` and `fetch_all_into_dict()` can be used in the example above. Repeated fetches will result in an empty list.
:::
##### Use of TaosResult class
In the above example of using the `TaosConnection` class, we have shown two ways to get the result of a query: `fetch_all()` and `fetch_all_into_dict()`. In addition, `TaosResult` also provides methods to iterate through the result set by rows (`rows_iter`) or by data blocks (`blocks_iter`). Using these two methods will be more efficient in scenarios where the query has a large amount of data.
```python title="blocks_iter method"
{{#include docs/examples/python/result_set_examples.py}}
```
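A rough sketch of both iteration styles (assumes the `test` database and `weather` super table created earlier; because a result can be fetched only once, each loop issues its own query):

```python
import taos

conn = taos.connect()
conn.select_db("test")

# iterate the result set row by row
result = conn.query("SELECT * FROM weather")
for row in result.rows_iter():
    print(row)

# iterate block by block, which is more efficient for large result sets
result = conn.query("SELECT * FROM weather")
for block in result.blocks_iter():
    print(block)
```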
##### Use of the TaosCursor class
The `TaosConnection` class and the `TaosResult` class already implement all the functionality of the native interface. If you are familiar with the interfaces in the PEP249 specification, you can also use the methods provided by the `TaosCursor` class.
```python title="Use of TaosCursor"
{{#include docs/examples/python/cursor_usage_native_reference.py}}
```
:::note
The TaosCursor class uses native connections for write and query operations. In a client-side multi-threaded scenario, a cursor instance must remain exclusive to one thread and cannot be shared across threads; otherwise, the returned results may be incorrect.
The best practice for TaosCursor is to create a cursor at the beginning of a query and close it immediately after use. Please avoid reusing the same cursor for multiple executions.
:::
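A minimal sketch of the PEP 249 style, following the best practice above (assumes the `test.weather` table created earlier):

```python
import taos

conn = taos.connect()
cursor = conn.cursor()
try:
    cursor.execute("SELECT * FROM test.weather")
    # fetchall() is part of the PEP 249 cursor interface
    for row in cursor.fetchall():
        print(row)
finally:
    # close the cursor immediately after use, then the connection
    cursor.close()
    conn.close()
```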
</TabItem>
<TabItem value="rest" label="REST connection">
##### Use of the RestClient class
The `RestClient` class is a direct wrapper for the [REST API](../../reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
```python title="Use of RestClient"
{{#include docs/examples/python/rest_client_example.py}}
```python
{{#include docs/examples/python/insert_rest.py:insert}}
```
For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
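A minimal sketch (treating the URL as the first positional argument and the return value as a dict are assumptions to verify against the RestClient reference linked above):

```python
from taosrest import RestClient

client = RestClient("http://localhost:6041", user="root", password="taosdata")
res = client.sql("SELECT server_version()")
print(res)  # a dict containing column metadata and row data
```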
##### Use of TaosRestCursor class
The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.
```python title="Use of TaosRestCursor"
{{#include docs/examples/python/connect_rest_examples.py:basic}}
```
- `cursor.execute`: Used to execute arbitrary SQL statements.
- `cursor.rowcount`: For write operations, returns the number of rows successfully written. For query operations, returns the number of rows in the result set.
- `cursor.description`: Returns the description of the field. Please refer to [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html) for the specific format of the description information.
:::note
The best practice for TaosRestCursor is to create a cursor at the beginning of a query and close it immediately after use. Please avoid reusing the same cursor for multiple executions.
:::
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
The `Connection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods).
```python
{{#include docs/examples/python/connect_websocket_examples.py:basic}}
{{#include docs/examples/python/insert_ws.py:insert}}
```
- `conn.execute`: executes arbitrary SQL statements and returns the number of rows affected.
- `conn.query`: executes query SQL statements and returns the query results, as sketched below.
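A minimal sketch (the DSN matches the WebSocket connection example above; iterating the query result row by row is an assumption to verify against the taos-ws-py documentation):

```python
import taosws

conn = taosws.connect("taosws://localhost:6041")
affected = conn.execute("CREATE DATABASE IF NOT EXISTS test")
print(f"affected rows: {affected}")
result = conn.query("SHOW DATABASES")
for row in result:
    print(row)
```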
</TabItem>
</Tabs>
> NOW is an internal function. The default is the current time of the client's computer.
> `NOW + 1s` represents the current time of the client plus 1 second, followed by the number representing the unit of time: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months), y (years).
### Querying Data
<Tabs defaultValue="rest">
@ -490,7 +419,7 @@ The `Connection` class contains both an implementation of the PEP249 Connection
The `query` method of the `TaosConnection` class can be used to query data and return the result data of type `TaosResult`.
```python
{{#include docs/examples/python/connection_usage_native_reference.py:query}}
{{#include docs/examples/python/insert_native.py:query}}
```
:::tip
@ -504,7 +433,7 @@ The queried results can only be fetched once. For example, only one of `fetch_al
The `RestClient` class is a direct wrapper for the [REST API](../../reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
```python
{{#include docs/examples/python/rest_client_example.py}}
{{#include docs/examples/python/insert_rest.py:query}}
```
For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
@ -516,7 +445,7 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
The `query` method of the `TaosConnection` class can be used to query data and return the result data of type `TaosResult`.
```python
{{#include docs/examples/python/connect_websocket_examples.py:basic}}
{{#include docs/examples/python/insert_ws.py:query}}
```
</TabItem>
@ -524,61 +453,25 @@ The `query` method of the `TaosConnection` class can be used to query data and r
### Execute SQL with reqId
By using the optional req_id parameter, you can specify a request ID that can be used for tracing.
<RequestId />
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
##### TaosConnection class
Connect as introduced above, but add the `req_id` argument.
```python title="execute method"
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:insert}}
```
```python title="query method"
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:query}}
```
##### Use of TaosResult class
Fetch data as introduced above, but add the `req_id` argument.
```python title="blocks_iter method"
{{#include docs/examples/python/result_set_with_req_id_examples.py}}
```
##### Use of the TaosCursor class
The `TaosConnection` class and the `TaosResult` class already implement all the functionality of the native interface. If you are familiar with the interfaces in the PEP249 specification, you can also use the methods provided by the `TaosCursor` class.
```python title="Use of TaosCursor"
{{#include docs/examples/python/cursor_usage_native_reference_with_req_id.py}}
```python
{{#include docs/examples/python/insert_native.py:req_id}}
```
</TabItem>
<TabItem value="rest" label="REST connection">
##### Use of the RestClient class
The `RestClient` class is a direct wrapper for the [REST API](../../reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
```python title="Use of RestClient"
{{#include docs/examples/python/rest_client_with_req_id_example.py}}
```
For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
##### Use of TaosRestCursor class
Connect as introduced above, but add the `req_id` argument.
```python title="Use of TaosRestCursor"
{{#include docs/examples/python/connect_rest_with_req_id_examples.py:basic}}
```python
{{#include docs/examples/python/insert_rest.py:req_id}}
```
- `cursor.execute`: Used to execute arbitrary SQL statements.
- `cursor.rowcount`: For write operations, returns the number of rows successfully written. For query operations, returns the number of rows in the result set.
- `cursor.description`: Returns the description of the field. Please refer to [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html) for the specific format of the description information.
</TabItem>
@ -587,36 +480,7 @@ As the way to connect introduced above but add `req_id` argument.
Connect as introduced above, but add the `req_id` argument.
```python
{{#include docs/examples/python/connect_websocket_with_req_id_examples.py:basic}}
```
- `conn.execute`: can use to execute arbitrary SQL statements, and return the number of rows affected.
- `conn.query`: can use to execute query SQL statements, and return the query results.
</TabItem>
</Tabs>
### Used with pandas
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
```python
{{#include docs/examples/python/conn_native_pandas.py}}
```
</TabItem>
<TabItem value="rest" label="REST connection">
```python
{{#include docs/examples/python/conn_rest_pandas.py}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
```python
{{#include docs/examples/python/conn_websocket_pandas.py}}
{{#include docs/examples/python/insert_ws.py:req_id}}
```
</TabItem>
@ -629,126 +493,15 @@ The Python client library provides a parameter binding api for inserting data. S
<Tabs>
<TabItem value="native" label="native connection">
##### Create Stmt
Call the `statement` method in `Connection` to create the `stmt` for parameter binding.
```
import taos
conn = taos.connect()
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
```
##### parameter binding
Call the `new_multi_binds` function to create the parameter list for parameter bindings.
```
params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
```
Call the `bind_param` (for a single row) method or the `bind_param_batch` (for multiple rows) method to set the values.
```
stmt.bind_param_batch(params)
```
##### execute sql
Call `execute` method to execute sql.
```
stmt.execute()
```
##### Close Stmt
```
stmt.close()
```
##### Example
```python
{{#include docs/examples/python/stmt_example.py}}
{{#include docs/examples/python/stmt_native.py:stmt}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
##### Create Stmt
Call the `statement` method in `Connection` to create the `stmt` for parameter binding.
```
import taosws
conn = taosws.connect('taosws://localhost:6041/test')
stmt = conn.statement()
```
##### Prepare sql
Call `prepare` method in stmt to prepare sql.
```
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
```
##### parameter binding
Call the `bind_param` method to bind parameters.
```
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
```
Call the `add_batch` method to add parameters to the batch.
```
stmt.add_batch()
```
##### execute sql
Call `execute` method to execute sql.
```
stmt.execute()
```
##### Close Stmt
```
stmt.close()
```
##### Example
```python
{{#include docs/examples/python/stmt_websocket_example.py}}
{{#include docs/examples/python/stmt_ws.py:stmt}}
```
</TabItem>
</Tabs>
@ -758,46 +511,18 @@ stmt.close()
The client library supports schemaless insert.
<Tabs defaultValue="list">
<TabItem value="list" label="List Insert">
##### Simple insert
<TabItem value="list" label="native connection">
```python
{{#include docs/examples/python/schemaless_insert.py}}
```
##### Insert with ttl argument
```python
{{#include docs/examples/python/schemaless_insert_ttl.py}}
```
##### Insert with req_id argument
```python
{{#include docs/examples/python/schemaless_insert_req_id.py}}
{{#include docs/examples/python/schemaless_native.py}}
```
</TabItem>
<TabItem value="raw" label="Raw Insert">
##### Simple insert
<TabItem value="raw" label="WebSocket connection">
```python
{{#include docs/examples/python/schemaless_insert_raw.py}}
```
##### Insert with ttl argument
```python
{{#include docs/examples/python/schemaless_insert_raw_ttl.py}}
```
##### Insert with req_id argument
```python
{{#include docs/examples/python/schemaless_insert_raw_req_id.py}}
{{#include docs/examples/python/schemaless_ws.py}}
```
</TabItem>
@ -808,11 +533,12 @@ Client library support schemaless insert.
There is an optional parameter called `req_id` in the `schemaless_insert` and `schemaless_insert_raw` methods. This `req_id` can be used for request link tracing.
```python
{{#include docs/examples/python/schemaless_insert_req_id.py}}
```
```python
{{#include docs/examples/python/schemaless_insert_raw_req_id.py}}
conn.schemaless_insert(
lines=lineDemo,
protocol=taos.SmlProtocol.LINE_PROTOCOL,
precision=taos.SmlPrecision.NANO_SECONDS,
req_id=1,
)
```
### Data Subscription
@ -821,194 +547,56 @@ The client library supports data subscription. For more information about subscription
#### Create a Topic
To create a topic, please refer to [Data Subscription](../../develop/tmq/#create-a-topic).
```python
{{#include docs/examples/python/tmq_native.py:create_topic}}
```
#### Create a Consumer
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
The consumer in the client library contains the subscription API. The syntax for creating a consumer is `consumer = Consumer(configs)`. For more subscription API parameters, please refer to [Data Subscription](../../develop/tmq/#create-a-consumer).
```python
from taos.tmq import Consumer
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
{{#include docs/examples/python/tmq_native.py:create_consumer}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
In addition to native connections, the client library also supports subscriptions via WebSocket.
The syntax for creating a consumer is `consumer = Consumer(conf=configs)`. You need to set the `td.connect.websocket.scheme` parameter to "ws" in the configuration. For more subscription API parameters, please refer to [Data Subscription](../../develop/tmq/#create-a-consumer).
```python
import taosws
consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
```
</TabItem>
</Tabs>
#### Subscribe to a Topic
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
The `subscribe` function is used to subscribe to a list of topics.
```python
consumer.subscribe(['topic1', 'topic2'])
{{#include docs/examples/python/tmq_native.py:subscribe}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
The `subscribe` function is used to subscribe to a list of topics.
```python
consumer.subscribe(['topic1', 'topic2'])
```
</TabItem>
</Tabs>
#### Consume messages
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data.
```python
while True:
message = consumer.poll(1)
if not message:
continue
err = message.error()
if err is not None:
raise err
val = message.value()
for block in val:
print(block.fetchall())
{{#include docs/examples/python/tmq_native.py:consume}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out.
```python
while True:
message = consumer.poll(1)
if not message:
continue
for block in message:
for row in block:
print(row)
```
</TabItem>
</Tabs>
#### Assignment subscription Offset
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
The `assignment` function is used to get the assignment of the topic.
```python
assignments = consumer.assignment()
{{#include docs/examples/python/tmq_native.py:assignment}}
```
The `seek` function is used to reset the assignment of the topic.
```python
tp = TopicPartition(topic='topic1', partition=0, offset=0)
consumer.seek(tp)
{{#include docs/examples/python/tmq_native.py:consume}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
The `assignment` function is used to get the assignment of the topic.
```python
assignments = consumer.assignment()
```
The `seek` function is used to reset the assignment of the topic.
```python
consumer.seek(topic='topic1', partition=0, offset=0)
```
</TabItem>
</Tabs>
#### Close subscriptions
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
You should unsubscribe from the topics and close the consumer after consuming.
```python
consumer.unsubscribe()
consumer.close()
{{#include docs/examples/python/tmq_native.py:unsubscribe}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
You should unsubscribe from the topics and close the consumer after consuming.
```python
consumer.unsubscribe()
consumer.close()
```
</TabItem>
</Tabs>
#### Full Sample Code
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
```python
{{#include docs/examples/python/tmq_example.py}}
{{#include docs/examples/python/tmq_native.py}}
```
```python
{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
```python
{{#include docs/examples/python/tmq_websocket_example.py}}
```
```python
{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
```
</TabItem>
</Tabs>
### Other sample programs
| Example program links | Example program content |
View File
@ -7,35 +7,30 @@ toc_max_heading_level: 4
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import RequestId from "./_request_id.mdx";
import Preparition from "./_preparation.mdx";
import NodeInsert from "../07-develop/03-insert-data/_js_sql.mdx";
import NodeInfluxLine from "../07-develop/03-insert-data/_js_line.mdx";
import NodeOpenTSDBTelnet from "../07-develop/03-insert-data/_js_opts_telnet.mdx";
import NodeOpenTSDBJson from "../07-develop/03-insert-data/_js_opts_json.mdx";
import NodeQuery from "../07-develop/04-query-data/_js.mdx";
`@tdengine/websocket` is the official Node.js client library for TDengine. Node.js developers can develop applications to access the TDengine instance data.
`@tdengine/client` and `@tdengine/rest` are the official Node.js client libraries. Node.js developers can develop applications to access TDengine instance data. Note: The client libraries for TDengine 3.0 are different than those for TDengine 2.x. The new client libraries do not support TDengine 2.x.
The source code for the Node.js client library is hosted on [GitHub](https://github.com/taosdata/taos-connector-node/tree/main).
`@tdengine/client` is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. `@tdengine/rest` is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The REST client library can run on any platform, but performance is slightly degraded, and the interface implements a somewhat different set of functional features than the native interface.
## Connection types
The source code for the Node.js client libraries is located on [GitHub](https://github.com/taosdata/taos-connector-node/tree/3.0).
The Node.js client library supports only WebSocket connections through taosAdapter.
For a detailed introduction of the connection types, please refer to: [Establish Connection](../../develop/connect/#establish-connection)
## Supported platforms
The platforms supported by the native client library are the same as those supported by the TDengine client driver.
The REST client library supports all platforms that can run Node.js.
The Node.js client library requires Node.js 14 or later.
## Version support
## Recent update logs
Please refer to [version support list](../#version-support)
| Node.js connector version | major changes | TDengine version |
| :-----------------------: | :------------------: | :----------------: |
| 3.1.0 | new version, supports websocket | 3.2.0.0 or later |
## Supported features
<Tabs defaultValue="native">
<TabItem value="native" label="Native connection">
1. Connection Management
2. General Query
3. Continuous Query
@ -43,294 +38,300 @@ Please refer to [version support list](../#version-support)
5. Subscription
6. Schemaless
</TabItem>
<TabItem value="rest" label="REST connection">
## Handling exceptions
1. Connection Management
2. General Query
3. Continuous Query
After an error is reported, the error message and error code can be obtained through try-catch. The Node.js client library error codes are in the range 100 to 110; other error codes come from TDengine function modules. A sketch of catching an error follows the table below.
</TabItem>
</Tabs>
Please refer to the table below for error codes, descriptions, and suggested actions.
| Error Code | Description | Suggested Actions |
| ---------- | -------------------------------------------------------------| -------------------------------------------------------------------------------------------------- |
| 100 | invalid variables | The parameter is invalid. Check the interface specification and adjust the parameter type and size.|
| 101 | invalid url | URL error, please check if the url is correct. |
| 102 | received server data but did not find a callback for processing | Client waiting timeout, please check network and taosAdapter status. |
| 103 | invalid message type | Please check if the client version and server version match. |
| 104 | connection creation failed | Connection creation failed. Please check the network and TaosAdapter status. |
| 105 | websocket request timeout | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to taosAdapter. |
| 106 | authentication fail | Authentication failed, please check if the username and password are correct. |
| 107 | unknown sql type in tdengine | Check the data type supported by TDengine. |
| 108 | connection has been closed | The connection has been closed, check the connection status, or recreate the connection to execute the relevant instructions. |
| 109 | fetch block data parse fail | Please check if the client version and server version match. |
| 110 | websocket connection has reached its maximum limit | Please check if the connection has been closed after use |
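A minimal sketch of catching an error (assumptions: the `WSConfig` constructor argument, the `sqlConnect`/`exec` entry points used in the connection section below, and reading `code`/`message` fields off the caught error):

```javascript
const taos = require("@tdengine/websocket");

async function main() {
    let conn = null;
    try {
        let conf = new taos.WSConfig("ws://localhost:6041");
        conf.setUser("root");
        conf.setPwd("taosdata");
        conn = await taos.sqlConnect(conf);
        await conn.exec("USE not_exist_db"); // triggers a server-side error
    } catch (err) {
        // compare the code against the table above
        console.error(`code: ${err.code}, message: ${err.message}`);
    } finally {
        if (conn) await conn.close();
        taos.destroy();
    }
}

main();
```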
## Data Type Mapping
The table below describes the mapping between TDengine data types and Node.js data types.
| TDengine Data Type | Node.js Data Type|
|-------------------|-------------|
| TIMESTAMP | bigint |
| TINYINT | number |
| SMALLINT | number |
| INT | number |
| BIGINT | bigint |
| TINYINT UNSIGNED | number |
| SMALLINT UNSIGNED | number |
| INT UNSIGNED | number |
| BIGINT UNSIGNED | bigint |
| FLOAT | number |
| DOUBLE | number |
| BOOL | boolean |
| BINARY | string |
| NCHAR | string |
| JSON | string |
| VARBINARY | ArrayBuffer |
| GEOMETRY | ArrayBuffer |
**Note**: JSON types are supported only in TAG columns.
## Installation Steps
### Pre-installation preparation
- Install the Node.js development environment
- If you are using the REST client library, skip this step. However, if you use the native client library, please install the TDengine client driver. Please refer to [Install Client Driver](../#Install-Client-Driver) for more details. We use [node-gyp](https://github.com/nodejs/node-gyp) to interact with TDengine instances and also need to install some dependencies mentioned below depending on the specific OS.
- Install the Node.js development environment, using version 14 or above. Download link: https://nodejs.org/en/download/
<Tabs defaultValue="Linux">
<TabItem value="Linux" label="Linux system installation dependencies">
- `python` (recommended for `v2.7` , `v3.x.x` currently not supported)
- `@tdengine/client` 3.0.0 supports Node.js LTS v10.9.0 or later and Node.js LTS v12.8.0 or later. Older versions may be incompatible.
- `make`
- C compiler, [GCC](https://gcc.gnu.org) v4.8.5 or later.
</TabItem>
<TabItem value="macOS" label="macOS installation dependencies">
- `python` (recommended for `v2.7` , `v3.x.x` currently not supported)
- `@tdengine/client` 3.0.0 currently supports Node.js from v12.22.12, but only later versions of v12. Other versions may be incompatible.
- `make`
- C compiler, [GCC](https://gcc.gnu.org) v4.8.5 or later.
</TabItem>
<TabItem value="Windows" label="Windows system installation dependencies">
- Installation method 1
Use Microsoft's [windows-build-tools](https://github.com/felixrieseberg/windows-build-tools) to execute `npm install --global --production` from the `cmd` command-line interface to install all the necessary tools.
- Installation method 2
Manually install the following tools.
- Install Visual Studio related: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) or [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community)
- Install [Python](https://www.python.org/downloads/) 2.7 (`v3.x.x` is not supported) and execute `npm config set python python2.7`.
- Go to the `cmd` command-line interface, `npm config set msvs_version 2017`
Refer to Microsoft's Node.js User Manual [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules).
If using ARM64 Node.js on Windows 10 ARM, you must add "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64".
</TabItem>
</Tabs>
### Install via npm
<Tabs defaultValue="install_rest">
<TabItem value="install_native" label="Install native clieny library">
### Install Node.js client library via npm
```bash
npm install @tdengine/client
npm install @tdengine/websocket
```
</TabItem>
<TabItem value="install_rest" label="Install REST client library">
```bash
npm install @tdengine/rest
```
</TabItem>
</Tabs>
### Verify
<Tabs defaultValue="native">
<TabItem value="native" label="Native client library">
After installing the TDengine client, use the `nodejsChecker.js` program to verify that the current environment supports Node.js access to TDengine.
Detailed verification steps:
- Create an installation test folder such as `~/tdengine-test`. Download the [nodejsChecker.js source code](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/nodejsChecker.js) to your local machine.
- Create an installation test folder such as `~/tdengine-test`. Download the [nodejsChecker.js](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/nodejsChecker.js) to your local machine.
- Execute the following command from the command-line.
```bash
npm init -y
npm install @tdengine/client
node nodejsChecker.js host=localhost
npm init -y
npm install @tdengine/websocket
node nodejsChecker.js host=localhost
```
- After executing the above steps, the command-line will output the result of `nodejsChecker.js` connecting to the TDengine instance and performing a simple insert and query.
</TabItem>
<TabItem value="rest" label="REST client library">
After installing the TDengine client, use the `restChecker.js` program to verify that the current environment supports Node.js access to TDengine.
Detailed verification steps:
- Create an installation test folder such as `~/tdengine-test`. Download the [restChecker.js source code](https://github.com/taosdata/TDengine/tree/3.0/docs/examples/node/restexample/restChecker.js) to your local.
- Execute the following command from the command-line.
```bash
npm init -y
npm install @tdengine/rest
node restChecker.js
```
- After executing the above steps, the command-line will output the result of `restChecker.js` connecting to the TDengine instance and performing a simple insert and query.
</TabItem>
</Tabs>
## Establishing a connection
Please choose to use one of the client libraries.
Install and import the `@tdengine/websocket` package.
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
Install and import the `@tdengine/client` package.
**Note**: After using the Node.js client library, it is necessary to call `taos.destroy()` to release connector resources.
```javascript
//A cursor also needs to be initialized in order to interact with TDengine from Node.js.
const taos = require("@tdengine/client");
var conn = taos.connect({
host: "127.0.0.1",
user: "root",
password: "taosdata",
config: "/etc/taos",
port: 0,
});
var cursor = conn.cursor(); // Initializing a new cursor
const taos = require("@tdengine/websocket");
//Close a connection
conn.close();
//database operations......
taos.destroy();
```
</TabItem>
<TabItem value="rest" label="REST connection">
Install and import the `@tdengine/rest` package.
```javascript
//A cursor also needs to be initialized in order to interact with TDengine from Node.js.
import { options, connect } from "@tdengine/rest";
options.path = "/rest/sql";
// set host
options.host = "localhost";
// set other options like user/passwd
let conn = connect(options);
let cursor = conn.cursor();
WSConfig configures the WebSocket connection parameters as follows:
getToken(): string | undefined | null;
setToken(token: string): void;
getUser(): string | undefined | null;
setUser(user: string): void;
getPwd(): string | undefined | null;
setPwd(pws: string): void;
getDb(): string | undefined | null;
setDb(db: string): void;
getUrl(): string;
setUrl(url: string): void;
setTimeOut(ms: number): void;
getTimeOut(): number | undefined | null;
```
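As a sketch of assembling a configuration with these setters (the endpoint and credentials are illustrative, and `taos.sqlConnect` as the connection entry point is an assumption; see the connection example below):

```javascript
const taos = require("@tdengine/websocket");

async function createConnect() {
    let conf = new taos.WSConfig("ws://localhost:6041");
    conf.setUser("root");
    conf.setPwd("taosdata");
    conf.setDb("power");
    conf.setTimeOut(5000);
    return await taos.sqlConnect(conf);
}
```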
</TabItem>
</Tabs>
```javascript
{{#include docs/examples/node/websocketexample/sql_example.js:createConnect}}
```
## Usage examples
### Write data
### Create database and tables
#### SQL Write
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
<NodeInsert />
</TabItem>
<TabItem value="rest" label="REST connection">
```js
{{#include docs/examples/node/restexample/insert_example.js}}
```javascript
{{#include docs/examples/node/websocketexample/sql_example.js:create_db_and_table}}
```
</TabItem>
</Tabs>
**Note**: If you do not specify the database with `USE power`, all subsequent operations on a table must prefix the table name with the database name, such as `power.meters`.
#### InfluxDB line protocol write
### Insert data
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
```javascript
{{#include docs/examples/node/websocketexample/sql_example.js:insertData}}
```
<NodeInfluxLine />
</TabItem>
</Tabs>
#### OpenTSDB Telnet line protocol write
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
<NodeOpenTSDBTelnet />
</TabItem>
</Tabs>
#### OpenTSDB JSON line protocol write
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
<NodeOpenTSDBJson />
</TabItem>
</Tabs>
> NOW is an internal function. The default is the current time of the client's computer.
> `NOW + 1s` represents the current time of the client plus 1 second, followed by the number representing the unit of time: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months), y (years).
### Querying data
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
<NodeQuery />
</TabItem>
<TabItem value="rest" label="REST connection">
```js
{{#include docs/examples/node/restexample/query_example.js}}
```javascript
{{#include docs/examples/node/websocketexample/sql_example.js:queryData}}
```
</TabItem>
</Tabs>
> The structure of the returned data is as follows:
```javascript
wsRow:meta:=> [
{ name: 'ts', type: 'TIMESTAMP', length: 8 },
{ name: 'current', type: 'FLOAT', length: 4 },
{ name: 'voltage', type: 'INT', length: 4 },
{ name: 'phase', type: 'FLOAT', length: 4 },
{ name: 'location', type: 'VARCHAR', length: 64},
{ name: 'groupid', type: 'INT', length: 4 }
]
wsRow:data:=> [
[ 1714013737536n, 12.3, 221, 0.31, 'California.SanFrancisco', 3 ]
]
```
### Execute SQL with reqId
<RequestId />
```javascript
{{#include docs/examples/node/websocketexample/sql_example.js:sqlWithReqid}}
```
### Writing data via parameter binding
The Node.js client library provides a parameter binding API for inserting data. Similar to most databases, TDengine currently only supports the question mark `?` to indicate the parameters to be bound.
**Note**: When specifying the database together with the table name, do not use `db.?` in `prepareStatement`; use `?` directly and specify the database in `setTableName`, for example: `prepareStatement.setTableName("db.t1")`.
Sample Code:
```javascript
{{#include docs/examples/node/websocketexample/stmt_example.js}}
```
The methods to set TAGS values or VALUES columns:
```javascript
setBoolean(params: any[]): void;
setTinyInt(params: any[]): void;
setUTinyInt(params: any[]): void;
setSmallInt(params: any[]): void;
setUSmallInt(params: any[]): void;
setInt(params: any[]): void;
setUInt(params: any[]): void;
setBigint(params: any[]): void;
setUBigint(params: any[]): void;
setFloat(params: any[]): void;
setDouble(params: any[]): void;
setVarchar(params: any[]): void;
setBinary(params: any[]): void;
setNchar(params: any[]): void;
setJson(params: any[]): void;
setVarBinary(params: any[]): void;
setGeometry(params: any[]): void;
setTimestamp(params: any[]): void;
```
**Note**: JSON types are supported only in TAG columns.
### Schemaless Writing
TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../reference/schemaless/).
```javascript
{{#include docs/examples/node/websocketexample/line_example.js}}
```
### Schemaless with reqId
This `reqId` can be used for request link tracing.
```javascript
await wsSchemaless.schemalessInsert([influxdbData], SchemalessProto.InfluxDBLineProtocol, Precision.NANO_SECONDS, ttl, reqId);
await wsSchemaless.schemalessInsert([telnetData], SchemalessProto.OpenTSDBTelnetLineProtocol, Precision.NANO_SECONDS, ttl, reqId);
await wsSchemaless.schemalessInsert([jsonData], SchemalessProto.OpenTSDBJsonFormatProtocol, Precision.NANO_SECONDS, ttl, reqId);
```
### Data Subscription
The TDengine Node.js client library supports subscription functionality with the following application API.
#### Create a Topic
```javascript
{{#include docs/examples/node/websocketexample/tmq_example.js:create_topic}}
```
#### Create a Consumer
```javascript
{{#include docs/examples/node/websocketexample/tmq_example.js:create_consumer}}
```
**Parameter Description**
- taos.TMQConstants.CONNECT_USER: username.
- taos.TMQConstants.CONNECT_PASS: password.
- taos.TMQConstants.GROUP_ID: Specifies the group that the consumer is in.
- taos.TMQConstants.CLIENT_ID: client id.
- taos.TMQConstants.WS_URL: The URL address of taosAdapter.
- taos.TMQConstants.AUTO_OFFSET_RESET: Where to start consuming when no committed offset exists; optional values are earliest and latest, with latest as the default.
- taos.TMQConstants.ENABLE_AUTO_COMMIT: Specifies whether to commit automatically.
- taos.TMQConstants.AUTO_COMMIT_INTERVAL_MS: Interval for automatic commits; the default value is 5000 ms.
- taos.TMQConstants.CONNECT_MESSAGE_TIMEOUT: Socket timeout in milliseconds; the default value is 10000 ms. It only takes effect when using the WebSocket connection.
For more information, see [Consumer Parameters](../../develop/tmq). Note that the default value of auto.offset.reset in data subscription on the TDengine server has changed since version 3.2.0.0.
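As a sketch, a configuration built from these constants might look like the following (values are illustrative, and `taos.tmqConnect` as the consumer entry point is an assumption to verify against the full sample below):

```javascript
// inside an async function; constant names are documented above
let configMap = new Map([
    [taos.TMQConstants.GROUP_ID, "group_1"],
    [taos.TMQConstants.CLIENT_ID, "client_1"],
    [taos.TMQConstants.CONNECT_USER, "root"],
    [taos.TMQConstants.CONNECT_PASS, "taosdata"],
    [taos.TMQConstants.AUTO_OFFSET_RESET, "latest"],
    [taos.TMQConstants.WS_URL, "ws://localhost:6041"],
    [taos.TMQConstants.ENABLE_AUTO_COMMIT, "true"],
    [taos.TMQConstants.AUTO_COMMIT_INTERVAL_MS, "1000"],
]);
let consumer = await taos.tmqConnect(configMap);
```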
#### Subscribe to consume data
```javascript
{{#include docs/examples/node/websocketexample/tmq_example.js:subscribe}}
```
#### Assignment subscription Offset
```javascript
{{#include docs/examples/node/websocketexample/tmq_example.js:assignment}}
```
#### Close subscriptions
```javascript
// Unsubscribe
consumer.unsubscribe();
// Close consumer
consumer.close()
// free connector resource
taos.destroy();
```
For more information, see [Data Subscription](../../develop/tmq).
#### Full Sample Code
```javascript
{{#include docs/examples/node/websocketexample/tmq_example.js}}
```
## More sample programs
| Sample Programs | Sample Program Description |
| --------------- | -------------------------- |
| [basicUse](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/queryExample.js) | Basic operations such as establishing connections and running SQL commands. |
| [stmtBindBatch](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/bindParamBatch.js) | Binding multi-line parameter insertion. |
| [stmtBindSingleParamBatch](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/bindSingleParamBatch.js) | Columnar binding parameter insertion. |
| [stmtQuery](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/stmtQuery.js) | Binding parameter query. |
| [schemaless insert](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/schemaless.js) | Schemaless insert. |
| [TMQ](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/tmq.js) | Using data subscription. |
| [asyncQuery](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/asyncQueryExample.js) | Using asynchronous queries. |
| [REST](https://github.com/taosdata/taos-connector-node/blob/3.0/typescript-rest/example/example.ts) | Using TypeScript with the REST client library. |
| [sql_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/sql_example.js) | Basic operations such as establishing connections and running SQL commands. |
| [stmt_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/stmt_example.js) | Binding multi-line parameter insertion. |
| [line_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/line_example.js) | Schemaless insert. |
| [telnet_line_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/telnet_line_example.js) | OpenTSDB Telnet insert. |
| [json_line_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/json_line_example.js) | OpenTSDB JSON insert. |
| [tmq_example](https://github.com/taosdata/TDengine/tree/main/docs/examples/node/websocketexample/tmq_example.js) | Using data subscription. |
## Usage limitations
- `@tdengine/client` 3.0.0 supports Node.js LTS v12.8.0 to 12.9.1 and v10.9.0 to 10.20.0.
- The Node.js client library `@tdengine/websocket` supports Node.js 14 or higher.
- It supports only WebSocket connections, so taosAdapter must be started in advance.
- After you are done with a connection, call `taos.destroy()` to release connector resources.
## Frequently Asked Questions
1. Using REST connections requires starting taosAdapter.
```bash
sudo systemctl start taosadapter
```
2. Node.js versions
`@tdengine/client` supports Node.js v10.9.0 to 10.20.0 and 12.8.0 to 12.9.1.
3. "Unable to establish connection", "Unable to resolve FQDN"
Usually, the root cause is an incorrect FQDN configuration. You can refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot.
## Important update records
### Native client library
| package name | version | TDengine version | Description |
|------------------|---------|---------------------|------------------------------------------------------------------|
| @tdengine/client | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
| td2.0-connector | 2.0.12 | 2.4.x; 2.5.x; 2.6.x | Fixed cursor.close() bug. |
| td2.0-connector | 2.0.11 | 2.4.x; 2.5.x; 2.6.x | Supports parameter binding, JSON tags and schemaless interface |
| td2.0-connector | 2.0.10 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription |
### REST client library
| package name | version | TDengine version | Description |
|----------------------|---------|---------------------|---------------------------------------------------------------------------|
| @tdengine/rest | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
| td2.0-rest-connector | 1.0.7 | 2.4.x; 2.5.x; 2.6.x | Removed default port 6041 |
| td2.0-rest-connector | 1.0.6 | 2.4.x; 2.5.x; 2.6.x | Fixed affectRows bug with create, insert, update, and alter. |
| td2.0-rest-connector | 1.0.5 | 2.4.x; 2.5.x; 2.6.x | Support cloud token |
| td2.0-rest-connector | 1.0.3 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries |
1. "Unable to establish connection" or "Unable to resolve FQDN"
**Solution**: Usually, the root cause is an incorrect FQDN configuration. You can refer to this section in the [FAQ](../../train-faq/faq/#2-how-can-i-resolve-the-unable-to-establish-connection-error) to troubleshoot.

View File

@ -7,19 +7,23 @@ toc_max_heading_level: 4
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import RequestId from "./_request_id.mdx";
`TDengine.Connector` is the C# language connector provided by TDengine. C# developers can use it to develop C# application software that accesses TDengine cluster data.
`TDengine.Connector` supports establishing connections to a running TDengine instance through the TDengine client driver (taosc) and provides functions such as data writing, querying, data subscription, schemaless writing, and parameter-binding writes. Since v3.0.1, `TDengine.Connector` also supports WebSocket connections, which provide data writing, querying, and parameter-binding writes.
## Connection types
This article introduces how to install `TDengine.Connector` in a Linux or Windows environment, and connect to the TDengine cluster through `TDengine.Connector` to perform basic operations such as data writing and querying.
`TDengine.Connector` provides 2 connection types.
* **Native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface.
* **WebSocket connection**, which is implemented through taosAdapter (since v3.0.1). The set of features implemented by the WebSocket connection differs slightly from that of the native connection.
For a detailed introduction of the connection types, please refer to: [Establish Connection](../../develop/connect/#establish-connection)
## Compatibility
:::warning
* `TDengine.Connector` version 3.1.0 has been completely refactored and is no longer compatible with 3.0.2 and previous versions. For 3.0.2 documents, please refer to [nuget](https://www.nuget.org/packages/TDengine.Connector/3.0.2)
* `TDengine.Connector` 3.x is not compatible with TDengine 2.x. If you need to use the C# connector in an environment running TDengine 2.x version, please use the 1.x version of TDengine.Connector.
:::
The source code of `TDengine.Connector` is hosted on [GitHub](https://github.com/taosdata/taos-connector-dotnet/tree/3.0).
## Supported platforms
@ -31,9 +35,12 @@ TDengine no longer supports 32-bit Windows platforms.
## Version support
| **Connector version** | **TDengine version** |
|-----------------------|----------------------|
| 3.1.0 | 3.2.1.0/3.1.1.18 |
| **Connector version** | **TDengine version** | **major features** |
|-----------------------|----------------------|--------------------------------------|
| 3.1.3 | 3.2.1.0/3.1.1.18 | support WebSocket reconnect |
| 3.1.2 | 3.2.1.0/3.1.1.18 | fix schemaless result release |
| 3.1.1 | 3.2.1.0/3.1.1.18 | support varbinary and geometry |
| 3.1.0 | 3.2.1.0/3.1.1.18 | WebSocket uses native implementation |
## Handling exceptions
@ -58,6 +65,8 @@ TDengine no longer supports 32-bit Windows platforms.
| BINARY | byte[] |
| NCHAR | string (utf-8 encoding) |
| JSON | byte[] |
| VARBINARY | byte[] |
| GEOMETRY | byte[] |
**Note**: The JSON type is only supported in tag columns.
@ -67,7 +76,7 @@ TDengine no longer supports 32-bit Windows platforms.
* Install [.NET SDK](https://dotnet.microsoft.com/download)
* [Nuget Client](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (optional installation)
* Install the TDengine client driver. For specific steps, please refer to [Installing the client driver](../#install-client-driver)
* For native connections only, you need to install the TDengine client driver. For specific steps, please refer to [Installing the client driver](../#install-client-driver)
### Install the connectors
@ -127,6 +136,12 @@ The parameters supported by `ConnectionStringBuilder` are as follows:
* connTimeout: WebSocket connection timeout, only valid when the protocol is WebSocket, the default is 1 minute, use the `TimeSpan.Parse` method to parse the string into a `TimeSpan` object.
* readTimeout: WebSocket read timeout, only valid when the protocol is WebSocket, the default is 5 minutes, use the `TimeSpan.Parse` method to parse the string into a `TimeSpan` object.
* writeTimeout: WebSocket write timeout, only valid when the protocol is WebSocket, the default is 10 seconds, use the `TimeSpan.Parse` method to parse the string into a `TimeSpan` object.
* enableCompression: Whether to enable WebSocket compression (effective for dotnet version 6 and above, connector version 3.1.1 and above). The default is false.
* autoReconnect: Whether to enable WebSocket reconnect (connector version 3.1.3 and above). The default is false.
> **Note**: Enabling automatic reconnection is effective only for simple SQL statement execution, schemaless writing, and data subscription; it is not effective for parameter binding. Automatic reconnection only restores the database specified when the connection was established; it does not track a later `use db` statement that switches databases.
* reconnectRetryCount: The number of reconnection retries (connector version 3.1.3 and above). The default is 3.
* reconnectIntervalMs: The interval between reconnection retries (connector version 3.1.3 and above). The default is 2000.
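A minimal sketch of a WebSocket connection string enabling the reconnect options above (host, port, and credentials are assumptions for illustration):
```csharp
using TDengine.Driver;
using TDengine.Driver.Client;

// assumed endpoint and credentials; reconnect options from the list above
var builder = new ConnectionStringBuilder(
    "protocol=WebSocket;host=localhost;port=6041;useSSL=false;" +
    "username=root;password=taosdata;" +
    "autoReconnect=true;reconnectRetryCount=3;reconnectIntervalMs=2000");
using (var client = DbDriver.Open(builder))
{
    // within the configured retries, the connection reconnects transparently
}
```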
### Specify the URL and Properties to get the connection
@ -407,6 +422,8 @@ namespace WSQuery
### execute SQL with reqId
<RequestId />
<Tabs defaultValue="native" groupId="connect">
<TabItem value="native" label="native connection">
@ -800,6 +817,10 @@ The configuration parameters supported by consumer are as follows:
* auto.commit.interval.ms: The interval for automatically submitting offsets, the default is 5000 milliseconds
* auto.offset.reset: When offset does not exist, where to start consumption, the optional value is earliest or latest, the default is latest
* msg.with.table.name: Whether the message contains the table name
* ws.message.enableCompression: Whether to enable WebSocket compression (effective for dotnet version 6 and above, connector version 3.1.1 and above). The default is false.
* ws.autoReconnect: Whether to enable WebSocket reconnect (connector version 3.1.3 and above). The default is false.
* ws.reconnect.retry.count: The number of reconnection retries (connector version 3.1.3 and above). The default is 3.
* ws.reconnect.interval.ms: The interval between reconnection retries (connector version 3.1.3 and above). The default is 2000.
Supports subscribing to the result set `Dictionary<string, object>` where the key is the column name and the value is the column value.
@ -1185,4 +1206,4 @@ namespace WSADO
### More sample programs
[sample program](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples)
[sample program](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples)

View File

@ -6,32 +6,35 @@ title: TDengine ODBC
## Introduction
TDengine ODBC driver is a driver specifically designed for TDengine based on the ODBC standard. It can be used by ODBC based applications on Windows to access a local or remote TDengine cluster or TDengine cloud service, like [PowerBI](https://powerbi.microsoft.com).
The TDengine ODBC driver is a driver specifically designed for TDengine based on the ODBC standard. It can be used by ODBC based applications,like [PowerBI](https://powerbi.microsoft.com), on Windows, to access a local or remote TDengine cluster or an instance in the TDengine Cloud service.
TDengine ODBC provides two kinds of connections, native connection and WebSocket connection. You can choose to use either one for your convenience, WebSocket is recommded choice and you must use WebSocket if you are trying to access TDengine cloud service.
TDengine ODBC provides two kinds of connections, native connection and WebSocket connection. You can choose to use either one for your convenience. WebSocket is the recommended choice and you must use WebSocket if you are trying to access an instance in the TDengine Cloud service.
Note: TDengine ODBC driver can only be run on 64-bit system, and can only be invoked by 64-bit applications.
Note: TDengine ODBC driver can only be run on 64-bit systems, and can only be invoked by 64-bit applications.
## Compatibility with ODBC Versions
- The TDengine ODBC driver is compatible with ODBC 3.8 and all earlier versions.
## Install
1. TDengine ODBC driver supports only Windows platform. To run on Windows, VisualStudio C Runtime library is required. If VisualStudio C Runtime Library is missing on your platform, you can download and install it from [VC Runtime Library](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170).
1. The TDengine ODBC driver only supports the Windows platform. To run on Windows, the Microsoft Visual C++ Runtime library is required. If the Microsoft Visual C++ Runtime Library is missing on your platform, you can download and install it from [VC Runtime Library](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170).
2. Install TDengine client package for Windows, the version should be above 3.2.1.0, the client package includes both TDengine ODBC driver and some other necessary libraries that will be used in either native connection or WebSocket connection.
2. Install TDengine Client package for Windows. The TDengine Client version should be above 3.2.1.0. The client package includes both the TDengine ODBC driver and some other necessary libraries that will be used in either native connection or WebSocket connection.
## Configure Data Source
### Connection Types
TDengine ODBC driver supports two kinds of connections to TDengine cluster, native connection and WebSocket connection, here is the major differences between them.
TDengine ODBC driver supports two kinds of connections to TDengine cluster: native connection and WebSocket connection. The major differences between them are listed below.
1. Only WebSocket can connect to TDengine cloud service.
1. Only a WebSocket connection can be used to connect to TDengine Cloud service.
2. Websocket connection is more compatible with different TDengine server versions, normally you don't need to uupgrade client package with the server side.
2. A Websocket connection is more compatible with different TDengine server versions. Usually, you don't need to upgrade the TDengine Client package along with the server side.
3. Native connection normally has better performance, but you need to keep the version aligned with the server side.
3. Native connections usually have better performance, but the TDengine Client version must be compatible with the TDengine server version.
4. For most users, it's recommended to use **WebSocket** connection, which has much better compatibility and almost same performance as native connection.
4. For most users, it's recommended to use **WebSocket** connection, which has much better compatibility and almost the same performance as native connection.
### WebSocket Connection
@ -57,9 +60,9 @@ TDengine ODBC driver supports two kinds of connections to TDengine cluster, nati
4.6 [Password]: optional field, only used for connection testing in step 5;
5. Click "Test Connecting" to test whether the data source can be connectted; if successful, it will prompt "connecting success"
5. Click "Test Connection" to test whether the connection to the data source is successful; if successful, it will prompt "Successfully connected to URL"
6. Click "OK" to sae the configuration and exit.
6. Click "OK" to set the configuration and exit.
7. You can also select an already configured data source name in step 2 to change existing configuration.
@ -72,4 +75,4 @@ The steps are exactly same as "WebSocket" connection, except for you choose "Nat
## PowerBI
As an example, you can use PowerBI, which inovkes TDengine ODBC driver, to access TDengine, please refer to[Power BI](../../third-party/powerbi) for more details.
As an example, you can use PowerBI, which invokes TDengine ODBC driver, to access TDengine, please refer to [Power BI](../../third-party/powerbi) for more details.

View File

@ -40,7 +40,7 @@ Because the version of TDengine client driver is tightly associated with that of
### Install TDengine Client Driver
Regarding how to install TDengine client driver please refer to [Install Client Driver](../#installation-steps)
Regarding how to install TDengine client driver please refer to [Install Client Driver](../#install-client-driver)
### Install php-tdengine

View File

@ -0,0 +1,7 @@
The reqId is very similar to TraceID in distributed tracing systems. In a distributed system, a request may need to pass through multiple services or modules to be completed. The reqId is used to identify and associate all related operations of this request, allowing us to track and understand the complete execution path of the request.
Here are some primary usages of reqId:
- **Request Tracing**: By associating the same reqId with all related operations of a request, we can trace the complete path of the request within the system.
- **Performance Analysis**: By analyzing a request's reqId, we can understand the processing time of the request across various services or modules, thereby identifying performance bottlenecks.
- **Fault Diagnosis**: When a request fails, we can identify the location of the issue by examining the reqId associated with that request.
If the user does not set a reqId, the client library generates one randomly internally. It is still recommended that users set it explicitly, as it can be better associated with the user's own requests.

Binary image file changed (not shown): 21 KiB before, 18 KiB after.

View File

@ -15,9 +15,9 @@ Currently, TDengine's native interface client libraries can support platforms su
| -------------- | --------- | -------- | ---------- | ------ | ----------- | ------ | -------- | ----- |
| **X86 64bit** | **Linux** | ● | ● | ● | ● | ● | ● | ● |
| **X86 64bit** | **Win64** | ● | ● | ● | ● | ● | ● | ● |
| **X86 64bit** | **macOS** | | ● | ● | ○ | ○ | ● | ● |
| **X86 64bit** | **macOS** | | ● | ● | ○ | ○ | ● | ● |
| **ARM64** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
| **ARM64** | **macOS** | | ● | ● | ○ | ○ | ● | ● |
| **ARM64** | **macOS** | | ● | ● | ○ | ○ | ● | ● |
Where ● means the official test verification passed, ○ means the unofficial test verification passed, -- means no assurance.
@ -59,9 +59,9 @@ The different database framework specifications for various programming language
| -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- |
| **Connection Management** | Support | Support | Support | Support | Support | Support |
| **Regular Query** | Support | Support | Support | Support | Support | Support |
| **Parameter Binding** | Supported | Supported | Support | Support | Not Supported | Support |
| **Subscription (TMQ) ** | Supported | Support | Support | Not Supported | Not Supported | Support |
| **Schemaless** | Supported | Supported | Supported | Not Supported | Not Supported | Not Supported |
| **Parameter Binding** | Support | Support | Support | Support | Not Supported | Support |
| **Subscription (TMQ)** | Support | Support | Support | Support | Not Supported | Support |
| **Schemaless** | Support | Support | Support | Support | Not Supported | Not Supported |
| **Bulk Pulling (based on WebSocket)** | Support | Support | Support | Support | Support | Support |
:::warning

View File

@ -173,12 +173,6 @@ Query OK, 8 row(s) in set (0.001154s)
Before running the TDengine CLI, ensure that the taosd process has been stopped on the dnode that you want to delete.
```sql
DROP DNODE "fqdn:port";
```
or
```sql
DROP DNODE dnodeId;
```

View File

@ -243,7 +243,7 @@ curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
{"code":0,"column_meta":[["name","VARCHAR",64]],"data":[["information_schema"],["performance_schema"],["test"],["test1"]],"rows":4}
```
## Test cluster
## Test cluster
### Data preparation
@ -335,7 +335,7 @@ tdengine-1 1/1 Running 1 (6m48s ago) 20m 10.244.0.59 node84
tdengine-2 1/1 Running 0 21m 10.244.1.223 node85 <none> <none>
```
At this time, the cluster mnode has a re-election, and the monde on dnode1 becomes the leader.
At this time, the cluster mnode has a re-election, and the mnode on dnode2 becomes the leader.
```Bash
kubectl exec -it tdengine-1 -n tdengine-test -- taos -s "show mnodes\G"

View File

@ -60,7 +60,7 @@ database_option: {
- PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB.
- PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384.
- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.
- REPLICA: specifies the number of replicas that are made of the database. Enter 1 or 3. The default value is 1. The value of the REPLICA parameter cannot exceed the number of dnodes in the cluster.
- REPLICA: specifies the number of replicas that are made of the database. Enter 1, 2, or 3. The default value is 1. A value of 2 is available only in TDengine Enterprise since version 3.3.0.0. The value of the REPLICA parameter cannot exceed the number of dnodes in the cluster.
- WAL_LEVEL: specifies whether fsync is enabled. The default value is 1.
- 1: WAL is enabled but fsync is disabled.
- 2: WAL and fsync are both enabled.

View File

@ -25,7 +25,7 @@ create_definition:
col_name column_definition
column_definition:
type_name [comment 'string_value']
type_name [comment 'string_value'] [PRIMARY KEY] [ENCODE 'encode_type'] [COMPRESS 'compress_type'] [LEVEL 'level_type']
table_options:
table_option ...
@ -41,18 +41,21 @@ table_option: {
**More explanations**
1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key.
2. The maximum length of the table name is 192 bytes.
3. The maximum length of each row is 48k(64k since version 3.0.5.0) bytes, please note that the extra 2 bytes used by each BINARY/NCHAR/GEOMETRY column are also counted.
4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive.
5. The maximum length in bytes must be specified when using BINARY/NCHAR/GEOMETRY types.
6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive.
2. In addition to the timestamp primary key column, an additional primary key column can be specified using the `PRIMARY KEY` keyword. The second column specified as the primary key must be of type integer or string (varchar).
3. The maximum length of the table name is 192 bytes.
4. The maximum length of each row is 48 KB (64 KB since version 3.0.5.0); please note that the extra 2 bytes used by each BINARY/NCHAR/GEOMETRY column are also counted.
5. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive.
6. The maximum length in bytes must be specified when using BINARY/NCHAR/GEOMETRY types.
7. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive.
For example \`aBc\` and \`abc\` are different table names but `abc` and `aBc` are same table names because they are both converted to `abc` internally.
Only ASCII visible characters can be used with escape character.
8. For the details of using `ENCODE` and `COMPRESS`, please refer to [Encode and Compress for Column](../compress).
**Parameter description**
1. COMMENT: specifies comments for the table. This parameter can be used with supertables, standard tables, and subtables.
1. COMMENT: specifies comments for the table. This parameter can be used with supertables, standard tables, and subtables. The maximum length of the comment is 1024 bytes.
2. SMA: specifies functions on which to enable small materialized aggregates (SMA). SMA is user-defined precomputation of aggregates based on data blocks. Enter one of the following values: max, min, or sum This parameter can be used with supertables and standard tables.
3. TTL: specifies the time to live (TTL) for the table. If TTL is specified when creatinga table, after the time period for which the table has been existing is over TTL, TDengine will automatically delete the table. Please be noted that the system may not delete the table at the exact moment that the TTL expires but guarantee there is such a system and finally the table will be deleted. The unit of TTL is in days. The default value is 0, i.e. never expire.
3. TTL: specifies the time to live (TTL) for the table. If TTL is specified when creating a table, TDengine will automatically delete the table once it has existed longer than the TTL. Note that the system may not delete the table at the exact moment the TTL expires, but it guarantees the table will eventually be deleted. The unit of TTL is days. The value range is [0, 2147483647]. The default value is 0, i.e. never expire.
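A minimal sketch combining these features (the table and column names are hypothetical): a composite primary key on the second column (since v3.3.0.0) plus COMMENT and TTL table options.
```sql
-- hypothetical table: the second column acts as a composite primary key,
-- and the table carries a comment and a 180-day TTL
CREATE TABLE orders (
    ts  TIMESTAMP,
    oid INT PRIMARY KEY,
    amt DOUBLE
) COMMENT 'order records' TTL 180;
```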
## Create Subtables
@ -103,10 +106,17 @@ alter_table_option: {
**More explanations**
You can perform the following modifications on existing tables:
1. ADD COLUMN: adds a column to the supertable.
2. DROP COLUMN: deletes a column from the supertable.
3. MODIFY COLUMN: changes the length of the data type specified for the column. Note that you can only specify a length greater than the current length.
4. RENAME COLUMN: renames a specified column in the table.
5. The primary key columns of a table cannot be modified; nor can they be added or deleted using ADD/DROP COLUMN.
**Parameter description**
1. COMMENT: specifies comments for the table. This parameter can be used with supertables, standard tables, and subtables. The maximum length of the comment is 1024 bytes.
2. TTL: specifies the time to live (TTL) for the table. If TTL is specified when creating a table, TDengine will automatically delete the table once it has existed longer than the TTL. Note that the system may not delete the table at the exact moment the TTL expires, but it guarantees the table will eventually be deleted. The unit of TTL is days. The value range is [0, 2147483647]. The default value is 0, i.e. never expire.
### Add a Column
@ -132,6 +142,18 @@ ALTER TABLE tb_name MODIFY COLUMN field_name data_type(length);
ALTER TABLE tb_name RENAME COLUMN old_col_name new_col_name
```
### Alter Table TTL
```sql
ALTER TABLE tb_name TTL value
```
### Alter Table Comment
```sql
ALTER TABLE tb_name COMMENT 'string_value'
```
## Modify a Subtable
```sql
@ -152,14 +174,32 @@ alter_table_option: {
```
**More explanations**
1. Only the value of a tag can be modified directly. For all other modifications, you must modify the supertable from which the subtable was created.
**Parameter description**
1. COMMENT: specifies comments for the table. This parameter can be used with supertables, standard tables, and subtables. The maximum length of the comment is 1024 bytes.
2. TTL: specifies the time to live (TTL) for the table. If TTL is specified when creating a table, TDengine will automatically delete the table once it has existed longer than the TTL. Note that the system may not delete the table at the exact moment the TTL expires, but it guarantees the table will eventually be deleted. The unit of TTL is days. The value range is [0, 2147483647]. The default value is 0, i.e. never expire.
### Change Tag Value Of Sub Table
```sql
ALTER TABLE tb_name SET TAG tag_name=new_tag_value;
```
### Alter Table TTL
```sql
ALTER TABLE tb_name TTL value
```
### Alter Table Comment
```sql
ALTER TABLE tb_name COMMENT 'string_value'
```
## Delete a Table
The following SQL statement deletes one or more tables.
@ -168,6 +208,8 @@ The following SQL statement deletes one or more tables.
DROP TABLE [IF EXISTS] [db_name.]tb_name [, [IF EXISTS] [db_name.]tb_name] ...
```
**Note**: Dropping a table doesn't release the disk space occupied by the table; instead, all rows in the table are marked as deleted, so the data will not appear in query results. The disk space is released when the system automatically performs the `compact` operation or the user runs `compact` manually.
## View Tables
### View All Tables

View File

@ -13,17 +13,29 @@ create_definition:
col_name column_definition
column_definition:
type_name
type_name [comment 'string_value'] [PRIMARY KEY] [ENCODE 'encode_type'] [COMPRESS 'compress_type'] [LEVEL 'level_type']
table_options:
table_option ...
table_option: {
COMMENT 'string_value'
| SMA(col_name [, col_name] ...)
| TTL value
}
```
**More explanations**
- Each supertable can have a maximum of 4096 columns, including tags. The minimum number of columns is 3: a timestamp column used as the key, one tag column, and one data column.
- The TAGS keyword defines the tag columns for the supertable. The following restrictions apply to tag columns:
1. Each supertable can have a maximum of 4096 columns, including tags. The minimum number of columns is 3: a timestamp column used as the key, one tag column, and one data column.
2. Since version 3.3.0.0, besides the timestamp, you can specify another column as primary key using the `PRIMARY KEY` keyword; the column specified as primary key must be of integer or varchar type.
2. The TAGS keyword defines the tag columns for the supertable. The following restrictions apply to tag columns:
- A tag column can use the TIMESTAMP data type, but the values in the column must be fixed numbers. Timestamps including formulae, such as "now + 10s", cannot be stored in a tag column.
- The name of a tag column cannot be the same as the name of any other column.
- The name of a tag column cannot be a reserved keyword.
- Each supertable must contain between 1 and 128 tags. The total length of the TAGS keyword cannot exceed 16 KB.
- For more information about table parameters, see Create a Table.
3. Regarding how to use `ENCODE` and `COMPRESS`, please refer to [Encode and Compress for Column](../compress).
3. For more information about table parameters, see [Create a Table](../table).
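A minimal sketch (the names are hypothetical) of a supertable whose second column is a composite primary key, available since v3.3.0.0:
```sql
-- hypothetical supertable with a composite primary key on the second column
CREATE STABLE meters_seq (
    ts      TIMESTAMP,
    seq     INT PRIMARY KEY,
    current FLOAT,
    voltage INT
) TAGS (location VARCHAR(64), group_id INT);
```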
## View a Supertable
@ -111,6 +123,8 @@ DROP STABLE [IF EXISTS] [db_name.]stb_name
Note: Deleting a supertable will delete all subtables created from the supertable, including all data within those subtables.
**Note**: Dropping a supertable doesn't release the disk space occupied by the table; instead, all rows in the table are marked as deleted, so the data will not appear in query results. The disk space is released when the system automatically performs the `compact` operation or the user runs `compact` manually.
## Modify a Supertable
```sql
@ -147,6 +161,7 @@ Modifications to the table schema of a supertable take effect on all subtables w
- DROP TAG: deletes a tag from the supertable. When you delete a tag from a supertable, it is automatically deleted from all subtables within the supertable.
- MODIFY TAG: modifies the definition of a tag in the supertable. You can use this keyword to change the length of a BINARY or NCHAR tag column. Note that you can only specify a length greater than the current length.
- RENAME TAG: renames a specified tag in the supertable. When you rename a tag in a supertable, it is automatically renamed in all subtables within the supertable.
- Like ordinary tables, the primary key of a supertable cannot be modified, nor can primary key columns be added or deleted using ADD/DROP COLUMN.
### Add a Column

View File

@ -57,6 +57,7 @@ INSERT INTO
```
6. However, an INSERT statement that writes data to multiple subtables can succeed for some tables and fail for others. This situation is caused because vnodes perform write operations independently of each other. One vnode failing to write data does not affect the ability of other vnodes to write successfully.
7. The primary key column value must be specified and cannot be NULL.
**Normal Syntax**
1. The USING clause automatically creates the specified subtable if it does not exist. If it's unknown whether the table already exists, the table can be created automatically while inserting using the SQL statement below. To use this functionality, a STable must be used as template and tag values must be provided. Any tags that you do not specify will be assigned a null value.
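For example, a sketch of auto-creating subtable `d2001` from the `meters` supertable while inserting (the tag and column values are illustrative):
```sql
-- creates d2001 automatically if it does not exist, then inserts one row
INSERT INTO d2001 USING meters TAGS ('California.SanFrancisco', 2)
    VALUES (NOW, 10.3, 219, 0.31);
```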

View File

@ -58,6 +58,8 @@ window_clause: {
SESSION(ts_col, tol_val)
| STATE_WINDOW(col)
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
| EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition
| COUNT_WINDOW(count_val[, sliding_val])
interp_clause:
RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
@ -95,6 +97,7 @@ The list of currently supported Hints is as follows:
| PARTITION_FIRST| None | Use Partition before aggregate, conflict with SORT_FOR_GROUP | With normal column in partition by list |
| PARA_TABLES_SORT| None | When sorting the supertable rows by timestamp, No temporary disk space is used. When there are numerous tables, each with long rows, the corresponding algorithm associated with this prompt may consume a substantial amount of memory, potentially leading to an Out Of Memory (OOM) situation. | Sorting the supertable rows by timestamp |
| SMALLDATA_TS_SORT| None | When sorting the supertable rows by timestamp, if the length of query columns >= 256, and there are relatively few rows, this hint can improve performance. | Sorting the supertable rows by timestamp |
| SKIP_TSMA| None| To explicitly disable tsma optimization for select query|Select query with agg funcs|
For example:
@ -145,6 +148,11 @@ You can query tag columns in supertables and subtables and receive results in th
SELECT location, groupid, current FROM d1001 LIMIT 2;
```
### Alias Name
The naming rules for aliases are the same as those for columns, and Chinese aliases in UTF-8 encoding can be specified directly.
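For example (using the sample table `d1001`), a Chinese alias can be given directly:
```sql
-- the alias 电流 ("current") is written directly in UTF-8
SELECT current AS 电流 FROM d1001 LIMIT 2;
```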
### Distinct Values
The DISTINCT keyword returns only values that are different over one or more columns. You can use the DISTINCT keyword with tag columns and data columns.
@ -275,7 +283,7 @@ The GROUP BY clause does not guarantee that the results are ordered. If you want
The PARTITION BY clause is a TDengine-specific extension to standard SQL introduced in TDengine 3.0. This clause partitions data based on the part_list and performs computations per partition.
PARTITION BY and GROUP BY have similar meanings. They both group data according to a specified list and then perform calculations. The difference is that PARTITION BY does not have various restrictions on the SELECT list of the GROUP BY clause. Any operation can be performed within the group (constants, aggregations, scalars, expressions, etc.). Therefore, PARTITION BY is fully compatible with GROUP BY in terms of usage. All places that use the GROUP BY clause can be replaced with PARTITION BY.
PARTITION BY and GROUP BY have similar meanings. They both group data according to a specified list and then perform calculations. The difference is that PARTITION BY does not impose the various restrictions that the GROUP BY clause places on the SELECT list: any operation can be performed within the group (constants, aggregations, scalars, expressions, etc.). Therefore, PARTITION BY is fully compatible with GROUP BY in terms of usage; all places that use the GROUP BY clause can be replaced with PARTITION BY, although the query results may differ when no aggregation function is used in the query.
Because PARTITION BY does not require returning a row of aggregated data, it can also support various window operations after grouping slices. All window operations that need to be grouped can only use the PARTITION BY clause.
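As a sketch on the sample `meters` supertable, the same grouping can be written with either clause, but only PARTITION BY can be followed by a window clause:
```sql
-- equivalent grouping with GROUP BY and PARTITION BY
SELECT location, AVG(current) FROM meters GROUP BY location;
SELECT location, AVG(current) FROM meters PARTITION BY location;
-- only PARTITION BY allows a window clause after the grouping
SELECT _wstart, location, AVG(current) FROM meters PARTITION BY location INTERVAL(10m);
```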
@ -436,7 +444,7 @@ FROM temp_ctable t1 LEFT ASOF JOIN temp_stable t2
ON t1.ts = t2.ts AND t1.deviceid = t2.deviceid;
```
For more information about JOIN operations, please refer to the page [TDengine Join] (../join).
For more information about JOIN operations, please refer to the page [TDengine Join](../join).
## Nested Query
@ -451,6 +459,7 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
:::info
- The result of a nested query is returned as a virtual table used by the outer query. It's recommended to give an alias to this table for the convenience of using it in the outer query.
- Outer queries support directly referencing columns or pseudo-columns of inner queries in the form of column names or \`column names\`.
- JOIN operation is allowed between tables/STables inside both inner and outer queries. Join operation can be performed on the result set of the inner query.
- The features that can be used in the inner query are the same as those that can be used in a non-nested query.
- `ORDER BY` inside the inner query is unnecessary and will slow down the query performance significantly. It is best to avoid the use of `ORDER BY` inside the inner query.

View File

@ -6,6 +6,8 @@ description: This document describes how to delete data from TDengine.
TDengine provides the functionality of deleting data from a table or STable according to specified time range, it can be used to cleanup abnormal data generated due to device failure.
**Note**: Deleting some data doesn't release the disk space occupied by the table; instead, the affected rows are marked as deleted, so the data will not appear in query results. The disk space is released when the system automatically performs the `compact` operation or the user runs `compact` manually.
**Syntax:**
```sql

View File

@ -398,7 +398,7 @@ Conversion functions change the data type of a value.
CAST(expr AS type_name)
```
**Description**: Convert the input data `expr` into the type specified by `type_name`. This function can be used only in SELECT statements.
**Description**: Convert the input data `expr` into the type specified by `type_name`.
**Return value type**: The type specified by parameter `type_name`
@ -435,8 +435,7 @@ TO_ISO8601(expr [, timezone])
**More explanations**:
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]. For example, TO_ISO8601(1, "+00:00").
- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp
- If the input is a column of TIMESTAMP type, the precision of the returned value is same as the precision set for the current data base in use
- The precision of the input timestamp is recognized automatically according to the precision of the table used; milliseconds are used if no table is specified.
#### TO_JSON
@ -503,38 +502,38 @@ TO_CHAR(ts, format_str_literal)
**Supported Formats**
| **Format** | **Comment**| **example** |
| --- | --- | --- |
|AM,am,PM,pm| Meridiem indicator(without periods) | 07:00:00am|
|A.M.,a.m.,P.M.,p.m.| Meridiem indicator(with periods)| 07:00:00a.m.|
|YYYY,yyyy|year, 4 or more digits| 2023-10-10|
|YYY,yyy| year, last 3 digits| 023-10-10|
|YY,yy| year, last 2 digits| 23-10-10|
|Y,y| year, last digit| 3-10-10|
|MONTH|full uppercase of month| 2023-JANUARY-01|
|Month|full capitalized month| 2023-January-01|
|month|full lowercase of month| 2023-january-01|
|MON| abbreviated uppercase of month(3 char)| JAN, SEP|
|Mon| abbreviated capitalized month| Jan, Sep|
|mon|abbreviated lowercase of month| jan, sep|
|MM,mm|month number 01-12|2023-01-01|
|DD,dd|month day, 01-31||
|DAY|full uppercase of week day|MONDAY|
|Day|full capitalized week day|Monday|
|day|full lowercase of week day|monday|
|DY|abbreviated uppercase of week day|MON|
|Dy|abbreviated capitalized week day|Mon|
|dy|abbreviated lowercase of week day|mon|
|DDD|year day, 001-366||
|D,d|week day number, 1-7, Sunday(1) to Saturday(7)||
|HH24,hh24|hour of day, 00-23|2023-01-30 23:59:59|
|hh12,HH12, hh, HH| hour of day, 01-12|2023-01-30 12:59:59PM|
|MI,mi|minute, 00-59||
|SS,ss|second, 00-59||
|MS,ms|milli second, 000-999||
|US,us|micro second, 000000-999999||
|NS,ns|nano second, 000000000-999999999||
|TZH,tzh|time zone hour|2023-01-30 11:59:59PM +08|
| **Format** | **Comment** | **example** |
| ------------------- | ---------------------------------------------- | ------------------------- |
| AM,am,PM,pm | Meridiem indicator(without periods) | 07:00:00am |
| A.M.,a.m.,P.M.,p.m. | Meridiem indicator(with periods) | 07:00:00a.m. |
| YYYY,yyyy | year, 4 or more digits | 2023-10-10 |
| YYY,yyy | year, last 3 digits | 023-10-10 |
| YY,yy | year, last 2 digits | 23-10-10 |
| Y,y | year, last digit | 3-10-10 |
| MONTH | full uppercase of month | 2023-JANUARY-01 |
| Month | full capitalized month | 2023-January-01 |
| month | full lowercase of month | 2023-january-01 |
| MON | abbreviated uppercase of month(3 char) | JAN, SEP |
| Mon | abbreviated capitalized month | Jan, Sep |
| mon | abbreviated lowercase of month | jan, sep |
| MM,mm | month number 01-12 | 2023-01-01 |
| DD,dd | month day, 01-31 | |
| DAY | full uppercase of week day | MONDAY |
| Day | full capitalized week day | Monday |
| day | full lowercase of week day | monday |
| DY | abbreviated uppercase of week day | MON |
| Dy | abbreviated capitalized week day | Mon |
| dy | abbreviated lowercase of week day | mon |
| DDD | year day, 001-366 | |
| D,d | week day number, 1-7, Sunday(1) to Saturday(7) | |
| HH24,hh24 | hour of day, 00-23 | 2023-01-30 23:59:59 |
| hh12,HH12, hh, HH | hour of day, 01-12 | 2023-01-30 12:59:59PM |
| MI,mi | minute, 00-59 | |
| SS,ss | second, 00-59 | |
| MS,ms | milli second, 000-999 | |
| US,us | micro second, 000000-999999 | |
| NS,ns | nano second, 000000000-999999999 | |
| TZH,tzh | time zone hour | 2023-01-30 11:59:59PM +08 |
**More explanations**:
- The output of `Month` and `Day` is left-aligned and padded, like `2023-OCTOBER -01`, `2023-SEPTEMBER-01`; `September` is the longest month name, so it needs no padding. Week days are similar.
@ -650,6 +649,7 @@ use_current_timezone: {
- Time unit specified by `time_unit` can be:
1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
- The precision of the returned timestamp is same as the precision set for the current data base in use
- The precision of the input timestamp is recognized automatically according to the precision of the table used; milliseconds are used if no table is specified.
- If the input data is not formatted as a timestamp, the returned value is null.
- When using 1d/1w as the time unit to truncate timestamp, you can specify whether to truncate based on the current time zone by setting the use_current_timezone parameter.
Value 0 indicates truncation using the UTC time zone, value 1 indicates truncation using the current time zone.
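A sketch comparing truncation with and without the current time zone (the timestamp literal is illustrative):
```sql
-- truncate to 1-day granularity using the current time zone (1) vs UTC (0)
SELECT TIMETRUNCATE('2024-07-24 16:42:52', 1d, 1);
SELECT TIMETRUNCATE('2024-07-24 16:42:52', 1d, 0);
```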
@ -955,6 +955,7 @@ FIRST(expr)
- FIRST(\*) can be used to get the first non-null value of all columns; When querying a super table and multiResultFunctionStarReturnTags is set to 0 (default), FIRST(\*) only returns columns of super table; When set to 1, returns columns and tags of the super table.
- NULL will be returned if all the values of the specified column are all NULL
- A result will NOT be returned if all the columns in the result set are all NULL
- For a table with composite primary key, the data with the smallest primary key value is returned.
### INTERP
@ -988,6 +989,7 @@ ignore_null_values: {
- `INTERP` can be applied to supertable by interpolating primary key sorted data of all its childtables. It can also be used with `partition by tbname` when applied to supertable to generate interpolation on each single timeline.
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.2.0).
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by interpolation algorithm(support after version 3.0.3.0).
- For a table with composite primary key, only the data with the smallest primary key value is used to generate interpolation.
**Example**
@ -1017,6 +1019,7 @@ LAST(expr)
- LAST(\*) can be used to get the last non-NULL value of all columns; When querying a super table and multiResultFunctionStarReturnTags is set to 0 (default), LAST(\*) only returns columns of super table; When set to 1, returns columns and tags of the super table.
- If the values of a column in the result set are all NULL, NULL is returned for that column; if all columns in the result are all NULL, no result will be returned.
- When it's used on a STable, if there are multiple values with the timestamp in the result set, one of them will be returned randomly and it's not guaranteed that the same value is returned if the same query is run multiple times.
- For a table with composite primary key, the data with the largest primary key value is returned.
### LAST_ROW
@ -1038,6 +1041,7 @@ LAST_ROW(expr)
- LAST_ROW(\*) can be used to get the last value of all columns; When querying a super table and multiResultFunctionStarReturnTags is set to 0 (default), LAST_ROW(\*) only returns columns of super table; When set to 1, returns columns and tags of the super table.
- When it's used on a STable, if there are multiple values with the timestamp in the result set, one of them will be returned randomly and it's not guaranteed that the same value is returned if the same query is run multiple times.
- Can't be used with `INTERVAL`.
- Like `LAST`, the data with the largest primary key value is returned for a table with composite primary key.
### MAX
@ -1144,7 +1148,7 @@ TOP(expr, k)
UNIQUE(expr)
```
**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword.
**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword. For a table with composite primary key, only the data with the smallest primary key value is returned.
**Return value type**:Same as the data type of the column being operated upon
@ -1163,7 +1167,7 @@ TDengine includes extensions to standard SQL that are intended specifically for
CSUM(expr)
```
**Description**: The cumulative sum of each row for a specific column. The number of output rows is same as that of the input rows.
**Description**: The cumulative sum of each row for a specific column; NULL values are discarded.
**Return value type**: Long integer for integers; Double for floating points. uint64_t for unsigned integers
@ -1190,7 +1194,7 @@ ignore_negative: {
}
```
**Description**: The derivative of a specific column. The time rage can be specified by parameter `time_interval`, the minimum allowed time range is 1 second (1s); the value of `ignore_negative` can be 0 or 1, 1 means negative values are ignored.
**Description**: The derivative of a specific column. The time range can be specified by parameter `time_interval`; the minimum allowed time range is 1 second (1s). The value of `ignore_negative` can be 0 or 1; 1 means negative values are ignored. For tables with composite primary key, the data with the smallest primary key value is used to calculate the derivative.
**Return value type**: DOUBLE
@ -1205,27 +1209,40 @@ ignore_negative: {
### DIFF
```sql
DIFF(expr [, ignore_negative])
DIFF(expr [, ignore_option])
ignore_negative: {
ignore_option: {
0
| 1
| 2
| 3
}
```
**Description**: The different of each row with its previous row for a specific column. `ignore_negative` can be specified as 0 or 1, the default value is 1 if it's not specified. `1` means negative values are ignored.
**Description**: The difference of each row with its previous row for a specific column. `ignore_option` takes a value of 0|1|2|3; the default is 0 if not specified.
- `0` means that negative values (diff results) are not ignored and null values are not ignored
- `1` means that negative values (diff results) are treated as null values
- `2` means that negative values (diff results) are not ignored but null values are ignored
- `3` means that negative values (diff results) are ignored and null values are ignored
- For tables with composite primary key, the data with the smallest primary key value is used to calculate the difference.
**Return value type**:Same as the data type of the column being operated upon
**Return value type**: `bool`, `timestamp`, and `integer` types all return `int64`; `float` types return `double`. If the diff result overflows, the overflowed value is returned.
**Applicable data types**: Numeric
**Applicable data types**: Numeric, timestamp, and bool types.
**Applicable table types**: standard tables and supertables
**More explanation**:
- The number of result rows is the number of rows subtracted by one, no output for the first row
- It can be used together with a selected column. For example: select \_rowts, DIFF() from.
- `diff` calculates the difference between a specific column in the current row and the **first valid data before the row**, i.e. the nearest non-null value of the same column with a smaller timestamp.
- The diff result of a numeric type is the corresponding arithmetic difference; timestamps are calculated based on the timestamp precision of the database; when calculating diff, `true` is treated as 1 and `false` as 0.
- If the data of the current row is NULL, or if the **first valid data before the current row** cannot be found, the diff result is NULL.
- When ignoring negative values (`ignore_option` set to 1 or 3), if the diff result is negative, the result is set to NULL and then filtered according to the NULL filtering rule.
- When the diff result overflows, whether the value counts as negative depends on whether the result of the corresponding logical operation is positive or negative. For example, 9223372036854775800 - (-9223372036854775806) exceeds the range of BIGINT, so the diff result displays the overflow value -10, but it is not treated as a negative value to ignore.
- Single or multiple diffs can be used in a single statement, and each diff can specify the same or a different `ignore_option`. When there are multiple diffs in a single statement, a row is removed from the result set if and only if all diff results for that row are NULL and every diff's `ignore_option` specifies ignoring NULLs.
- Can be used together with associated selected columns, for example: `select _rowts, DIFF()` (see the sketch after this list).
- When there is no composite primary key, if the same timestamp appears across different subtables, a "Duplicate timestamps not allowed" error is reported.
- With a composite primary key, the same combination of timestamp and primary key may appear across subtables; which row is used depends on which is found first, so running diff() multiple times may yield different results in such a case.
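A minimal sketch on the sample table `d1001`, using `ignore_option` 3 to drop both negative results and NULLs:
```sql
-- per-row difference of current; negative diffs and NULLs are discarded
SELECT _rowts, DIFF(current, 3) FROM d1001;
```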
### IRATE
@ -1233,7 +1250,7 @@ ignore_negative: {
IRATE(expr)
```
**Description**: instantaneous rate on a specific column. The last two samples in the specified time range are used to calculate instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values.
**Description**: instantaneous rate on a specific column. The last two samples in the specified time range are used to calculate instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values. For tables with composite primary key, the data with the smallest primary key value is used to calculate the rate.
**Return value type**: DOUBLE
@ -1323,7 +1340,7 @@ STATEDURATION(expr, oper, val, unit)
TWA(expr)
```
**Description**: Time weighted average on a specific column within a time range
**Description**: Time weighted average on a specific column within a time range. For tables with composite primary key, the data with the smallest primary key value is used to calculate the average.
**Return value type**: DOUBLE

View File

@ -31,25 +31,25 @@ A PARTITION BY clause is processed as follows:
select _wstart, location, max(current) from meters partition by location interval(10m)
```
The most common usage of PARTITION BY is partitioning the data in subtables by tags then perform computation when querying data in a supertable. More specifically, `PARTITION BY TBNAME` partitions the data of each subtable into a single timeline, and this method facilitates the statistical analysis in many use cases of processing timeseries data. For example, calculate the average voltage of each meter every 10 minutes£º
The most common usage of PARTITION BY is partitioning the data in subtables by tags then perform computation when querying data in a supertable. More specifically, `PARTITION BY TBNAME` partitions the data of each subtable into a single timeline, and this method facilitates the statistical analysis in many use cases of processing timeseries data. For example, calculate the average voltage of each meter every 10 minutes:
```sql
select _wstart, tbname, avg(voltage) from meters partition by tbname interval(10m)
```
## Windowed Queries
Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every seconds, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are four kinds of windows: time window, status window, session window, and event window. There are two kinds of time windows: sliding window and flip time/tumbling window. The syntax of window clause is as follows:
Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every seconds, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are five kinds of windows: time window, status window, session window, event window, and count window. There are two kinds of time windows: sliding window and flip time/tumbling window. The syntax of window clause is as follows:
```sql
window_clause: {
SESSION(ts_col, tol_val)
| STATE_WINDOW(col)
| INTERVAL(interval_val [, offset]) [SLIDING (sliding_value)] [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_value)] [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
| EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition
}
```
Both interval_val and sliding_value are time durations which have 3 forms of representation.
Both interval_val and sliding_value are time durations, and interval_offset is the window offset; interval_offset must be less than interval_val. There are 3 forms of representation.
- INTERVAL(1s, 500a) SLIDING(1s), the unit char should be any one of a (millisecond), b (nanosecond), d (day), h (hour), m (minute), n (month), s (second), u (microsecond), w (week), y (year).
- INTERVAL(1000, 500) SLIDING(1000), the unit will be the same as that of the queried database; if more than one database is queried, the higher precision is used.
- INTERVAL('1s', '500a') SLIDING('1s'), unit must be specified, no spaces allowed.
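For example, a sketch combining a window offset with sliding (values are illustrative and satisfy interval_offset < interval_val):
```sql
-- 10-minute windows shifted forward by 1 minute, recalculated every 5 minutes.
SELECT _wstart, avg(current) FROM meters
WHERE ts >= '2024-01-01 00:00:00' AND ts < '2024-01-02 00:00:00'
INTERVAL(10m, 1m) SLIDING(5m);
```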
@ -80,7 +80,7 @@ These pseudocolumns occur after the aggregation clause.
`FILL` clause is used to specify how to fill when there is data missing in any window, including:
1. NONE: No fill (the default fill mode)
2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled.
2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)`. Note: The value filled depends on the data type. For example, if you run `FILL(VALUE, 1.23)` on an integer column, the value 1 is filled. If multiple columns in the select list need to be filled, the fill clause must contain a fill value for each of these columns, for example, `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`.
3. PREV: Fill with the previous non-NULL value, `FILL(PREV)`
4. NULL: Fill with NULL, `FILL(NULL)`
5. LINEAR: Fill with the closest non-NULL value, `FILL(LINEAR)`
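As a sketch of the multi-column VALUE fill described above (column and time range are illustrative):
```sql
-- One fill value per filled column in the select list: both min(current)
-- and max(current) are filled with 0 in windows that have no data.
SELECT _wstart, min(current), max(current) FROM meters
WHERE ts >= '2024-01-01 00:00:00' AND ts < '2024-01-02 00:00:00'
INTERVAL(10m) FILL(VALUE, 0, 0);
```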

View File

@ -11,13 +11,14 @@ Because stream processing is built in to TDengine, you are no longer reliant on
## Create a Stream
```sql
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name SUBTABLE(expression) AS subquery
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name[(field1_name, field2_name [PRIMARY KEY], ...)] [TAGS (create_definition [, create_definition] ...)] SUBTABLE(expression) AS subquery
stream_options: {
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
WATERMARK time
IGNORE EXPIRED [0|1]
DELETE_MARK time
FILL_HISTORY [0|1]
IGNORE UPDATE [0|1]
}
```
@ -29,10 +30,10 @@ subquery: SELECT [DISTINCT] select_list
from_clause
[WHERE condition]
[PARTITION BY tag_list]
[window_clause]
window_clause
```
Session windows, state windows, and sliding windows are supported. When you configure a session or state window for a supertable, you must use PARTITION BY TBNAME.
Session windows, state windows, and sliding windows are supported. When you configure a session or state window for a supertable, you must use PARTITION BY TBNAME. If the source table has a composite primary key, state windows, event windows, and count windows are not supported.
The SUBTABLE clause defines the naming rules for auto-created subtables; see the Partitions of Stream section below for details.
@ -46,7 +47,7 @@ window_clause: {
}
```
`SESSION` indicates a session window, and `tol_val` indicates the maximum range of the time interval. If the time interval between two continuous rows are within the time interval specified by `tol_val` they belong to the same session window; otherwise a new session window is started automatically.
`SESSION` indicates a session window, and `tol_val` indicates the maximum range of the time interval. If the time interval between two continuous rows is within the time interval specified by `tol_val`, they belong to the same session window; otherwise a new session window is started automatically. The `_wend` of this window is the timestamp of the last row plus `tol_val`.
`EVENT_WINDOW` is determined according to the window start condition and the window close condition. The window is started when `start_trigger_condition` is evaluated to true, the window is closed when `end_trigger_condition` is evaluated to true. `start_trigger_condition` and `end_trigger_condition` can be any conditional expressions supported by TDengine and can include multiple columns.
@ -77,9 +78,13 @@ If a stream is created with PARTITION BY clause and SUBTABLE clause, the name of
```sql
CREATE STREAM avg_vol_s INTO avg_vol SUBTABLE(CONCAT('new-', tname)) AS SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname tname INTERVAL(1m);
CREATE STREAM streams0 INTO streamt0 AS SELECT _wstart, count(*), avg(voltage) from meters PARTITION BY tbname EVENT_WINDOW START WITH voltage < 0 END WITH voltage > 9;
CREATE STREAM streams1 IGNORE EXPIRED 1 WATERMARK 100s INTO streamt1 AS SELECT _wstart, count(*), avg(voltage) from meters PARTITION BY tbname COUNT_WINDOW(10);
```
IN PARTITION clause, 'tbname', representing each subtable name of source supertable, is given alias 'tname'. And 'tname' is used in SUBTABLE clause. In SUBTABLE clause, each auto created subtable will concat 'new-' and source subtable name as their name(Starting from 3.2.3.0, in order to avoid the expression in subtable being unable to distinguish between different subtables, add '_groupId' to the end of subtable name).
In the PARTITION clause, 'tbname', representing the name of each subtable of the source supertable, is given the alias 'tname', and 'tname' is then used in the SUBTABLE clause. Each auto-created subtable concatenates 'new-' and the source subtable name as its name. (Starting from 3.2.3.0, '_stableName_groupId' is appended to the end of the subtable name, so that subtables remain distinguishable even when the SUBTABLE expression cannot distinguish them.)
If the output length exceeds the TDengine limit (192), the name will be truncated. If the generated name is already occupied by another table, the creation and writing of the new subtable will fail.
@ -188,11 +193,32 @@ All [scalar functions](../function/#scalar-functions) are available in stream pr
- [unique](../function/#unique)
- [mode](../function/#mode)
## Pause\Resume stream
## Pause and Resume a Stream
1. Pause stream
```sql
PAUSE STREAM [IF EXISTS] stream_name;
```
If "IF EXISTS" is not specified and the stream does not exist, an error will be reported; If "IF EXISTS" is specified and the stream does not exist, success is returned; If the stream exists, paused all stream tasks.
2. Resume stream
```sql
RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
```
If "IF EXISTS" is not specified and the stream does not exist, an error will be reported. If "IF EXISTS" is specified and the stream does not exist, success is returned; If the stream exists, all of the stream tasks will be resumed. If "IGNORE UntREATED" is specified, data written during the pause period of stream is ignored when resuming stream.
## Stream State Backup
The intermediate processing results of a stream, a.k.a. the stream state, need to be properly persisted to disk during stream processing. The stream state, consisting of multiple files on disk, may be transferred between different computing nodes during stream processing, as a result of a leader/follower switch or a physical computing node going offline. Since version 3.3.2.1, you need to deploy rsync on each physical node to make the backup and restore processing work. To ensure it works correctly, please refer to the following instructions:
1. Add the option "snodeAddress" in the configuration file.
2. Add the option "checkpointBackupDir" in the configuration file to set the backup data directory.
3. Create a _snode_ before creating a stream to ensure the backup service is activated. Otherwise, checkpoints may not be generated during stream processing.
>snodeAddress 127.0.0.1:873
>
>checkpointBackupDir /home/user/stream/backup/checkpoint/
## Create snode
The snode, short for stream node, on which aggregate tasks can be deployed, is a stateful computing node dedicated to stream processing. An important feature is backing up and restoring the stream state files. The snode needs to be created before creating stream tasks. Use the following SQL statement to create a snode in a TDengine cluster; only one snode is allowed in a TDengine cluster for now.
```sql
CREATE SNODE ON DNODE id
```
`id` is the ordinal number of a dnode, which can be acquired by using the ```show dnodes``` statement.
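As a minimal sketch (the dnode id 1 below is illustrative; pick one from your own `show dnodes` output):
```sql
-- List dnodes to find a suitable id, then create the single snode on it.
SHOW DNODES;
CREATE SNODE ON DNODE 1;
```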

View File

@ -18,7 +18,7 @@ description: This document describes the usage of escape characters in TDengine.
## Restrictions
1. If there are escape characters in identifiers (database name, table name, column name)
1. If there are escape characters in identifiers (database name, table name, column name, alias name)
- Identifier without ``: Error will be returned because identifier must be constituted of digits, ASCII characters or underscore and can't be started with digits
- Identifier quoted with ``: Original content is kept, no escaping
2. If there are escape characters in values

View File

@ -151,7 +151,7 @@ The following list shows all reserved keywords:
- INTERVAL
- INTO
- IS
- ISNULL
- IS NULL
### J
@ -197,7 +197,7 @@ The following list shows all reserved keywords:
- NMATCH
- NONE
- NOT
- NOTNULL
- NOT NULL
- NOW
- NULL
- NULLS

View File

@ -27,10 +27,10 @@ The preceding SQL command shows all dnodes in the cluster with the ID, endpoint,
## Delete a DNODE
```sql
DROP DNODE {dnode_id | dnode_endpoint}
DROP DNODE dnode_id
```
You can delete a dnode by its ID or by its endpoint. Note that deleting a dnode does not stop its process. You must stop the process after the dnode is deleted.
Note that deleting a dnode does not stop its process. You must stop the process after the dnode is deleted.
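For example, assuming a hypothetical dnode with id 3 taken from `SHOW DNODES` output:
```sql
-- Remove dnode 3 from the cluster; its process must still be stopped manually.
DROP DNODE 3;
```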
## Modify Dnode Configuration

View File

@ -210,9 +210,13 @@ Provides information about TDengine users. Users whose SYSINFO attribute is 0 ca
| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------- | ---------------- |
| 1 | user_name | VARCHAR(23) | User name |
| 2 | privilege | VARCHAR(256) | User permissions |
| 3 | create_time | TIMESTAMP | Creation time |
| 1 | name | VARCHAR(24) | User name |
| 2 | super | TINYINT | Whether the user is a superuser. 1 means yes; 0 means no. |
| 3 | enable | TINYINT | Whether the user is enabled. 1 means yes; 0 means no. |
| 4 | sysinfo | TINYINT | Whether the user can query system info. 1 means yes; 0 means no. |
| 5 | create_time | TIMESTAMP | Creation time |
| 6 | allowed_host | VARCHAR(49152)| IP whitelist |
## INS_GRANTS

View File

@ -39,10 +39,10 @@ This is an example:
```sql
taos> show users;
name | super | enable | sysinfo | create_time |
================================================================================
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
name | super | enable | sysinfo | createdb | create_time | allowed_host |
=========================================================================================================
test | 0 | 1 | 1 | 0 | 2022-08-29 15:10:27.315 | 127.0.0.1 |
root | 1 | 1 | 1 | 1 | 2022-08-29 15:03:34.710 | 127.0.0.1 |
Query OK, 2 rows in database (0.001657s)
```
@ -50,10 +50,10 @@ Alternatively, you can get the user information by querying a built-in table, IN
```sql
taos> select * from information_schema.ins_users;
name | super | enable | sysinfo | create_time |
================================================================================
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
name | super | enable | sysinfo | createdb | create_time | allowed_host |
=========================================================================================================
test | 0 | 1 | 1 | 0 | 2022-08-29 15:10:27.315 | 127.0.0.1 |
root | 1 | 1 | 1 | 1 | 2022-08-29 15:03:34.710 | 127.0.0.1 |
Query OK, 2 rows in database (0.001953s)
```
@ -67,17 +67,19 @@ DROP USER user_name;
```sql
ALTER USER user_name alter_user_clause
alter_user_clause: {
PASS 'literal'
| ENABLE value
| SYSINFO value
| CREATEDB value
}
```
- PASS: Modify the user password.
- ENABLE: Specify whether the user is enabled or disabled. 1 indicates enabled and 0 indicates disabled.
- SYSINFO: Specify whether the user can query system information. 1 indicates that the user can query system information and 0 indicates that the user cannot query system information.
- CREATEDB: Specify whether the user can create databases. 1 indicates that the user can create databases and 0 indicates that the user cannot create databases.
For example, you can use below command to disable user `test`:
@ -89,53 +91,4 @@ Query OK, 0 of 0 rows affected (0.001160s)
## Grant Permissions
```sql
GRANT privileges ON priv_level TO user_name
privileges : {
ALL
| priv_type [, priv_type] ...
}
priv_type : {
READ
| WRITE
}
priv_level : {
dbname.*
| *.*
}
```
Grant permissions to a user, this feature is only available in enterprise edition.
Permissions are granted on the database level. You can grant read or write permissions.
TDengine has superusers and standard users. The default superuser name is root. This account has all permissions. You can use the superuser account to create standard users. With no permissions, standard users can create databases and have permissions on the databases that they create. These include deleting, modifying, querying, and writing to their own databases. Superusers can grant users permission to read and write other databases. However, standard users cannot delete or modify databases created by other users.
For non-database objects such as users, dnodes, and user-defined functions, standard users have read permissions only, generally by means of the SHOW statement. Standard users cannot create or modify these objects.
## Revoke Permissions
```sql
REVOKE privileges ON priv_level FROM user_name
privileges : {
ALL
| priv_type [, priv_type] ...
}
priv_type : {
READ
| WRITE
}
priv_level : {
dbname.*
| *.*
}
```
Revoke permissions from a user, this feature is only available in enterprise edition.
Permission control is only available in TDengine Enterprise; please contact the TDengine sales team.

View File

@ -53,7 +53,7 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 64;
```
For more information about user-defined functions, see [User-Defined Functions](../../develop/udf).
For more information about user-defined functions, see [User-Defined Functions](https://docs.tdengine.com/develop/udf/).
## Manage UDF

View File

@ -28,9 +28,9 @@ In the function list, you can only specify supported aggregate functions (see be
Since the output of TSMA is a super table, the row length of the output table is subject to the maximum row length limit. The size of the `intermediate results of different functions` varies, but they are generally larger than the original data size. If the row length of the output table exceeds the maximum row length limit, an error `Row length exceeds max length` will be reported. In this case, you need to reduce the number of functions or split commonly used function groups into multiple TSMA objects.
The window size is limited to [1ms ~ 1h]. The unit of INTERVAL is the same as the INTERVAL clause in the query, such as a (milliseconds), b (nanoseconds), h (hours), m (minutes), s (seconds), u (microseconds).
The window size is limited to [1m ~ 1h]. The unit of INTERVAL is the same as the INTERVAL clause in the query, such as a (milliseconds), b (nanoseconds), h (hours), m (minutes), s (seconds), u (microseconds).
TSMA is a database-level object, but it is globally unique. The number of TSMA that can be created in the cluster is limited by the parameter `maxTsmaNum`, with a default value of 8 and a range of [0-12]. Note that since TSMA background calculation uses stream computing, creating a TSMA will create a stream. Therefore, the number of TSMA that can be created is also limited by the number of existing streams and the maximum number of streams that can be created.
TSMA is a database-level object, but it is globally unique. The number of TSMA that can be created in the cluster is limited by the parameter `maxTsmaNum`, with a default value of 3 and a range of [0-3]. Note that since TSMA background calculation uses stream computing, creating a TSMA will create a stream. Therefore, the number of TSMA that can be created is also limited by the number of existing streams and the maximum number of streams that can be created.
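As a sketch of a TSMA definition within the limits above (names and window are illustrative; count(ts) stands in for count(*), as noted in the function table below):
```sql
-- A 10-minute TSMA on `meters`, within the [1m ~ 1h] window limit.
CREATE TSMA tsma_meters_10m ON meters
FUNCTION(avg(current), max(voltage), count(ts)) INTERVAL(10m);
```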
## Supported Functions
| function | comments |
@ -44,7 +44,6 @@ TSMA is a database-level object, but it is globally unique. The number of TSMA t
|count| If you want to use count(*), you should create the count(ts) function|
|spread||
|stddev||
|hyperloglog||
|||
## Drop TSMA
@ -57,6 +56,8 @@ If there are other TSMA created based on the TSMA being deleted, the delete oper
## TSMA Calculation
The calculation result of TSMA is a super table in the same database as the original table, but it is not visible to users. It cannot be deleted manually; it is deleted automatically when `DROP TSMA` is executed. The calculation of TSMA is done through stream computing, which is a background asynchronous process. The calculation result of TSMA is not guaranteed to be real-time, but eventual correctness is guaranteed.
If there is no data in the original subtable, the corresponding output subtable may not be created. Therefore, in count queries, even if `countAlwaysReturnValue` is configured, the result of this subtable will not be returned.
When there is a large amount of historical data, the stream computing will first process the historical data after the TSMA is created. During this period, the newly created TSMA will not be used. TSMA results are automatically recalculated when data updates, deletions, or expired data arrive. During the recalculation period, the TSMA query results are not guaranteed to be real-time. If you want to query real-time data, you can use the hint `/*+ skip_tsma() */` in the SQL statement or disable the `querySmaOptimize` parameter to query from the original data.
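For example, a sketch that bypasses TSMA results and computes from the original data:
```sql
SELECT /*+ skip_tsma() */ avg(current) FROM meters;
```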
## Using and Limitations of TSMA
@ -65,6 +66,8 @@ Client configuration parameter: `querySmaOptimize`, used to control whether to u
Client configuration parameter: `maxTsmaCalcDelay`, in seconds, is used to control the acceptable TSMA calculation delay for users. If the calculation progress of a TSMA is within this range from the latest time, the TSMA will be used. If it exceeds this range, it will not be used. The default value is 600 (10 minutes), with a minimum value of 600 (10 minutes) and a maximum value of 86400 (1 day).
Client configuration parameter: `tsmaDataDeleteMark`, in milliseconds, consistent with the stream computing parameter `deleteMark`, is used to control the retention time of intermediate results in stream computing. The default value is 1 day, with a minimum value of 1 hour. Therefore, historical data that is older than the configuration parameter will not have the intermediate results saved in stream computing. If you modify the data within these time windows, the TSMA calculation results will not include the updated results. This means that the TSMA results will be inconsistent with querying the original data.
### Using TSMA During Query
The aggregate functions defined in TSMA can be directly used in most query scenarios. If multiple TSMAs are available, the one with the larger window size is preferred. For unclosed windows, the calculation can be done using a smaller-window TSMA or the original data. However, there are certain scenarios where TSMA cannot be used (see below). In such cases, the entire query is calculated from the original data.
@ -131,4 +134,4 @@ SHOW [db_name.]TSMAS;
SELECT * FROM information_schema.ins_tsma;
```
If more functions are specified during creation, and the column names are longer, the function list may be truncated when displayed (currently supports a maximum output of 256KB)
If many functions are specified during creation and the column names are long, the displayed function list may be truncated (the current maximum output is 256KB)

View File

@ -191,11 +191,11 @@ Left/Right ASOF Join are supported between super tables, normal tables, child ta
| **Operator** | **Meaning for Left ASOF Join** |
| :-------------: | ------------------------ |
| > | Match rows in the right table whose primary key timestamp is less than and the most closed to the left table's primary key timestamp |
| >= | Match rows in the right table whose primary key timestamp is less than or equal to and the most closed to the left table's primary key timestamp |
| &gt; | Match rows in the right table whose primary key timestamp is less than and the most closed to the left table's primary key timestamp |
| &gt;= | Match rows in the right table whose primary key timestamp is less than or equal to and the most closed to the left table's primary key timestamp |
| = | Match rows in the right table whose primary key timestamp is equal to the left table's primary key timestamp |
| < | Match rows in the right table whose the primary key timestamp is greater than and the most closed to the left table's primary key timestamp |
| <= | Match rows in the right table whose primary key timestamp is greater than or equal to and the most closed to the left table's primary key timestamp |
| &lt; | Match rows in the right table whose the primary key timestamp is greater than and the most closed to the left table's primary key timestamp |
| &lt;= | Match rows in the right table whose primary key timestamp is greater than or equal to and the most closed to the left table's primary key timestamp |
For Right ASOF Join, the above operators have the opposite meaning.

View File

@ -0,0 +1,86 @@
---
title: Configurable Column Compression
description: Configurable column storage compression method
---
Since TDengine 3.3.0.0, a more advanced compression feature has been introduced: for each column, you can specify whether to compress it, the compression method, and the compression level.
## Compression Terminology Definition
### Compression Level Definition
- Level 1 Compression: Encoding the data, which is essentially a form of compression
- Level 2 Compression: Compressing data blocks.
### Compression Algorithm Level
In this article, it specifically refers to the level within the secondary compression algorithm. For zstd, for example, at least 8 levels can be selected; each level performs differently, and essentially it is a tradeoff between compression ratio, compression speed, and decompression speed. To avoid the difficulty of choice, it is simplified and defined as the following three levels:
- high: The highest compression ratio, the worst compression speed and decompression speed.
- low: The best compression speed and decompression speed, the lowest compression ratio.
- medium: Balancing compression ratio, compression speed, and decompression speed.
### Compression Algorithm List
- Encoding algorithm list (Level 1 compression): simple8b, bit-packing, delta-i, delta-d, disabled
- Compression algorithm list (Level 2 compression): lz4, zlib, zstd, tsz, xz, disabled
- Default compression algorithm list and applicable range for each data type
| Data Type | Optional Encoding Algorithm | Default Encoding Algorithm | Optional Compression Algorithm|Default Compression Algorithm| Default Compression Level|
| :-----------:|:----------:|:-------:|:-------:|:----------:|:----:|
| tinyint/utinyint/smallint/usmallint/int/uint | simple8b| simple8b | lz4/zlib/zstd/xz| lz4 | medium|
| bigint/ubigint/timestamp | simple8b/delta-i | delta-i |lz4/zlib/zstd/xz | lz4| medium|
|float/double | delta-d|delta-d |lz4/zlib/zstd/xz/tsz|lz4| medium|
|binary/nchar| disabled| disabled|lz4/zlib/zstd/xz| lz4| medium|
|bool| bit-packing| bit-packing| lz4/zlib/zstd/xz| lz4| medium|
## SQL
### Create Table with Compression
```sql
CREATE TABLE [dbname.]tabname (colName colType [ENCODE 'encode_type'] [COMPRESS 'compress_type' [LEVEL 'level']] [, other create_definition] ...)
```
**Parameter Description**
- tabname: Super table or ordinary table name
- encode_type: Level 1 compression, specific parameters see the above list
- compress_type: Level 2 compression, specific parameters see the above list
- level: Specifically refers to the level of secondary compression. The default value is medium; the abbreviations 'h'/'l'/'m' are supported
**Function Description**
- Specify the compression method for the column when creating a table
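A minimal sketch, assuming a hypothetical table `sensor_data`:
```sql
-- Encode the double column with delta-d and compress with zstd at high level.
CREATE TABLE sensor_data (
  ts TIMESTAMP,
  val DOUBLE ENCODE 'delta-d' COMPRESS 'zstd' LEVEL 'high'
);
```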
### Change Compression Method
```sql
ALTER TABLE [db_name.]tabName MODIFY COLUMN colName [ENCODE 'encode_type'] [COMPRESS 'compress_type'] [LEVEL 'level']
```
**Parameter Description**
- tabName: Table name, can be a super table or an ordinary table
- colName: The column to change the compression algorithm, can only be a normal column
**Function Description**
- Change the compression method of the column
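Continuing the hypothetical `sensor_data` example above:
```sql
-- Switch the column back to the default lz4/medium, then inspect the result.
ALTER TABLE sensor_data MODIFY COLUMN val COMPRESS 'lz4' LEVEL 'medium';
DESCRIBE sensor_data;
```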
### View Compression Method
```sql
DESCRIBE [dbname.]tabName
```
**Function Description**
- Display basic information of the column, including type and compression method
## Compatibility
- Fully compatible with existing data
- Can't be rolled back once you upgrade to 3.3.0.0

View File

@ -43,201 +43,204 @@ Launch `TDinsight.sh` with the command above and restart Grafana, then open Dash
The data of the TDinsight dashboard is stored in the `log` database by default (you can change it in taosKeeper's config file; for more information, please refer to the [taoskeeper document](../../reference/taosKeeper)). taosKeeper creates the log database on startup.
### cluster\_info table
### taosd\_cluster\_basic table
`cluster_info` table contains cluster information records.
`taosd_cluster_basic` table contains cluster basic information.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|first\_ep|VARCHAR||first ep of cluster|
|first\_ep\_dnode\_id|INT||dnode id or first\_ep|
|version|VARCHAR||tdengine version. such as: 3.0.4.0|
|master\_uptime|FLOAT||days of master's uptime|
|monitor\_interval|INT||monitor interval in second|
|dbs\_total|INT||total number of databases in cluster|
|tbs\_total|BIGINT||total number of tables in cluster|
|stbs\_total|INT||total number of stables in cluster|
|dnodes\_total|INT||total number of dnodes in cluster|
|dnodes\_alive|INT||total number of dnodes in ready state|
|mnodes\_total|INT||total number of mnodes in cluster|
|mnodes\_alive|INT||total number of mnodes in ready state|
|vgroups\_total|INT||total number of vgroups in cluster|
|vgroups\_alive|INT||total number of vgroups in ready state|
|vnodes\_total|INT||total number of vnode in cluster|
|vnodes\_alive|INT||total number of vnode in ready state|
|connections\_total|INT||total number of connections to cluster|
|topics\_total|INT||total number of topics in cluster|
|streams\_total|INT||total number of streams in cluster|
|protocol|INT||protocol version|
|cluster\_id|NCHAR|TAG|cluster id|
|cluster_version|VARCHAR||tdengine version. such as: 3.0.4.0|
|cluster\_id|VARCHAR|TAG|cluster id|
### d\_info table
### taosd\_cluster\_info table
`d_info` table contains dnodes information records.
`taosd_cluster_info` table contains cluster information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|status|VARCHAR||dnode status|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
|cluster\_uptime|DOUBLE||seconds of master's uptime|
|dbs\_total|DOUBLE||total number of databases in cluster|
|tbs\_total|DOUBLE||total number of tables in cluster|
|stbs\_total|DOUBLE||total number of stables in cluster|
|dnodes\_total|DOUBLE||total number of dnodes in cluster|
|dnodes\_alive|DOUBLE||total number of dnodes in ready state|
|mnodes\_total|DOUBLE||total number of mnodes in cluster|
|mnodes\_alive|DOUBLE||total number of mnodes in ready state|
|vgroups\_total|DOUBLE||total number of vgroups in cluster|
|vgroups\_alive|DOUBLE||total number of vgroups in ready state|
|vnodes\_total|DOUBLE||total number of vnode in cluster|
|vnodes\_alive|DOUBLE||total number of vnode in ready state|
|connections\_total|DOUBLE||total number of connections to cluster|
|topics\_total|DOUBLE||total number of topics in cluster|
|streams\_total|DOUBLE||total number of streams in cluster|
|grants_expire\_time|DOUBLE||time until grants expire in seconds|
|grants_timeseries\_used|DOUBLE||timeseries used|
|grants_timeseries\_total|DOUBLE||total timeseries|
|cluster\_id|VARCHAR|TAG|cluster id|
### m\_info table
### taosd\_vgroups\_info table
`m_info` table contains mnode information records.
`taosd_vgroups_info` table contains vgroups information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|role|VARCHAR||the role of mnode. leader or follower|
|mnode\_id|INT|TAG|master node id|
|mnode\_ep|NCHAR|TAG|master node endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
|tables\_num|DOUBLE||number of tables per vgroup|
|status|DOUBLE||status, value range: unsynced = 0, ready = 1|
|vgroup\_id|VARCHAR|TAG|vgroup id|
|database\_name|VARCHAR|TAG|database for the vgroup|
|cluster\_id|VARCHAR|TAG|cluster id|
### dnodes\_info table
### taosd\_dnodes\_info table
`dnodes_info` table contains dnodes information records.
`taosd_dnodes_info` table contains dnodes information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|uptime|FLOAT||dnode uptime in `days`|
|cpu\_engine|FLOAT||cpu usage of tdengine. read from `/proc/<taosd_pid>/stat`|
|cpu\_system|FLOAT||cpu usage of server. read from `/proc/stat`|
|cpu\_cores|FLOAT||cpu cores of server|
|mem\_engine|INT||memory usage of tdengine. read from `/proc/<taosd_pid>/status`|
|mem\_system|INT||available memory on the server in `KB`|
|mem\_total|INT||total memory of server in `KB`|
|disk\_engine|INT|||
|disk\_used|BIGINT||usage of data dir in `bytes`|
|disk\_total|BIGINT||the capacity of data dir in `bytes`|
|net\_in|FLOAT||network throughput rate in byte/s. read from `/proc/net/dev`|
|net\_out|FLOAT||network throughput rate in byte/s. read from `/proc/net/dev`|
|io\_read|FLOAT||io throughput rate in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_write|FLOAT||io throughput rate in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_read\_disk|FLOAT||io throughput rate of disk in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_write\_disk|FLOAT||io throughput rate of disk in byte/s. read from `/proc/<taosd_pid>/io`|
|req\_select|INT||number of select queries received per dnode|
|req\_select\_rate|FLOAT||number of select queries received per dnode divided by monitor interval.|
|req\_insert|INT||number of insert queries received per dnode|
|req\_insert\_success|INT||number of successfully insert queries received per dnode|
|req\_insert\_rate|FLOAT||number of insert queries received per dnode divided by monitor interval|
|req\_insert\_batch|INT||number of batch insertions|
|req\_insert\_batch\_success|INT||number of successful batch insertions|
|req\_insert\_batch\_rate|FLOAT||number of batch insertions divided by monitor interval|
|errors|INT||dnode errors|
|vnodes\_num|INT||number of vnodes per dnode|
|masters|INT||number of master vnodes|
|has\_mnode|INT||if the dnode has mnode|
|has\_qnode|INT||if the dnode has qnode|
|has\_snode|INT||if the dnode has snode|
|has\_bnode|INT||if the dnode has bnode|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
|uptime|DOUBLE||dnode uptime in `seconds`|
|cpu\_engine|DOUBLE||cpu usage of tdengine. read from `/proc/<taosd_pid>/stat`|
|cpu\_system|DOUBLE||cpu usage of server. read from `/proc/stat`|
|cpu\_cores|DOUBLE||cpu cores of server|
|mem\_engine|DOUBLE||memory usage of tdengine. read from `/proc/<taosd_pid>/status`|
|mem\_free|DOUBLE||available memory on the server in `KB`|
|mem\_total|DOUBLE||total memory of server in `KB`|
|disk\_used|DOUBLE||usage of data dir in `bytes`|
|disk\_total|DOUBLE||the capacity of data dir in `bytes`|
|system\_net\_in|DOUBLE||network throughput rate in byte/s. read from `/proc/net/dev`|
|system\_net\_out|DOUBLE||network throughput rate in byte/s. read from `/proc/net/dev`|
|io\_read|DOUBLE||io throughput rate in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_write|DOUBLE||io throughput rate in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_read\_disk|DOUBLE||io throughput rate of disk in byte/s. read from `/proc/<taosd_pid>/io`|
|io\_write\_disk|DOUBLE||io throughput rate of disk in byte/s. read from `/proc/<taosd_pid>/io`|
|vnodes\_num|DOUBLE||number of vnodes per dnode|
|masters|DOUBLE||number of master vnodes|
|has\_mnode|DOUBLE||if the dnode has mnode, value range: include = 1, not_include = 0|
|has\_qnode|DOUBLE||if the dnode has qnode, value range: include = 1, not_include = 0|
|has\_snode|DOUBLE||if the dnode has snode, value range: include = 1, not_include = 0|
|has\_bnode|DOUBLE||if the dnode has bnode, value range: include = 1, not_include = 0|
|error\_log\_count|DOUBLE||error count|
|info\_log\_count|DOUBLE||info count|
|debug\_log\_count|DOUBLE||debug count|
|trace\_log\_count|DOUBLE||trace count|
|dnode\_id|VARCHAR|TAG|dnode id|
|dnode\_ep|VARCHAR|TAG|dnode endpoint|
|cluster\_id|VARCHAR|TAG|cluster id|
### data\_dir table
### taosd\_dnodes\_status table
`data_dir` table contains data directory information records.
`taosd_dnodes_status` table contains dnodes information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||data directory. default is `/var/lib/taos`|
|level|INT||level for multi-level storage|
|avail|BIGINT||available space for data directory in `bytes`|
|used|BIGINT||used space for data directory in `bytes`|
|total|BIGINT||total space for data directory in `bytes`|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
|status|DOUBLE||dnode status, value range: ready = 1, offline = 0|
|dnode\_id|VARCHAR|TAG|dnode id|
|dnode\_ep|VARCHAR|TAG|dnode endpoint|
|cluster\_id|VARCHAR|TAG|cluster id|
### log\_dir table
### taosd\_dnodes\_log\_dir table
`log_dir` table contains log directory information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||log directory. default is `/var/log/taos/`|
|avail|BIGINT||available space for log directory in `bytes`|
|used|BIGINT||used space for data directory in `bytes`|
|total|BIGINT||total space for data directory in `bytes`|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
|avail|DOUBLE||available space for log directory in `bytes`|
|used|DOUBLE||used space for data directory in `bytes`|
|total|DOUBLE||total space for data directory in `bytes`|
|name|VARCHAR|TAG|log directory. default is `/var/log/taos/`|
|dnode\_id|VARCHAR|TAG|dnode id|
|dnode\_ep|VARCHAR|TAG|dnode endpoint|
|cluster\_id|VARCHAR|TAG|cluster id|
### temp\_dir table
### taosd\_dnodes\_data\_dir table
`temp_dir` table contains temp dir information records.
`taosd_dnodes_data_dir` table contains data directory information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||temp directory. default is `/tmp/`|
|avail|BIGINT||available space for temp directory in `bytes`|
|used|BIGINT||used space for temp directory in `bytes`|
|total|BIGINT||total space for temp directory in `bytes`|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
|avail|DOUBLE||available space for data directory in `bytes`|
|used|DOUBLE||used space for data directory in `bytes`|
|total|DOUBLE||total space for data directory in `bytes`|
|level|VARCHAR|TAG|level for multi-level storage|
|name|VARCHAR|TAG|data directory. default is `/var/lib/taos`|
|dnode\_id|VARCHAR|TAG|dnode id|
|dnode\_ep|VARCHAR|TAG|dnode endpoint|
|cluster\_id|VARCHAR|TAG|cluster id|
### vgroups\_info table
### taosd\_mnodes\_info table
`vgroups_info` table contains vgroups information records.
`taosd_mnodes_info` table contains mnode information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|vgroup\_id|INT||vgroup id|
|database\_name|VARCHAR||database for the vgroup|
|tables\_num|BIGINT||number of tables per vgroup|
|status|VARCHAR||status|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
|role|DOUBLE||the role of mnode. value range: offline = 0, follower = 100, candidate = 101, leader = 102, error = 103, learner = 104|
|mnode\_id|VARCHAR|TAG|master node id|
|mnode\_ep|VARCHAR|TAG|master node endpoint|
|cluster\_id|VARCHAR|TAG|cluster id|
### vnodes\_role table
### taosd\_vnodes\_role table
`vnodes_role` table contains vnode role information records.
`taosd_vnodes_role` table contains vnode role information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|vnode\_role|VARCHAR||role. leader or follower|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
|role|DOUBLE||role. value range: offline = 0, follower = 100, candidate = 101, leader = 102, error = 103, learner = 104|
|vgroup\_id|VARCHAR|TAG|vgroup id|
|database\_name|VARCHAR|TAG|database for the vgroup|
|dnode\_id|VARCHAR|TAG|dnode id|
|cluster\_id|VARCHAR|TAG|cluster id|
### log\_summary table
### taosd\_sql\_req table
`log_summary` table contains log summary information records.
`taosd_sql_req` table contains taosd sql records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|error|INT||error count|
|info|INT||info count|
|debug|INT||debug count|
|trace|INT||trace count|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
|count|DOUBLE||sql count|
|result|VARCHAR|TAG|sql execution result, value range: Success, Failed|
|username|VARCHAR|TAG|user name who executed the sql|
|sql\_type|VARCHAR|TAG|sql type, value range: inserted_rows|
|dnode\_id|VARCHAR|TAG|dnode id|
|dnode\_ep|VARCHAR|TAG|dnode endpoint|
|vgroup\_id|VARCHAR|TAG|vgroup id|
|cluster\_id|VARCHAR|TAG|cluster id|
### grants\_info table
### taos\_sql\_req table
`grants_info` table contains grants information records.
`taos_sql_req` table contains taos sql records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|expire\_time|BIGINT||time until grants expire in seconds|
|timeseries\_used|BIGINT||timeseries used|
|timeseries\_total|BIGINT||total timeseries|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
|count|DOUBLE||sql count|
|result|VARCHAR|TAG|sql execution result, value range: Success, Failed|
|username|VARCHAR|TAG|user name who executed the sql|
|sql\_type|VARCHAR|TAG|sql type, value range: select, insert, delete|
|cluster\_id|VARCHAR|TAG|cluster id|
### taos\_slow\_sql table
`taos_slow_sql` table contains taos slow sql records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|count|DOUBLE||sql count|
|result|VARCHAR|TAG|sql execution result, value range: Success, Failed|
|username|VARCHAR|TAG|user name who executed the sql|
|duration|VARCHAR|TAG|sql execution duration, value range: 3-10s, 10-100s, 100-1000s, 1000s-|
|cluster\_id|VARCHAR|TAG|cluster id|
### keeper\_monitor table

View File

@ -68,7 +68,7 @@ The following return value results indicate that the verification passed.
## HTTP request URL format
```text
http://<fqdn>:<port>/rest/sql/[db_name][?tz=timezone[&req_id=req_id]]
http://<fqdn>:<port>/rest/sql/[db_name][?tz=timezone[&req_id=req_id][&row_with_meta=true]]
```
Parameter Description:
@ -78,6 +78,7 @@ Parameter Description:
- db_name: Optional parameter that specifies the default database name for the executed SQL command.
- tz: Optional parameter that specifies the timezone of the returned time, following the IANA Time Zone rules, e.g. `America/New_York`.
- req_id: Optional parameter that specifies the request id for tracing.
- row_with_meta: Optional parameter that specifies whether each row of data carries the column names. The default value is `false`. (Supported starting from version 3.3.2.0.)
:::note
@ -336,6 +337,82 @@ Description:
- code: (`int`) Error code.
- desc: (`string`): Error code description.
#### Return key-value pair
When the parameter `row_with_meta=true` is specified, the data returned in the `data` field changes from array format to object format, where the key of the object is the column name and the value is the data.
insert response example:
```json
{
"code": 0,
"column_meta": [
[
"affected_rows",
"INT",
4
]
],
"data": [
{
"affected_rows": 1
}
],
"rows": 1
}
```
query response example:
```json
{
"code": 0,
"column_meta": [
[
"ts",
"TIMESTAMP",
8
],
[
"current",
"FLOAT",
4
],
[
"voltage",
"INT",
4
],
[
"phase",
"FLOAT",
4
],
[
"groupid",
"INT",
4
],
[
"location",
"VARCHAR",
24
]
],
"data": [
{
"ts": "2017-07-14T02:40:00.000Z",
"current": -2.498076,
"voltage": 0,
"phase": -0.846025,
"groupid": 8,
"location": "California.Sunnyvale"
}
],
"rows": 1
}
```
## Custom Authorization Code
HTTP requests require an authorization code `<TOKEN>` for identification purposes. The administrator usually provides the authorization code, and it can be obtained simply by sending an ``HTTP GET`` request as follows:

View File

@ -92,7 +92,7 @@ taosBenchmark -f <json file>
</details>
## Command-line argument in detail
## Command-line arguments in detail
- **-f/--file &lt;json file&gt;** :
specify the configuration file to use. This file includes all parameters. Users should not use this parameter together with other command-line parameters. There is no default value.

View File

@ -58,6 +58,7 @@ And many more parameters.
- -a AUTHSTR: Authorization information to connect to the server.
- -A: Obtain authorization information from username and password.
- -B: Set BI mode; if set, all output follows the format required by BI tools
- -c CONFIGDIR: Specify the directory where the configuration file is located. The default is `/etc/taos`, and the default name of the configuration file in this directory is `taos.cfg`
- -C: Print the configuration parameters of `taos.cfg` in the default directory or specified by -c
- -d DATABASE: Specify the database to use when connecting to the server

View File

@ -206,11 +206,11 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
| Attribute | Description |
| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Applicable | Server only |
| Applicable | Server and Client |
| Meaning | count()/hyperloglog() return value or not if the input data is empty or NULL |
| Vlue Range | 0: Return empty line, 1: Return 0 |
| Value Range | 0: Return empty line, 1: Return 0 |
| Default | 1 |
| Notes | When this parameter is setting to 1, for queries containing GROUP BY, PARTITION BY and INTERVAL clause, and input data in certain groups or windows is empty or NULL, the corresponding groups or windows have no return values |
| Notes | When this parameter is set to 1, for queries containing an INTERVAL clause or queries using TSMA, if the input data in certain groups or windows is empty or NULL, the corresponding groups or windows have no return values. The server and client must use the same value|
### maxNumOfDistinctRes
@ -250,6 +250,15 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
| Value Range | 600s - 86400s, 10 minutes to 1 hour |
| Default value | 600s |
### tsmaDataDeleteMark
| Attribute | Description |
| -------- | --------------------------- |
| Applicable | Client only |
| Meaning | The duration for which the intermediate results of TSMA calculations are saved, in milliseconds |
| Value Range | >= 3600000, greater than or equal to 1 hour |
| Default value | 86400000, 1d |
## Locale Parameters
@ -412,7 +421,7 @@ The charset that takes effect is UTF-8.
| Applicable | Server Only |
| Meaning | Maximum number of vnodes per dnode |
| Value Range | 0-4096 |
| Default Value | 2x the CPU cores |
| Default Value | 2x the CPU cores plus 5 |
## Performance Tuning
@ -423,7 +432,7 @@ The charset that takes effect is UTF-8.
| Applicable | Server Only |
| Meaning | Maximum number of threads to commit |
| Value Range | 0-1024 |
| Default Value | |
| Default Value | 4 |
## Log Parameters
@ -720,6 +729,57 @@ The charset that takes effect is UTF-8.
| Value Range | -1: none message is compressed; 0: all messages are compressed; N (N>0): messages exceeding N bytes are compressed |
| Default | -1 |
### fPrecision
| Attribute | Description |
| -------- | -------------------------------- |
| Applicable | Server Only |
| Meaning | Compression precision for float data type |
| Value Range | 0.1 ~ 0.00000001 |
| Default | 0.00000001 |
| Note | Float values below this precision will be truncated |
### dPrecision
| Attribute | Description |
| -------- | -------------------------------- |
| Applicable | Server Only |
| Meaning | Compression precision for double data type |
| Value Range | 0.1 ~ 0.0000000000000001 |
| Default | 0.0000000000000001 |
| Note | Float values below this precision will be truncated |
### lossyColumn
| Attribute | Description |
| -------- | -------------------------------- |
| Applicable | Server Only |
| Meaning | Enable TSZ lossy compression for float and/or double |
| Value Range | float, double |
| Default | none: disable TSZ lossy compression |
**Additional Notes**
1. It is only available since version 3.2.0.0; once you upgrade to 3.2.0.0 and enable this parameter, you can't downgrade to a previous version
2. The TSZ compression algorithm compresses data based on a data prediction technique, so it is more suitable for data with a specific pattern
3. The TSZ compression algorithm may take longer but has a better compression ratio, so it is suitable when you have enough CPU resources and are more sensitive to disk usage
4. Example: enable TSZ for both float and double
```shell
lossyColumns float|double
```
5. After configuring, the taosd service needs to be restarted. After restarting, if you see the following output in the taosd logfile, it means the feature has been enabled
```sql
02/22 10:49:27.607990 00002933 UTL lossyColumns float|double
```
### ifAdtFse
| Attribute | Description |
| -------- | -------------------------------- |
| Applicable | Server Only |
| Meaning | Replace HUFFMAN with FSE in TSZ, FSE is faster when compressing but slower when uncompressing |
| Value Range | 0: Use HUFFMAN, 1: Use FSE |
| Default | 0: Use HUFFMAN |
## Other Parameters
@ -776,8 +836,8 @@ The charset that takes effect is UTF-8.
| --------- | ----------------------------- |
| Applicable | Server Only |
| Meaning | Max num of TSMAs |
| Value Range | 0-12 |
| Default Value | 8 |
| Value Range | 0-3 |
| Default Value | 3 |
## 3.0 Parameters

View File

@ -27,6 +27,7 @@ where:
- `tag_set` will be used as tags, with format like `<tag_key>=<tag_value>,<tag_key>=<tag_value>` Enter a space between `tag_set` and `field_set`.
- `field_set` will be used as data columns, with format like `<field_key>=<field_value>,<field_key>=<field_value>` Enter a space between `field_set` and `timestamp`.
- `timestamp` is the primary key timestamp corresponding to this row of data
- Schemaless writing does not support writing data to tables with a second primary key column.
All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes (").
@ -38,24 +39,24 @@ In the schemaless writing data line protocol, each data item in the field_set ne
- If there are double quotes on both sides and a B/b prefix, it means VARBINARY type. Hexadecimal strings starting with \x, or plain strings, can be used inside the double quotes. For example `B"\x98f46e"` `B"hello"`.
- Spaces, equals sign (=), comma (,), double quote ("), and backslash (\\) need to be escaped with a backslash (\\) in front. (All refer to the ASCII character). The rules are as follows:
| **Serial number** | **Element** | **Escape characters** |
| -------- | ----------- | ----------------------------- |
| 1 | Measurement | Comma, Space |
| 2 | Tag key | Comma, Equals Sign, Space |
| 3 | Tag value | Comma, Equals Sign, Space |
| 4 | Field key | Comma, Equals Sign, Space |
| 5 | Field value | Double quote, Backslash |
| **Serial number** | **Element** | **Escape characters** |
| ----------------- | ----------- | ------------------------- |
| 1 | Measurement | Comma, Space |
| 2 | Tag key | Comma, Equals Sign, Space |
| 3 | Tag value | Comma, Equals Sign, Space |
| 4 | Field key | Comma, Equals Sign, Space |
| 5 | Field value | Double quote, Backslash |
With two contiguous backslashes, the first is interpreted as an escape character. Examples of backslash escape rules are as follows:
| **Serial number** | **Backslashes** | **Interpreted as** |
| -------- | ----------- | ----------------------------- |
| 1 | \ | \ |
| 2 | \\\\ | \ |
| 3 | \\\\\\ | \\\\ |
| 4 | \\\\\\\\ | \\\\ |
| 5 | \\\\\\\\\\ | \\\\\\ |
| 6 | \\\\\\\\\\\\ | \\\\\\ |
| **Serial number** | **Backslashes** | **Interpreted as** |
| ----------------- | --------------- | ------------------ |
| 1 | \ | \ |
| 2 | \\\\ | \ |
| 3 | \\\\\\ | \\\\ |
| 4 | \\\\\\\\ | \\\\ |
| 5 | \\\\\\\\\\ | \\\\\\ |
| 6 | \\\\\\\\\\\\ | \\\\\\ |
- Numeric types will be distinguished from data types by the suffix.

View File

@ -23,7 +23,7 @@ Methods of installing taosKeeper:
### Configuration
taosKeeper needs to be executed on the terminal of the operating system, it supports three configuration methods: [Command-line arguments](#command-line-arguments-in-detail), [environment variable](#environment-variable-in-detail) and [configuration file](#configuration-file-parameters-in-detail). The precedence of those is Command-line, environment variable and configuration file.
taosKeeper needs to be executed on the terminal of the operating system, it supports three configuration methods: Command-line arguments, environment variable and configuration file. The precedence of those is Command-line, environment variable and configuration file.
**Make sure that the TDengine cluster is running correctly before running taosKeeper.** Ensure that the monitoring service in TDengine has been started. At least the values of `monitor` and `monitorFqdn` need to be set in `taos.cfg`.
@ -32,7 +32,7 @@ monitor 1
monitorFqdn localhost # taoskeeper's FQDN
```
For more information, see [TDengine Monitoring Configuration](../config/#monitoring).
For more information, see [TDengine Monitoring Configuration](../config/#monitoring-parameters).
### Quick Launch
@ -137,17 +137,10 @@ port = 6041
username = "root"
password = "taosdata"
# set taosAdapter to monitor
[taosAdapter]
address = ["127.0.0.1:6041","192.168.1.95:6041"]
[metrics]
# monitoring metric prefix
prefix = "taos"
# cluster data identifier
cluster = "production"
# database to store monitoring data
database = "log"
@ -157,6 +150,19 @@ tables = ["normal_table"]
# database options for db storing metrics data
[metrics.databaseoptions]
cachemodel = "none"
[environment]
# Whether running in cgroup.
incgroup = false
[log]
# rotation file num
rotationCount = 5
# rotation on time
rotationTime = "24h"
# rotation on file size (bytes)
rotationSize = 100000000
```
### Obtain Monitoring Metrics
@ -169,16 +175,16 @@ taosKeeper records monitoring metrics generated by TDengine in a specified datab
$ taos
# the log database is used in this example
> use log;
> select * from cluster_info limit 1;
> select * from taosd_cluster_info limit 1;
```
Example result set:
```shell
ts | first_ep | first_ep_dnode_id | version | master_uptime | monitor_interval | dbs_total | tbs_total | stbs_total | dnodes_total | dnodes_alive | mnodes_total | mnodes_alive | vgroups_total | vgroups_alive | vnodes_total | vnodes_alive | connections_total | protocol | cluster_id |
===============================================================================================================================================================================================================================================================================================================================================================================
2022-08-16 17:37:01.629 | hlb:6030 | 1 | 3.0.0.0 | 0.27250 | 15 | 2 | 27 | 38 | 1 | 1 | 1 | 1 | 4 | 4 | 4 | 4 | 14 | 1 | 5981392874047724755 |
Query OK, 1 rows in database (0.036162s)
_ts | cluster_uptime | dbs_total | tbs_total | stbs_total | vgroups_total | vgroups_alive | vnodes_total | vnodes_alive | mnodes_total | mnodes_alive | connections_total | topics_total | streams_total | dnodes_total | dnodes_alive | grants_expire_time | grants_timeseries_used | grants_timeseries_total | cluster_id |
===================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
2024-06-04 03:03:34.341 | 0.000000000000000 | 2.000000000000000 | 1.000000000000000 | 4.000000000000000 | 4.000000000000000 | 4.000000000000000 | 4.000000000000000 | 4.000000000000000 | 1.000000000000000 | 1.000000000000000 | 2.000000000000000 | 0.000000000000000 | 0.000000000000000 | 1.000000000000000 | 1.000000000000000 | 0.000000000000000 | 3.000000000000000 | 0.000000000000000 | 554014120921134497 |
Query OK, 1 row(s) in set (0.001652s)
```
#### Export Monitoring Metrics
@ -190,21 +196,24 @@ $ curl http://127.0.0.1:6043/metrics
Sample result set (excerpt):
```shell
# HELP taos_cluster_info_connections_total
# HELP taos_cluster_info_connections_total
# TYPE taos_cluster_info_connections_total counter
taos_cluster_info_connections_total{cluster_id="5981392874047724755"} 16
# HELP taos_cluster_info_dbs_total
taos_cluster_info_connections_total{cluster_id="554014120921134497"} 8
# HELP taos_cluster_info_dbs_total
# TYPE taos_cluster_info_dbs_total counter
taos_cluster_info_dbs_total{cluster_id="5981392874047724755"} 2
# HELP taos_cluster_info_dnodes_alive
taos_cluster_info_dbs_total{cluster_id="554014120921134497"} 2
# HELP taos_cluster_info_dnodes_alive
# TYPE taos_cluster_info_dnodes_alive counter
taos_cluster_info_dnodes_alive{cluster_id="5981392874047724755"} 1
# HELP taos_cluster_info_dnodes_total
taos_cluster_info_dnodes_alive{cluster_id="554014120921134497"} 1
# HELP taos_cluster_info_dnodes_total
# TYPE taos_cluster_info_dnodes_total counter
taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1
# HELP taos_cluster_info_first_ep
taos_cluster_info_dnodes_total{cluster_id="554014120921134497"} 1
# HELP taos_cluster_info_first_ep
# TYPE taos_cluster_info_first_ep gauge
taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1
taos_cluster_info_first_ep{cluster_id="554014120921134497",value="tdengine:6030"} 1
# HELP taos_cluster_info_first_ep_dnode_id
# TYPE taos_cluster_info_first_ep_dnode_id counter
taos_cluster_info_first_ep_dnode_id{cluster_id="554014120921134497"} 1
```
### check\_health

View File

@ -0,0 +1,53 @@
---
title: Graphic User Interface
sidebar_label: Taos-Explorer
description: User guide about taosExplorer
---
taos-explorer is a web service that provides a GUI-based interactive database management tool.
## Install
taos-explorer is delivered in the TDengine server package since version 3.3.0.0. After installing TDengine server, you will get the `taos-explorer` service.
## Configure
The configuration file of `taos-explorer` service is `/etc/taos/explorer.toml` on Linux platform, the key items in the configuration are like below:
``` toml
port = 6060
cluster = "http://localhost:6041"
```
The description of these two parameters:
- port: The taos-explorer service port
- cluster: The endpoint of the TDengine cluster for taos-explorer to manage. Only websocket connections are supported, so this address is actually the endpoint of the `taosAdapter` service in the TDengine cluster.
## Start & Stop
Before starting the service, please first make sure the configuration is correct and that the TDengine cluster (mainly the `taosd` and `taosAdapter` services) is already up and working well.
### Linux
On Linux system you can use `systemctl` to manage the service as below:
- Start the service: `systemctl start taos-explorer`
- Stop the service: `systemctl stop taos-explorer`
- Restart the service: `systemctl restart taos-explorer`
- Check service status: `systemctl status taos-explorer`
## Register & Logon
### Register
After installing, configuring, and starting the service, you can access taos-explorer in your browser at `http://ip:6060`. If you have not registered before, the registration page shows up first. Enter your valid enterprise email, receive the activation code, and input the code; you have then registered successfully.
### Logon
After registering, you can log on using your username and the corresponding password in the database system. The default username is `root`, but you can use another one. After logging into the system, you can view or manage databases, create super tables, create child tables, or view the data in the database.
Some functionality is available only to enterprise users; you can view and try these features but cannot fully use them.

View File

@ -13,36 +13,30 @@ TDengine can be quickly integrated with the open-source data visualization syste
In order for Grafana to add the TDengine data source successfully, the following preparations are required:
1. The TDengine cluster is deployed and functioning properly
2. taosAdapter is installed and running properly. Please refer to the taosAdapter manual for details.
1. Grafana server is installed and running properly. TDengine currently supports Grafana versions 7.5 and above. Users can go to the Grafana official website to download the installation package and execute the installation according to the current operating system. The download address is as follows: [https://grafana.com/grafana/download](https://grafana.com/grafana/download).
2. The TDengine cluster is deployed and functioning properly
3. taosAdapter is installed and running properly. Please refer to the taosAdapter manual for details.
Record these values:
- TDengine REST API url: `http://tdengine.local:6041`.
- TDengine cluster authorization, with user + password.
## Installing Grafana
TDengine currently supports Grafana versions 7.5 and above. Users can go to the Grafana official website to download the installation package and execute the installation according to the current operating system. The download address is as follows: [https://grafana.com/grafana/download](https://grafana.com/grafana/download).
## Configuring Grafana
### Install Grafana Plugin and Configure Data Source
## Install Grafana Plugin and Configure Data Source
<Tabs defaultValue="script">
<TabItem value="gui" label="With GUI">
Under Grafana 8, the plugin catalog allows you to [browse and manage plugins within Grafana](https://grafana.com/docs/grafana/next/administration/plugin-management/#plugin-catalog) (for Grafana 7.x, use **With Script** or **Install & Configure Manually** instead). Find the page at **Configurations > Plugins**, search **TDengine** and click it to install.
![Search tdengine in grafana plugins](./grafana/grafana-plugin-search-tdengine.png)
Installation may take a few minutes, then you can **Create a TDengine data source**:
![Install and configure Grafana data source](./grafana/grafana-install-and-config.png)
Installation may take a few minutes; you can **Create a TDengine data source** once installation has finished.
Then you can add a TDengine data source by filling up the configuration options.
![TDengine Database Grafana plugin add data source](./grafana/grafana-data-source.png)
- Host: IP address of the server where the TDengine cluster provides the REST service and the port number of the REST service (6041); by default use `http://localhost:6041`.
- User: TDengine user name.
- Password: TDengine user password.
Click `Save & Test` to test. You should see a success message if the test worked.
You can create dashboards with TDengine now.
@ -77,7 +71,7 @@ sudo -u grafana grafana-cli plugins install tdengine-datasource
You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows:
```bash
GF_VERSION=3.3.1
GF_VERSION=3.5.2
# from GitHub
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
# from Grafana
@ -96,26 +90,17 @@ If Grafana is running in a Docker environment, the TDengine plugin can be automa
GF_INSTALL_PLUGINS=tdengine-datasource
```
Now users can log in to the Grafana server (username/password: admin/admin) directly through the URL `http://localhost:3000` and add a datasource through `Configuration -> Data Sources` on the left side, as shown in the following figure.
![TDengine Database TDinsight plugin add datasource 1](./grafana/add_datasource1.webp)
Click `Add data source` to enter the Add data source page, and enter TDengine in the query box to add it, as shown in the following figure.
![TDengine Database TDinsight plugin add datasource 2](./grafana/add_datasource2.webp)
Now users can log in to the Grafana server (username/password: admin/admin) directly through the URL `http://localhost:3000` and add a datasource through `Configuration -> Data Sources` on the left side.
Click `Add data source` to enter the Add data source page, and enter TDengine in the query box to add it.
Enter the datasource configuration page, and follow the default prompts to modify the corresponding configuration.
![TDengine Database TDinsight plugin add database 3](./grafana/add_datasource3.webp)
- Host: IP address of the server where the components of the TDengine cluster provide REST service (offered by taosd before 2.4 and by taosAdapter since 2.4) and the port number of the TDengine REST service (6041), by default use `http://localhost:6041`.
- Host: IP address of the server where the TDengine cluster provides the REST service and the port number of the REST service (6041); by default use `http://localhost:6041`.
- User: TDengine user name.
- Password: TDengine user password.
Click `Save & Test` to test. You should see a success message if the test worked.
![TDengine Database TDinsight plugin add database 4](./grafana/add_datasource4.webp)
</TabItem>
<TabItem value="container" label="Container">
@ -156,7 +141,7 @@ You can setup a zero-configuration stack for TDengine + Grafana by [docker-compo
services:
tdengine:
image: tdengine/tdengine:3.0.2.4
image: tdengine/tdengine:3.3.0.0
environment:
TAOS_FQDN: tdengine
volumes:
@ -186,43 +171,118 @@ Open Grafana (http://localhost:3000), and you can add dashboard with TDengine no
</TabItem>
</Tabs>
### Create Dashboard
:::info
Go back to the main interface to create a dashboard and click Add Query to enter the panel query page:
![TDengine Database TDinsight plugin create dashboard 1](./grafana/create_dashboard1.webp)
As shown above, select the `TDengine` data source in the `Query` and enter the corresponding SQL in the query box below for query.
- INPUT SQL: Enter the desired query (the results being two columns and multiple rows), such as `select _wstart, avg(mem_system) from log.dnodes_info where ts >= $from and ts < $to interval($interval)`. In this statement, $from, $to, and $interval are variables that Grafana replaces with the query time range and interval. In addition to the built-in variables, custom template variables are also supported.
- ALIAS BY: This allows you to set the current query alias.
- GENERATE SQL: Clicking this button will automatically replace the corresponding variables and generate the final executed statement.
- Group by column name(s): `group by` or `partition by` column names, separated by commas. By setting `Group by column name(s)`, the panel can show multi-dimensional data if the SQL uses `group by` or `partition by`. For example, it can show data by `dnode_ep` if the SQL is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)` and `Group by column name(s)` is `dnode_ep`.
- Format to: formats the legend for `group by` or `partition by`. For example, it can display series by `dnode_ep` if the SQL is the same as above, `Group by column name(s)` is `dnode_ep`, and `Format to` is `mem_system_{{dnode_ep}}`.
:::note
Since the REST connection is stateless, the Grafana plugin can use &lt;db_name&gt;.&lt;table_name&gt; in the SQL command to specify the database name.
In the following introduction, we take Grafana v11.0.0 as an example. Other versions may have different features, please refer to [Grafana's official website](https://grafana.com/docs/grafana/latest/).
:::
Follow the default prompt to query the average system memory usage for the specified interval on the server where the current TDengine deployment is located as follows.
## Built-in Variables and Custom Variables
The Variable feature in Grafana is very powerful. It can be used in queries, panel titles, labels, etc., to create more dynamic and interactive Dashboards, improving user experience and efficiency.
![TDengine Database TDinsight plugin create dashboard 2](./grafana/create_dashboard2.webp)
The main functions and characteristics of variables include:
The example to query the average system memory usage for the specified interval on each server as follows.
- Dynamic data query: Variables can be used in query statements, allowing users to dynamically change query conditions by selecting different variable values, thus viewing different data views. This is very useful for scenarios that need to dynamically display data based on user input.
![TDengine Database TDinsight plugin create dashboard 2](./grafana/create_dashboard3.webp)
- Improved reusability: By defining variables, the same configuration or query logic can be reused in multiple places without the need to rewrite the same code. This makes the maintenance and updating of Dashboards simpler and more efficient.
- Flexible configuration options: Variables offer a variety of configuration options, such as predefined static value lists, dynamic value querying from data sources, regular expression filtering, etc., making the application of variables more flexible and powerful.
Grafana provides both built-in variables and custom variables, which can be referenced in SQL writing. We can use `$variableName` to reference the variable, where `variableName` is the name of the variable. For detailed reference, please refer to [Variable reference](https://grafana.com/docs/grafana/latest/dashboards/variables/variable-syntax/).
### Built-in Variables
Grafana has built-in variables such as `from`, `to`, and `interval`, all derived from the Grafana panel. Their meanings are as follows:
- `from` is the start time of the query range
- `to` is the end time of the query range
- `interval` represents the time span (window size)
It is recommended to set the start and end times of the query range for each query; this can effectively reduce the amount of data the TDengine server scans during query execution. `interval` is the size of the window split, which in Grafana 11 is calculated from the time range and the number of returned points.
In addition to the above three common variables, Grafana also provides variables such as `__timezone`, `__org`, `__user`, etc. For details, please refer to [Built-in Variables](https://grafana.com/docs/grafana/latest/dashboards/variables/add-template-variables/#global-variables).
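As a minimal sketch (assuming the `power.meters` supertable from the smart meter example used later in this chapter), a panel query typically combines the three built-in variables like this:

```sql
-- Grafana substitutes $from/$to with the panel time range and $interval with the window size
select _wstart as ts, avg(current) as current
from power.meters
where ts > $from and ts < $to
interval($interval)
```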
### Custom Variables
We can add custom variables in the Dashboard. The usage of custom variables is no different from that of built-in variables; they are referenced in SQL with `$variableName`.
Custom variables support multiple types, such as `Query`, `Constant`, `Interval`, `Data source`, etc.
Custom variables can reference other custom variables, for example, one variable represents a region, and another variable can reference the value of the region to query devices in that region.
#### Adding Query Type Variables
In the Dashboard configuration, select `Variables`, then click `New variable`:
1. In the `Name` field, enter your variable name, here we set the variable name as `selected_groups`.
2. In the `Select variable type` dropdown menu, select `Query`.
Depending on the selected variable type, configure the corresponding options. For example, if you choose `Query`, you need to specify the data source and the query statement for obtaining variable values. Here, taking smart meters as an example, we set the query type, select the data source, and configure the SQL as `select distinct(groupid) from power.meters where groupid < 3 and ts > $from and ts < $to;`
3. After clicking `Run Query` at the bottom, you can see the variable values generated based on your configuration in the `Preview of values` section.
4. Other configurations are not detailed here. After completing the configuration, click the `Apply` button at the bottom of the page, then click `Save dashboard` in the top right corner to save.
After completing the above steps, we have successfully added a new custom variable `$selected_groups` to the Dashboard. We can later reference this variable in the Dashboard's queries with `$selected_groups`.
We can also add another custom variable to reference this `selected_groups` variable, for example, we add a query variable named `tbname_max_current`, with the SQL as `select tbname from power.meters where groupid = $selected_groups and ts > $from and ts < $to;`
#### Adding Interval Type Variables
We can customize the time window interval to better fit business needs.
1. In the `Name` field, enter the variable name as `interval`.
2. In the `Select variable type` dropdown menu, select `Interval`.
3. In the `Interval options` enter `1s,2s,5s,10s,15s,30s,1m`.
4. Other configurations are not detailed here. After completing the configuration, click the `Apply` button at the bottom of the page, then click `Save dashboard` in the top right corner to save.
After completing the above steps, we have successfully added a new custom variable `$interval` to the Dashboard. We can later reference this variable in the Dashboard's queries with `$interval`.
## TDengine Time Series Query Support
On top of supporting standard SQL, TDengine also provides a series of special query syntaxes that meet the needs of time series business scenarios, bringing great convenience to the development of applications in these scenarios.
- `partition by`: this clause can split data by certain dimensions and then perform a series of calculations within each split data space; it can replace `group by` in most cases.
- `interval`: this clause is used to generate time windows of the same time interval.
- `fill`: this clause specifies how to fill a window when data is missing in it.
- `Window Pseudocolumns`: if you need to output the time window information corresponding to the aggregation result, you need to use window pseudocolumns in the SELECT clause: the start time of the time window (`_wstart`), the end time of the time window (`_wend`), etc.
For a detailed introduction to these features, please refer to [Time-Series Extensions](../../taos-sql/distinguished/).
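As a rough sketch combining these clauses (again assuming the smart meter schema):

```sql
-- one series per groupid; empty windows are filled with null
select _wstart, _wend, groupid, avg(current) as avg_current
from power.meters
where ts > $from and ts < $to
partition by groupid
interval($interval)
fill(null)
```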
## Create Dashboard
Return to the main interface to create a Dashboard, then click Add Query to enter the panel query page:
![TDengine Database TDinsight plugin create dashboard 1](./grafana/create_dashboard1.webp)
As shown above, select the `TDengine` data source in the `Query` and enter the corresponding SQL in the query box below for the query. We will continue to use smart meters as an example; in order to demonstrate smooth curves, **simulated data** is used here.
## Time Series Data Display
Suppose we want to query the average current over a period of time, with the time window split by `$interval`, filling with null wherever data is missing in a window.
- INPUT SQL: Enter the statement to be queried (the result set of this SQL statement should be two columns and multiple rows), here enter: `select _wstart as ts, avg(current) as current from power.meters where groupid in ($selected_groups) and ts > $from and ts < $to interval($interval) fill(null)`, where `from`, `to`, and `interval` are Grafana built-in variables and `selected_groups` is a custom variable.
- ALIAS BY: You can set the current query alias.
- GENERATE SQL: Clicking this button will automatically replace the corresponding variables and generate the final execution statement.
In the custom variables at the top, if the value of `selected_groups` is set to 1, then the query shows the change in average current for all devices in the `meters` supertable whose `groupid` is 1, as shown in the following figure:
![TDengine Database Grafana plugin create dashboard](./grafana/create_dashboard2.webp)
:::note
Since the REST interface is stateless, it is not possible to use the `use db` statement to switch databases. In the SQL statement in the Grafana plugin, you can use \<db_name>.\<table_name> to specify the database.
:::
## Time Series Data Group Display
Suppose we want to query the average current over a period of time, displayed grouped by `groupid`. We can modify the previous SQL to `select _wstart as ts, groupid, avg(current) as current from power.meters where ts > $from and ts < $to partition by groupid interval($interval) fill(null)`
- Group by column(s): comma-separated (half-width commas) `group by` or `partition by` column names. For a `group by` or `partition by` query statement, setting the `Group by` column displays multidimensional data. Here, set the Group by column name to `groupid` to display data grouped by `groupid`.
- Group By Format: the legend format for multidimensional data in `group by` or `partition by` scenarios. For example, with the above INPUT SQL, setting `Group By Format` to `groupid-{{groupid}}` makes the displayed legend name the formatted group name.
After completing the settings, the data is displayed grouped by `groupid` as shown in the following figure:
![TDengine Database Grafana plugin create dashboard](./grafana/create_dashboard3.webp)
> For more information on how to use Grafana to create the appropriate monitoring interface and for more details on using Grafana, refer to the official Grafana [documentation](https://grafana.com/docs/).
### Importing the Dashboard
## Performance Suggestions
- **Include a time range in every query.** In time-series databases, a query without a time range leads to full table scans and poor performance. A common SQL pattern is `select column_name from db.table where ts > $from and ts < $to;`
- For latest-status queries, we generally recommend **enabling cache when creating the database** (`CACHEMODEL` set to last_row or both); a common SQL pattern is `select last(column_name) from db.table where ts > $from and ts < $to;` (see the sketch below).
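A minimal sketch of the cache recommendation above (assuming a `power` database with the `meters` supertable; the names are illustrative):

```sql
-- enable caching of the last row and last non-null values when creating the database
create database power cachemodel 'both';
-- a latest-status query that benefits from the cache
select last(current) from power.meters where ts > $from and ts < $to;
```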
## Importing the Dashboard
You can install the TDinsight dashboard from the data source configuration page (e.g. `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for the TDengine cluster. Ensure that you use TDinsight for 3.x. Please note that TDinsight for 3.x requires taosKeeper to be configured and running correctly.
![TDengine Database Grafana plugine import dashboard](./import_dashboard.webp)
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)).
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167).
For more dashboards using TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a sub list:
@ -230,3 +290,137 @@ For more dashboards using TDengine data source, [search here in Grafana](https:/
- [15155](https://grafana.com/grafana/dashboards/15155): TDengine alert demo.
- [15167](https://grafana.com/grafana/dashboards/15167): TDinsight.
- [16388](https://grafana.com/grafana/dashboards/16388): Telegraf node metrics dashboard using TDengine data source.
## Alert Configuration Introduction
### Alert Configuration Steps
The TDengine Grafana plugin supports alerts. To configure alerts, the following steps are required:
1. Configure Contact Points: Set up notification channels, including DingDing, Email, Slack, WebHook, Prometheus Alertmanager, etc.
2. Configure Notification Policies: Set up routing for which channel to send alerts to, as well as the timing and frequency of notifications.
3. Configure "Alert rules": Set up detailed alert rules.
3.1 Configure alert name.
3.2 Configure query and alert trigger conditions.
3.3 Configure evaluation behavior.
3.4 Configure labels and notifications.
3.5 Configure annotations.
### Alert Configuration Web UI
In Grafana 11, the alert Web UI has 6 tabs: "Alert rules", "Contact points", "Notification policies", "Silences", "Groups", and "Settings".
- "Alert rules" displays and configures alert rules.
- "Contact points" support notification channels such as DingDing, Email, Slack, WebHook, Prometheus Alertmanager, etc.
- "Notification policies" sets up routing for which channel to send alerts to, as well as the timing and frequency of notifications.
- "Silences" configures silent periods for alerts.
- "Groups" displays grouped alerts after they are triggered.
- "Admin" allows modifying alert configurations through JSON.
## Configuring Email Contact Point
### Modifying Grafana Server Configuration File
Add SMTP/Emailing and Alerting modules to the Grafana service configuration file. For Linux systems, the configuration file is usually located at `/etc/grafana/grafana.ini`.
Add the following content to the configuration file:
```ini
#################################### SMTP / Emailing ##########################
[smtp]
enabled = true
host = smtp.qq.com:465  # SMTP server and port used
user = receiver@foxmail.com
password = ***********  # use the mail authorization code
skip_verify = true
from_address = sender@foxmail.com
```
Then restart the Grafana service. For example, on a Linux system, execute `systemctl restart grafana-server.service`.
### Grafana Configuration for Email Contact Point
Find "Home" -> "Alerting" -> "Contact points" on the Grafana page to create a new contact point
"Name": Email Contact Point
"Integration": Select the contact type, here choose Email, fill in the email receiving address, and save the contact point after completion
![TDengine Database Grafana plugin alert email](./grafana/alert-email.webp)
## Configuring Feishu Contact Point
### Feishu Robot Configuration
1. "Feishu Workspace" -> "Get Apps" -> "Search for Feishu Robot Assistant" -> "Create Command"
2. Choose Trigger: Grafana
3. Choose Action: Send a message through the official robot, fill in the recipient and message content
![TDengine Database Grafana plugin feishu robot](./grafana/alert-feishu1.webp)
### Grafana Configuration for Feishu Contact Point
Find "Home" -> "Alerting" -> "Contact points" on the Grafana page to create a new contact point
"Name": Feishu Contact Point
"Integration": Select the contact type, here choose Webhook, and fill in the URL (the Grafana trigger Webhook address in Feishu Robot Assistant), then save the contact point
![TDengine Database Grafana plugin feishu contact point](./grafana/alert-feishu2.webp)
## Notification Policy
After configuring the contact points, you can see there is a Default Policy.
![TDengine Database Grafana plugin Notification default policy](./grafana/alert-notification1.webp)
Click on the "..." on the right -> "Edit", then edit the default notification policy, a configuration window pops up:
![TDengine Database Grafana plugin Notification](./grafana/alert-notification2.webp)
Configure the parameters as shown in the screenshot above.
## Configuring Alert Rules
### Define Query and Alert Conditions
Select "Edit" -> "Alert" -> "New alert rule" in the panel where you want to configure the alert.
1. "Enter alert rule name": Here, enter `power meters alert` as an example for smart meters.
2. "Define query and alert condition":
2.1 Choose data source: `TDengine Datasource`
2.2 Query statement:
```sql
select _wstart as ts, groupid, avg(current) as current from power.meters where ts > $from and ts < $to partition by groupid interval($interval) fill(null)
```
2.3 Set "Expression": `Threshold is above 100`
2.4 Click "Set as alert condition"
2.5 "Preview": View the results of the set rules
After completing the settings, you can see the image displayed below:
![TDengine Database Grafana plugin Alert Rules](./grafana/alert-rules1.webp)
### Configuring Expressions and Calculation Rules
Grafana's "Expression" supports various operations and calculations on data, which are divided into:
1. "Reduce": Aggregates the values of a time series within the selected time range into a single value
1.1 "Function" is used to set the aggregation method, supporting Min, Max, Last, Mean, Sum, and Count.
1.2 "Mode" supports the following three:
- "Strict": If no data is queried, the data will be assigned NaN.
- "Drop Non-numeric Value": Remove illegal data results.
- "Replace Non-numeric Value": If it is illegal data, replace it with a constant value.
2. "Threshold": Checks whether the time series data meets the threshold judgment condition. Returns 0 when the condition is false, and 1 when true. Supports the following methods:
- Is above (x > y)
- Is below (x < y)
- Is within range (x > y1 AND x < y2)
- Is outside range (x < y1 OR x > y2)
3. "Math": Performs mathematical operations on the data of the time series.
4. "Resample": Changes the timestamps in each time series to have a consistent interval, so that mathematical operations can be performed between them.
5. "Classic condition (legacy)": Multiple logical conditions can be configured to determine whether to trigger an alert.
As shown in the screenshot above, here we set the alert to trigger when the maximum value exceeds 100.
### Configuring Evaluation behavior
![TDengine Database Grafana plugin Alert Evaluation Behavior](./grafana/alert-evaluation.webp)
Configure the parameters as shown in the screenshot above.
### Configuring Labels and Notifications
![TDengine Database Grafana plugin Alert Labels and Notifications](./grafana/alert-labels.webp)
Configure the parameters as shown in the screenshot above.
### Configuring annotations
![TDengine Database Grafana plugin Alert Labels and Notifications](./grafana/alert-annotations.webp)
After setting "Summary" and "Description", you will receive an alert notification if the alert is triggered.

View File

@ -73,6 +73,6 @@ Query OK, 3 row(s) in set (0.013269s)
- TDengine takes InfluxDB-format data and creates unique table names by this rule.
The user can configure the `smlChildTableName` parameter to generate specified table names if needed, and must also insert data in the corresponding format.
For example, add `smlChildTableName=tname` in the taos.cfg file, then insert the data `st,tname=cpu1,t1=4 c1=3 1626006833639000000`; the table name will be cpu1. If multiple lines have the same tname but different tag_sets, the first line's tag_set is used to automatically create the table and the other lines' tag_sets are ignored. Please refer to [TDengine Schemaless](../../reference/schemaless/#Schemaless-Line-Protocol)
For example, add `smlChildTableName=tname` in the taos.cfg file, then insert the data `st,tname=cpu1,t1=4 c1=3 1626006833639000000`; the table name will be cpu1. If multiple lines have the same tname but different tag_sets, the first line's tag_set is used to automatically create the table and the other lines' tag_sets are ignored. Please refer to [TDengine Schemaless](../../reference/schemaless/)
:::

View File

@ -94,7 +94,7 @@ The output as below:
The role of the TDengine Sink Connector is to synchronize the data of the specified topic to TDengine. Users do not need to create databases and super tables in advance. The name of the target database can be specified manually (see the configuration parameter connection.database), or it can be generated according to specific rules (see the configuration parameter connection.database.prefix).
TDengine Sink Connector internally uses TDengine [modeless write interface](../../client-libraries/cpp#modeless write-api) to write data to TDengine, currently supports data in three formats: [InfluxDB line protocol format](../../develop/insert-data/influxdb-line), [OpenTSDB Telnet protocol format](../../develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON protocol format](../../develop/insert-data/opentsdb-json).
TDengine Sink Connector internally uses the TDengine [schemaless write interface](../../client-libraries/cpp/#schemaless-writing-api) to write data to TDengine; it currently supports data in three formats: [InfluxDB line protocol format](../../develop/insert-data/influxdb-line), [OpenTSDB Telnet protocol format](../../develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON protocol format](../../develop/insert-data/opentsdb-json).
The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format.

View File

@ -4,33 +4,43 @@ title: Power BI
description: Use PowerBI and TDengine to analyze time series data
---
## Introduction
# Tools - Power BI
With TDengine ODBC driver, PowerBI can access time series data stored in TDengine. You can import tag data, original time series data, or aggregated data into PowerBI from TDengine, to create reports or dashboards without any coding effort.
![Power BI use step](./powerbi-step-en.png)
## Steps
![Power BI use step](./powerbi-step-en.webp)
[Power BI](https://powerbi.microsoft.com/) is a business analytics tool provided by Microsoft. With the TDengine ODBC driver, Power BI can access time series data stored in TDengine. You can import tag data, original time series data, or aggregated data from TDengine into Power BI to create reports or dashboards without any coding effort.
### Prerequisites
### Prerequisite
1. TDengine server software is installed and running.
2. Power BI Desktop has been installed and running (If not, please download and install the latest Windows X64 version from [PowerBI](https://www.microsoft.com/zh-cn/download/details.aspx?id=58494) ).
1. TDengine server has been installed and running well.
2. Power BI Desktop has been installed and running. (If not, please download and install the latest Windows X64 version from [PowerBI](https://www.microsoft.com/download/details.aspx?id=58494).)
### Install ODBC connector
1. Only the Windows operating system is supported, and you need to install the [VC Runtime Library](https://learn.microsoft.com/zh-cn/cpp/windows/latest-supported-vc-redist?view=msvc-170) first. If it is already installed, please ignore this step.
2. Install the [TDengine Windows client installation package](https://docs.taosdata.com/get-started/package/).
### Configure ODBC DataSource
1. Click the "Start" Menu, and Search for "ODBC", and choose "ODBC Data Source (64-bit)" (Note: Don't choose 32-bit).
2. Select the "User DSN" tab, and click "Add" button to enter the page for "Create Data Source".
3. Choose the data source to be added, here we choose "TDengine" and click "Finish", and enter the configuration page for "TDengine ODBC Data Source", fill in required fields as the following:
&emsp;&emsp;[DSN]:&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&ensp;Data Source Name, required field, such as "MyTDengine"
Depending on your TDengine server version, download the appropriate version of the TDengine client package from the TDengine website [Download Link](https://docs.tdengine.com/get-started/package/), or from TDengine explorer if you are using a local TDengine cluster. Install the TDengine client package on the same Windows machine where Power BI is running.
## Install Driver
&emsp;&emsp;[URL]:&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&ensp;&nbsp;taos://localhost:6041
Depending on your TDengine server version, download the appropriate version of the TDengine client package from the TDengine website [Download Link](../../get-started/package/), or from TDengine explorer if you are using a local TDengine cluster. Install the TDengine client package on the same Windows machine where Power BI is running.
&emsp;&emsp;[Database]:&emsp;&emsp;&emsp;&emsp;&ensp;optional field, the default database to access, such as "test"
### Configure Data Source
&emsp;&emsp;[UserID]:&emsp;&emsp;&emsp;&emsp;&emsp;&ensp;Enter the user name. If this parameter is not specified, the user name is root by default
Please refer to [ODBC](../../client-libraries/odbc) to configure TDengine ODBC Driver with WebSocket connection.
&emsp;&emsp;[Password]:&emsp;&emsp;&emsp;&emsp;&nbsp;Enter the user password. If not, the default is taosdata
4. Click "Test Connection" to test whether the data source can be connectted; if successful, it will prompt "Successfully connected to taos://root:taosdata@localhost:6041".
### Import Data from TDengine to Power BI
1. Open Power BI and logon, add data source following steps "Home Page" -> "Get Data" -> "Others" -> "ODBC" -> "Connect"
2. Choose the data source name, connect to the configured data source, go to the navigator, browse the tables of the selected database, and load the data
1. Open Power BI and logon, add data source following steps "Home" -> "Get data" -> "Other" -> "ODBC" -> "Connect".
2. Choose the created data source name, such as "MyTDengine", then click the "OK" button to open the "ODBC Driver" dialog. In the dialog, select the "Default or Custom" left menu and then click the "Connect" button to connect to the configured data source. Then go to the "Navigator", browse the tables of the selected database, and load the data.
3. If you want to input a specific SQL statement, click "Advanced Options" and input your SQL in the dialog box that opens, then load the data.
@ -49,17 +59,9 @@ To better use Power BI to analyze the data stored in TDengine, you need to under
4. Correlation: Indicates how to correlate data. Dimensions and metrics can be correlated by tbname; dates and metrics can be correlated by date. Together these form visual reports.
### Example - Meters
TDengine has its own specific data model, which uses supertable as template and creates a specific table for each device. Each table can have maximum 4,096 data columns and 128 tags. In the example of meters, assume each meter generates one record per second, then there will be 86,400 records each day and 31,536,000 records every year, then only 1,000 meters will occupy 500GB disk space. So, the common usage of Power BI should be mapping tags to dimension columns, mapping the aggregation of data columns to metric columns, to provide indicators for decision makers.
1. Import Dimentions
Import the tags of tables in PowerBI, and name as "tags", the SQL is like `select distinct tbname, groupid, location from test.meters;`.
2. Import Metrics
In Power BI, import the average current, average voltage, average phase with 1 hour window, and name it as "data", the SQL is like `select tbname, _wstart ws, avg(current), avg(voltage), avg(phase) from test.meters PARTITION by tbname interval(1h)` .
3. Correlate Dimentions and Metrics
In Power BI, open model view, correlate "tags" and "data", and set "tbname" as the correlation column, then you can use the data in histogram, pie chart, etc. For more information about building visual reports in PowerBI, please refer to [Power BI](https://learn.microsoft.com/power-bi/).
TDengine has its own specific data model, which uses the supertable as a template and creates a specific table for each device. Each table can have a maximum of 4,096 data columns and 128 tags. In [the example of meters](https://docs.taosdata.com/concept/), assume each meter generates one record per second: there will be 86,400 records each day and 31,536,000 records every year, so 1,000 meters alone will occupy about 500GB of disk space. The common usage of Power BI is therefore to map tags to dimension columns and map aggregations of data columns to metric columns, providing indicators for decision makers.
1. Import Dimensions: Import the tags of tables in PowerBI, and name as "tags", the SQL is as the following:
`select distinct tbname, groupid, location from test.meters;`
2. Import Metrics: In Power BI, import the average current, average voltage, average phase with 1 hour window, and name it as "data", the SQL is as the following:
`select tbname, _wstart ws, avg(current), avg(voltage), avg(phase) from test.meters PARTITION by tbname interval(1h)`
3. Correlate Dimensions and Metrics: In Power BI, open the model view, correlate "tags" and "data", and set "tbname" as the correlation column; then you can use the data in histograms, pie charts, etc. For more information about building visual reports in Power BI, please refer to [Power BI](https://learn.microsoft.com/zh-cn/power-bi/).

View File

@ -0,0 +1,64 @@
---
sidebar_label: Yonghong BI
title: Yonghong BI
description: Use YonghongBI and TDengine to analyze time series data
---
# Tools - Yonghong BI
![Yonghong BI use step](./yonghongbi-step-en.png)
The [Yonghong one-stop big data BI platform](https://www.yonghongtech.com/) provides enterprises of all sizes with flexible, easy-to-use, full-business-chain big data analysis solutions, so that every user can use the platform to easily discover the value of big data and obtain deep insights. TDengine can be added to Yonghong BI as a data source via a JDBC connector. Once the data source is configured, Yonghong BI can read data from TDengine and provide functions such as data presentation, analysis, and prediction.
### Prerequisite
1. Yonghong Desktop Basic is installed and running (if not, please download it from the [official download page of Yonghong Technology](https://www.yonghongtech.com/cp/desktop/)).
2. TDengine is installed and running; ensure that the taosAdapter service is started on the TDengine server side.
### Install JDBC Connector
Go to [maven.org](https://central.sonatype.com/artifact/com.taosdata.jdbc/taos-jdbcdriver/versions) to download the latest TDengine JDBC connector (current version [3.2.7](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.7/taos-jdbcdriver-3.2.7-dist.jar)) and install it on the machine where the BI tool is running.
### Configure JDBC DataSource
1. In the Yonghong Desktop BI tool, click "Add data source" and select the "GENERIC" type in the SQL data source.
2. Click "Select Custom Driver", in the "Driver Management" dialog box, click "+" next to "Driver List", enter the name "MyTDengine". Then click the "upload file" button to upload just download TDengine JDBC connector file "taos-jdbcdriver-3.2.7-dist.jar", and select "com.taosdata.jdbc.rs.RestfulDriver" drive, Finally, click the "OK" button to complete the driver addition.
3. Then copy the following into the "URL" field:
```
jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata
```
4. Then select "No identity Authentication" under "Authentication Mode".
5. In the advanced Settings of the data source, change the value of the Quote symbol to the backquote "`".
6. Click "Test connection" and the dialog box "Test success" will pop up. Click the "Save" button and enter "tdengine" to save the TDengine data source.
### Create TDengine datasets
1. Click "Add Data Set" in the BI tool, expand the data source you just created, and browse the super table in TDengine.
2. You can load all the data of the super table into the BI tool, or you can import some data through custom SQL statements.
3. When "Computation in Database" is selected, the BI tool will no longer cache TDengine timing data and will send SQL requests to TDengine for direct processing when processing queries.
When data is imported, the BI tool automatically maps numeric columns to "metric" columns and text columns to "dimension" columns. In TDengine supertables, ordinary columns are used as data metrics and tag columns are used as data dimensions, so you may need to change the properties of some columns when you create a dataset. On top of supporting standard SQL, TDengine also provides a series of special query syntaxes that meet the requirements of time-series business scenarios, such as partitioned queries and window-split queries; see [TDengine Specialized Queries](https://docs.taosdata.com/taos-sql/distinguished/). By using these featured queries, BI tools can greatly improve data access speed and reduce network transmission bandwidth when sending SQL queries to TDengine databases.
In BI tools, you can create "parameters" and use them in SQL statements. Such statements can be re-executed manually or periodically to achieve a refreshed visual report. Consider the following SQL statement:
```sql
select _wstart ws, count(*) cnt from supertable where tbname=?{metric} and ts >= ?{from} and ts < ?{to} interval(?{interval})
```
Data can be read in real time from TDengine, where:
- `_wstart`: Indicates the start time of the time window.
- `count(*)`: Indicates the aggregate value in the time window.
- `?{interval}`: introduces the parameter `interval` into the SQL statement. When the BI tool queries data, it assigns a value to this parameter; if the value is 1m, the data is downsampled into 1-minute time windows.
- `?{metric}`: This parameter is used to specify the name of the data table to be queried. When the ID of a drop-down parameter component is set as metric in the BI tool, the selected items of the drop-down parameter component are bound to this parameter to achieve dynamic selection.
- `?{from}`, `?{to}`: these two parameters represent the time range of the query data set and can be bound with the Text Parameter Component.
You can modify the data type, data range, and default values of parameters in the "Edit Parameters" dialog box of the BI tool, and dynamically set the values of these parameters in the "Visual Report".
### Create a visual report
1. Click "Make Report" in Yonghong BI tool to create a canvas.
2. Drag visual components, such as Table Components, onto the canvas.
3. Select the data set to be bound in the Data Set sidebar, and bind Dimensions and Measures in the data column to Table Components as needed.
4. Click "Save" to view the report.
5. For more information about Yonghong BI tools, please consult the [help documentation](https://www.yonghongtech.com/help/Z-Suite/10.0/ch/).

(Binary image files changed; previews not shown.)
View File

@ -90,7 +90,7 @@ Through TAOSC caching mechanism, mnode needs to be accessed only when a table is
### Storage Model
The data stored by TDengine includes collected time-series data, metadata and tag data related to database and tablesetc. All of the data is specifically divided into three parts:
The data stored by TDengine includes collected time-series data, metadata and tag data related to database and tables, etc. All of the data is specifically divided into three parts:
- Time-series data: stored in vnode and composed of data, head and last files. Normally the amount of time-series data is huge, and the query volume depends on the application scenario. Out-of-order writing is allowed. By adopting the model with **one table for each data collection point**, the data of a given time period is stored continuously, and writing against a single table is a simple append operation. Multiple records can be read at one time, thus ensuring the best performance for both insert and query operations on a single data collection point.
- Table Metadata: table metadata includes tags and the table schema and is stored in the meta file in each vnode. CRUD operations are supported on table metadata. There is a specific record for each table, so the amount of table metadata depends on the number of tables. Table metadata is stored in an LRU model and supports indexing of tag data. TDengine can support multiple queries in parallel. As long as memory is sufficient, metadata is kept entirely in memory for quick access, and filtering on tens of millions of tags can be finished in a few milliseconds. Even when memory is insufficient, TDengine can still perform high-speed queries on tens of millions of tables.
@ -197,20 +197,20 @@ By default, TDengine saves all data in /var/lib/taos directory, and the data fil
dataDir format is as follows:
```
dataDir data_path [tier_level]
dataDir data_path [tier_level] [primary] [disable_create_new_file]
```
Where data_path is the folder path of mount point and tier_level is the media storage-tier. The higher the media storage-tier, means the older the data file. Multiple hard disks can be mounted at the same storage-tier, and data files on the same storage-tier are distributed on all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be only one mount path without specifying tier_level, which is called special mount disk (path). The mount path defaults to level 0 storage media and contains special file links, which cannot be removed, otherwise it will have a devastating impact on the written data.
Where `data_path` is the folder path of the mount point, and `tier_level` is the media storage tier. The higher the storage tier, the older the data files stored on it. Multiple hard disks can be mounted at the same storage tier, and data files on the same tier are distributed across all hard disks within the tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, there must be exactly one mount path without a tier_level, which is called the special mount disk (path). That mount path defaults to level 0 storage media and contains special file links, which cannot be removed; otherwise it will have a devastating impact on the written data. `primary` indicates whether the data dir is the primary mount point; enter 0 for false or 1 for true (default 1). A TDengine cluster can have only one `primary` mount point, which must be on tier 0. `disable_create_new_file` indicates whether to prohibit the creation of new file sets on the specified mount point; enter 0 for false or 1 for true (default 0). Tier 0 storage must have at least one mount point with disable_create_new_file set to 0; tier 1 and tier 2 storage do not have this restriction.
Suppose a physical node has six mountable hard disks /mnt/disk1, /mnt/disk2, ..., /mnt/disk6, where disk1 and disk2 are to be designated as level 0 storage media, disk3 and disk4 as level 1, and disk5 and disk6 as level 2. Disk1 is the special mount disk; you can configure it in /etc/taos/taos.cfg as follows:
```
dataDir /mnt/disk1/taos
dataDir /mnt/disk2/taos 0
dataDir /mnt/disk3/taos 1
dataDir /mnt/disk4/taos 1
dataDir /mnt/disk5/taos 2
dataDir /mnt/disk6/taos 2
dataDir /mnt/disk1/taos 0 1 0
dataDir /mnt/disk2/taos 0 0 0
dataDir /mnt/disk3/taos 1 0 0
dataDir /mnt/disk4/taos 1 0 1
dataDir /mnt/disk5/taos 2 0 0
dataDir /mnt/disk6/taos 2 0 0
```
Mounted disks can also be non-local network disks, as long as the system can access them.

View File

@ -4,12 +4,38 @@ sidebar_label: TDengine
description: This document provides download links for all released versions of TDengine 3.0.
---
## TDengine Version Rules
TDengine version number consists of 4 numbers separated by `.`, defined as below:
- `[Major+].[Major].[Feature].[Maintenance]`
- `Major+`: Significant re-architecture release; can't be upgraded from an old version with a different `Major+` number. If you have such a need, please contact the TDengine support team.
- `Major`: Important new feature release; can't be rolling upgraded from an old version with a different `Major` number, and can't be rolled back after upgrading. For example, after upgrading from `3.2.3.0` to `3.3.0.0`, you can't roll back to `3.2.3.0`.
- `Feature`: New feature release; can't be rolling upgraded from an old version with a different `Feature` number, but can be rolled back after upgrading. For example, after upgrading from `3.3.0.0` to `3.3.1.0`, you can roll back to `3.3.0.0`. The client driver (libtaos.so) must be upgraded to the same version as the server side (taosd).
- `Maintenance`: Maintenance release; no new features, only bug fixes. Can be rolling upgraded from an old version differing only in the `Maintenance` number, and can be rolled back after upgrading.
- `Rolling Upgrade`: For a cluster of three or more dnodes with three replicas enabled, you can upgrade one dnode at a time by stopping it, upgrading it, and restarting it; repeat this process to upgrade the whole cluster. During this period the cluster remains in service. If a rolling upgrade is not supported based on the above version number rules, you need to first stop the whole cluster, upgrade all dnodes, and restart them after upgrading. During this period the cluster is out of service.
TDengine 3.x installation packages can be downloaded at the following links:
For TDengine 2.x installation packages by version, please visit [here](https://tdengine.com/downloads/historical/).
import Release from "/components/ReleaseV3";
## 3.3.2.0
<Release type="tdengine" version="3.3.2.0" />
## 3.3.1.0
<Release type="tdengine" version="3.3.1.0" />
## 3.3.0.3
<Release type="tdengine" version="3.3.0.3" />
## 3.3.0.0
<Release type="tdengine" version="3.3.0.0" />
## 3.2.3.0
<Release type="tdengine" version="3.2.3.0" />

View File

@ -1,5 +1,5 @@
if (! "RJDBC" %in% installed.packages()[, "Package"]) {
install.packages('RJDBC', repos='http://cran.us.r-project.org')
install.packages('RJDBC', repos='http://mirrors.tuna.tsinghua.edu.cn/CRAN')
}
# ANCHOR: demo

View File

@ -0,0 +1,113 @@
package main
import (
"fmt"
"os"
"time"
"github.com/taosdata/driver-go/v3/af"
"github.com/taosdata/driver-go/v3/af/tmq"
tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
)
func main() {
db, err := af.Open("", "root", "taosdata", "", 0)
if err != nil {
panic(err)
}
defer db.Close()
_, err = db.Exec("create database if not exists power WAL_RETENTION_PERIOD 86400")
if err != nil {
panic(err)
}
_, err = db.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))")
if err != nil {
panic(err)
}
_, err = db.Exec("create table if not exists power.d001 using power.meters tags(1,'location')")
if err != nil {
panic(err)
}
// ANCHOR: create_topic
_, err = db.Exec("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM power.meters")
if err != nil {
panic(err)
}
// ANCHOR_END: create_topic
// ANCHOR: create_consumer
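// Create a TMQ consumer over the native connection: "group.id" is required,
// and "auto.offset.reset" = "latest" makes a new consumer group start from newly written data.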
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"group.id": "test",
"auto.offset.reset": "latest",
"td.connect.ip": "127.0.0.1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"msg.with.table.name": "true",
})
if err != nil {
panic(err)
}
// ANCHOR_END: create_consumer
// ANCHOR: poll_data
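// Background writer: insert one row every 100 ms so the consumer has data to poll.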
go func() {
for {
_, err = db.Exec("insert into power.d001 values (now, 1.1, 220, 0.1)")
if err != nil {
panic(err)
}
time.Sleep(time.Millisecond * 100)
}
}()
err = consumer.Subscribe("topic_meters", nil)
if err != nil {
panic(err)
}
for i := 0; i < 5; i++ {
ev := consumer.Poll(500)
if ev != nil {
switch e := ev.(type) {
case *tmqcommon.DataMessage:
fmt.Printf("get message:%v\n", e)
case tmqcommon.Error:
fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e)
panic(e)
}
consumer.Commit()
}
}
// ANCHOR_END: poll_data
// ANCHOR: consumer_seek
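// Fetch the current partition assignment, then seek each partition back to
// offset 0 so the topic can be consumed again from the beginning.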
partitions, err := consumer.Assignment()
if err != nil {
panic(err)
}
for i := 0; i < len(partitions); i++ {
fmt.Println(partitions[i])
err = consumer.Seek(tmqcommon.TopicPartition{
Topic: partitions[i].Topic,
Partition: partitions[i].Partition,
Offset: 0,
}, 0)
if err != nil {
panic(err)
}
}
partitions, err = consumer.Assignment()
if err != nil {
panic(err)
}
// ANCHOR_END: consumer_seek
for i := 0; i < len(partitions); i++ {
fmt.Println(partitions[i])
}
// ANCHOR: consumer_close
err = consumer.Close()
if err != nil {
panic(err)
}
// ANCHOR_END: consumer_close
}

View File

@ -0,0 +1,115 @@
package main
import (
"database/sql"
"fmt"
"os"
"time"
"github.com/taosdata/driver-go/v3/common"
tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
_ "github.com/taosdata/driver-go/v3/taosRestful"
"github.com/taosdata/driver-go/v3/ws/tmq"
)
func main() {
db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/")
if err != nil {
panic(err)
}
defer db.Close()
_, err = db.Exec("create database if not exists power WAL_RETENTION_PERIOD 86400")
if err != nil {
panic(err)
}
_, err = db.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))")
if err != nil {
panic(err)
}
_, err = db.Exec("create table if not exists power.d001 using power.meters tags(1,'location')")
if err != nil {
panic(err)
}
// ANCHOR: create_topic
_, err = db.Exec("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM power.meters")
if err != nil {
panic(err)
}
// ANCHOR_END: create_topic
// ANCHOR: create_consumer
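// Create a TMQ consumer over WebSocket; "ws.url" points at the taosAdapter service (port 6041).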
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"ws.url": "ws://127.0.0.1:6041",
"ws.message.channelLen": uint(0),
"ws.message.timeout": common.DefaultMessageTimeout,
"ws.message.writeWait": common.DefaultWriteWait,
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"group.id": "example",
"client.id": "example_consumer",
"auto.offset.reset": "latest",
})
if err != nil {
panic(err)
}
// ANCHOR_END: create_consumer
// ANCHOR: poll_data
go func() {
for {
_, err = db.Exec("insert into power.d001 values (now, 1.1, 220, 0.1)")
if err != nil {
panic(err)
}
time.Sleep(time.Millisecond * 100)
}
}()
err = consumer.Subscribe("topic_meters", nil)
if err != nil {
panic(err)
}
for i := 0; i < 5; i++ {
ev := consumer.Poll(500)
if ev != nil {
switch e := ev.(type) {
case *tmqcommon.DataMessage:
fmt.Printf("get message:%v\n", e)
case tmqcommon.Error:
fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e)
panic(e)
}
consumer.Commit()
}
}
// ANCHOR_END: poll_data
// ANCHOR: consumer_seek
partitions, err := consumer.Assignment()
if err != nil {
panic(err)
}
for i := 0; i < len(partitions); i++ {
fmt.Println(partitions[i])
err = consumer.Seek(tmqcommon.TopicPartition{
Topic: partitions[i].Topic,
Partition: partitions[i].Partition,
Offset: 0,
}, 0)
if err != nil {
panic(err)
}
}
partitions, err = consumer.Assignment()
if err != nil {
panic(err)
}
// ANCHOR_END: consumer_seek
for i := 0; i < len(partitions); i++ {
fmt.Println(partitions[i])
}
// ANCHOR: consumer_close
err = consumer.Close()
if err != nil {
panic(err)
}
// ANCHOR_END: consumer_close
}

View File

@ -0,0 +1,76 @@
package main
import (
"context"
"database/sql"
"log"
"time"
"github.com/taosdata/driver-go/v3/common"
_ "github.com/taosdata/driver-go/v3/taosSql"
)
func main() {
var taosDSN = "root:taosdata@tcp(localhost:6030)/"
taos, err := sql.Open("taosSql", taosDSN)
if err != nil {
log.Fatalln("failed to connect TDengine, err:", err)
}
defer taos.Close()
// ANCHOR: create_db_and_table
_, err = taos.Exec("CREATE DATABASE if not exists power")
if err != nil {
log.Fatalln("failed to create database, err:", err)
}
_, err = taos.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))")
if err != nil {
log.Fatalln("failed to create stable, err:", err)
}
// ANCHOR_END: create_db_and_table
// ANCHOR: insert_data
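// Insert rows into two child tables (d1001, d1002), auto-creating them from the
// power.meters supertable via the USING ... TAGS clause.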
res, err := taos.Exec("INSERT INTO " +
"power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " +
"VALUES " +
"(NOW + 1a, 10.30000, 219, 0.31000) " +
"(NOW + 2a, 12.60000, 218, 0.33000) " +
"(NOW + 3a, 12.30000, 221, 0.31000) " +
"power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " +
"VALUES " +
"(NOW + 1a, 10.30000, 218, 0.25000) ")
if err != nil {
log.Fatalln("failed to insert data, err:", err)
}
affected, err := res.RowsAffected()
if err != nil {
log.Fatalln("failed to get affected rows, err:", err)
}
log.Println("affected rows:", affected)
// ANCHOR_END: insert_data
// ANCHOR: query_data
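// Scan each row into typed variables, one per result column: the data
// columns first, then the groupId and location tags.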
rows, err := taos.Query("SELECT * FROM power.meters")
if err != nil {
log.Fatalln("failed to select from table, err:", err)
}
defer rows.Close()
for rows.Next() {
var (
ts time.Time
current float32
voltage int
phase float32
groupId int
location string
)
err := rows.Scan(&ts, &current, &voltage, &phase, &groupId, &location)
if err != nil {
log.Fatalln("scan error:\n", err)
return
}
log.Println(ts, current, voltage, phase, groupId, location)
}
// ANCHOR_END: query_data
// ANCHOR: with_reqid
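// Attach a request ID to the context; it is passed to the server with
// the statement and can be used to correlate client and server logs.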
ctx := context.WithValue(context.Background(), common.ReqIDKey, common.GetReqID())
_, err = taos.ExecContext(ctx, "CREATE DATABASE IF NOT EXISTS power")
if err != nil {
log.Fatalln("failed to create database, err:", err)
}
// ANCHOR_END: with_reqid
}

View File

@ -0,0 +1,41 @@
package main
import (
"fmt"
"github.com/taosdata/driver-go/v3/af"
)
const LineDemo = "meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639000000"
const TelnetDemo = "stb0_0 1707095283260 4 host=host0 interface=eth0"
const JsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"
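// One sample per schemaless protocol: InfluxDB line protocol, OpenTSDB
// telnet, and OpenTSDB JSON, matching the three insert calls below.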
func main() {
conn, err := af.Open("localhost", "root", "taosdata", "", 6030)
if err != nil {
fmt.Println("failed to connect, err:", err)
panic(err)
}
defer conn.Close()
_, err = conn.Exec("CREATE DATABASE IF NOT EXISTS power")
if err != nil {
panic(err)
}
_, err = conn.Exec("use power")
if err != nil {
panic(err)
}
err = conn.InfluxDBInsertLines([]string{LineDemo}, "ns")
if err != nil {
panic(err)
}
err = conn.OpenTSDBInsertTelnetLines([]string{TelnetDemo})
if err != nil {
panic(err)
}
err = conn.OpenTSDBInsertJsonPayload(JsonDemo)
if err != nil {
panic(err)
}
}

View File

@ -0,0 +1,54 @@
package main
import (
"database/sql"
"log"
"time"
"github.com/taosdata/driver-go/v3/common"
_ "github.com/taosdata/driver-go/v3/taosWS"
"github.com/taosdata/driver-go/v3/ws/schemaless"
)
const LineDemo = "meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639000000"
const TelnetDemo = "stb0_0 1707095283260 4 host=host0 interface=eth0"
const JsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"
func main() {
db, err := sql.Open("taosWS", "root:taosdata@ws(localhost:6041)/")
if err != nil {
log.Fatal(err)
}
defer db.Close()
_, err = db.Exec("create database if not exists power")
if err != nil {
log.Fatal(err)
}
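// Build a schemaless writer over websocket: target database, read/write
// timeouts, credentials, and an error handler for connection-level errors.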
s, err := schemaless.NewSchemaless(schemaless.NewConfig("ws://localhost:6041", 1,
schemaless.SetDb("power"),
schemaless.SetReadTimeout(10*time.Second),
schemaless.SetWriteTimeout(10*time.Second),
schemaless.SetUser("root"),
schemaless.SetPassword("taosdata"),
schemaless.SetErrorHandler(func(err error) {
log.Fatal(err)
}),
))
if err != nil {
panic(err)
}
err = s.Insert(LineDemo, schemaless.InfluxDBLineProtocol, "ns", 0, common.GetReqID())
if err != nil {
panic(err)
}
err = s.Insert(TelnetDemo, schemaless.OpenTSDBTelnetLineProtocol, "ms", 0, common.GetReqID())
if err != nil {
panic(err)
}
err = s.Insert(JsonDemo, schemaless.OpenTSDBJsonFormatProtocol, "ms", 0, common.GetReqID())
if err != nil {
panic(err)
}
}

View File

@ -0,0 +1,81 @@
package main
import (
"fmt"
"strconv"
"time"
"github.com/taosdata/driver-go/v3/af"
"github.com/taosdata/driver-go/v3/common"
"github.com/taosdata/driver-go/v3/common/param"
)
const (
NumOfSubTable = 10
NumOfRow = 10
)
func main() {
prepare()
db, err := af.Open("", "root", "taosdata", "power", 0)
if err != nil {
panic(err)
}
defer db.Close()
stmt := db.InsertStmt()
defer stmt.Close()
err = stmt.Prepare("INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)")
if err != nil {
panic(err)
}
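// For every sub-table: set its name and tags, bind NumOfRow rows column
// by column, then add the batch and execute it.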
for i := 1; i <= NumOfSubTable; i++ {
tags := param.NewParam(2).AddInt(i).AddBinary([]byte("location"))
err = stmt.SetTableNameWithTags("d_bind_"+strconv.Itoa(i), tags)
if err != nil {
panic(err)
}
now := time.Now()
params := make([]*param.Param, 4)
params[0] = param.NewParam(NumOfRow)
params[1] = param.NewParam(NumOfRow)
params[2] = param.NewParam(NumOfRow)
params[3] = param.NewParam(NumOfRow)
for j := 0; j < NumOfRow; j++ {
params[0].SetTimestamp(j, now.Add(time.Duration(j)*time.Second), common.PrecisionMilliSecond)
params[1].SetFloat(j, float32(j))
params[2].SetInt(j, j)
params[3].SetFloat(j, float32(j))
}
paramTypes := param.NewColumnType(4).AddTimestamp().AddFloat().AddInt().AddFloat()
err = stmt.BindParam(params, paramTypes)
if err != nil {
panic(err)
}
err = stmt.AddBatch()
if err != nil {
panic(err)
}
err = stmt.Execute()
if err != nil {
panic(err)
}
affected := stmt.GetAffectedRows()
fmt.Println("affected rows:", affected)
}
}
func prepare() {
db, err := af.Open("", "root", "taosdata", "", 0)
if err != nil {
panic(err)
}
defer db.Close()
_, err = db.Exec("CREATE DATABASE IF NOT EXISTS power")
if err != nil {
panic(err)
}
_, err = db.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))")
if err != nil {
panic(err)
}
}

View File

@ -0,0 +1,95 @@
package main
import (
"database/sql"
"fmt"
"strconv"
"time"
"github.com/taosdata/driver-go/v3/common"
"github.com/taosdata/driver-go/v3/common/param"
_ "github.com/taosdata/driver-go/v3/taosRestful"
"github.com/taosdata/driver-go/v3/ws/stmt"
)
const (
NumOfSubTable = 10
NumOfRow = 10
)
func main() {
prepare()
config := stmt.NewConfig("ws://127.0.0.1:6041", 0)
config.SetConnectUser("root")
config.SetConnectPass("taosdata")
config.SetConnectDB("power")
config.SetMessageTimeout(common.DefaultMessageTimeout)
config.SetWriteWait(common.DefaultWriteWait)
config.SetErrorHandler(func(connector *stmt.Connector, err error) {
panic(err)
})
config.SetCloseHandler(func() {
fmt.Println("stmt connector closed")
})
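// Create the websocket connector from the config above and obtain a
// prepared-statement handle from it.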
connector, err := stmt.NewConnector(config)
if err != nil {
panic(err)
}
stmt, err := connector.Init()
if err != nil {
panic(err)
}
err = stmt.Prepare("INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)")
if err != nil {
panic(err)
}
for i := 1; i <= NumOfSubTable; i++ {
tags := param.NewParam(2).AddInt(i).AddBinary([]byte("location"))
err = stmt.SetTableName("d_bind_" + strconv.Itoa(i))
if err != nil {
panic(err)
}
err = stmt.SetTags(tags, param.NewColumnType(2).AddInt().AddBinary(8))
if err != nil {
panic(err)
}
now := time.Now()
params := make([]*param.Param, 4)
params[0] = param.NewParam(NumOfRow)
params[1] = param.NewParam(NumOfRow)
params[2] = param.NewParam(NumOfRow)
params[3] = param.NewParam(NumOfRow)
for j := 0; j < NumOfRow; j++ {
params[0].SetTimestamp(j, now.Add(time.Duration(j)*time.Second), common.PrecisionMilliSecond)
params[1].SetFloat(j, float32(j))
params[2].SetInt(j, j)
params[3].SetFloat(j, float32(j))
}
paramTypes := param.NewColumnType(4).AddTimestamp().AddFloat().AddInt().AddFloat()
err = stmt.BindParam(params, paramTypes)
if err != nil {
panic(err)
}
err = stmt.AddBatch()
if err != nil {
panic(err)
}
err = stmt.Exec()
if err != nil {
panic(err)
}
affected := stmt.GetAffectedRows()
fmt.Println("affected rows:", affected)
}
}
func prepare() {
db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/")
if err != nil {
panic(err)
}
defer db.Close()
_, err = db.Exec("CREATE DATABASE IF NOT EXISTS power")
if err != nil {
panic(err)
}
_, err = db.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))")
if err != nil {
panic(err)
}
}

View File

@ -22,7 +22,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.2.7-SNAPSHOT</version>
<version>3.3.0</version>
</dependency>
<!-- ANCHOR_END: dep-->
<dependency>

View File

@ -0,0 +1,53 @@
const taos = require("@tdengine/websocket");
var host = null;
for(var i = 2; i < global.process.argv.length; i++){
var key = global.process.argv[i].split("=")[0];
var value = global.process.argv[i].split("=")[1];
if("host" == key){
host = value;
}
}
if(host == null){
console.log("Usage: node nodejsChecker.js host=<hostname> port=<port>");
process.exit(0);
}
let dbData = ["{\"metric\": \"meter_current\",\"timestamp\": 1626846402,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}",
"{\"metric\": \"meter_current\",\"timestamp\": 1626846403,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1002\"}}",
"{\"metric\": \"meter_current\",\"timestamp\": 1626846404,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1003\"}}"]
async function createConnect() {
let dsn = 'ws://' + host + ':6041'
let conf = new taos.WSConfig(dsn);
conf.setUser('root');
conf.setPwd('taosdata');
conf.setDb('power');
return await taos.sqlConnect(conf);
}
async function test() {
let wsSql = null;
let wsRows = null;
let reqId = 0;
try {
wsSql = await createConnect()
await wsSql.exec('CREATE DATABASE IF NOT EXISTS power KEEP 3650 DURATION 10 BUFFER 16 WAL_LEVEL 1;', reqId++);
await wsSql.schemalessInsert(dbData, taos.SchemalessProto.OpenTSDBJsonFormatProtocol, taos.Precision.SECONDS, 0);
}
catch (err) {
console.error(err.code, err.message);
}
finally {
if (wsRows) {
await wsRows.close();
}
if (wsSql) {
await wsSql.close();
}
taos.destroy();
}
}
test()

View File

@ -0,0 +1,49 @@
const taos = require("@tdengine/websocket");
let influxdbData = ["meters,location=California.LosAngeles,groupId=2 current=11.8,voltage=221,phase=0.28 1648432611249",
"meters,location=California.LosAngeles,groupId=2 current=13.4,voltage=223,phase=0.29 1648432611250",
"meters,location=California.LosAngeles,groupId=3 current=10.8,voltage=223,phase=0.29 1648432611249"];
let jsonData = ["{\"metric\": \"meter_current\",\"timestamp\": 1626846402,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}",
"{\"metric\": \"meter_current\",\"timestamp\": 1626846403,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1002\"}}",
"{\"metric\": \"meter_current\",\"timestamp\": 1626846404,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1003\"}}"]
let telnetData = ["meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
"meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
"meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3"];
async function createConnect() {
let dsn = 'ws://localhost:6041'
let conf = new taos.WSConfig(dsn);
conf.setUser('root');
conf.setPwd('taosdata');
let wsSql = await taos.sqlConnect(conf);
await wsSql.exec('CREATE DATABASE IF NOT EXISTS power KEEP 3650 DURATION 10 BUFFER 16 WAL_LEVEL 1;');
await wsSql.exec('USE power');
return wsSql;
}
async function test() {
let wsSql = null;
let wsRows = null;
let ttl = 0;
try {
wsSql = await createConnect()
await wsSql.schemalessInsert(influxdbData, taos.SchemalessProto.InfluxDBLineProtocol, taos.Precision.MILLI_SECONDS, ttl);
await wsSql.schemalessInsert(jsonData, taos.SchemalessProto.OpenTSDBJsonFormatProtocol, taos.Precision.SECONDS, ttl);
await wsSql.schemalessInsert(telnetData, taos.SchemalessProto.OpenTSDBTelnetLineProtocol, taos.Precision.MILLI_SECONDS, ttl);
}
catch (err) {
console.error(err.code, err.message);
}
finally {
if (wsRows) {
await wsRows.close();
}
if (wsSql) {
await wsSql.close();
}
taos.destroy();
}
}
test()

View File

@ -0,0 +1,77 @@
const taos = require("@tdengine/websocket");
var host = null;
for(var i = 2; i < global.process.argv.length; i++){
var key = global.process.argv[i].split("=")[0];
var value = global.process.argv[i].split("=")[1];
if("host" == key){
host = value;
}
}
if(host == null){
console.log("Usage: node nodejsChecker.js host=<hostname> port=<port>");
process.exit(0);
}
async function createConnect() {
let dsn = 'ws://' + host + ':6041'
console.log(dsn)
let conf = new taos.WSConfig(dsn);
conf.setUser('root')
conf.setPwd('taosdata')
return await taos.sqlConnect(conf);
}
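// End-to-end check: show the server version, create the database and
// super table, insert one row via auto table creation, then query it back.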
async function test() {
let wsSql = null;
let wsRows = null;
let reqId = 0;
try {
wsSql = await createConnect()
let version = await wsSql.version();
console.log(version);
let taosResult = await wsSql.exec('SHOW DATABASES', reqId++);
console.log(taosResult);
taosResult = await wsSql.exec('CREATE DATABASE IF NOT EXISTS power KEEP 3650 DURATION 10 BUFFER 16 WAL_LEVEL 1;', reqId++);
console.log(taosResult);
taosResult = await wsSql.exec('USE power', reqId++);
console.log(taosResult);
taosResult = await wsSql.exec('CREATE STABLE IF NOT EXISTS meters (_ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);', reqId++);
console.log(taosResult);
taosResult = await wsSql.exec('DESCRIBE meters', reqId++);
console.log(taosResult);
taosResult = await wsSql.exec('INSERT INTO d1001 USING meters (location, groupId) TAGS ("California.SanFrancisco", 3) VALUES (NOW, 10.2, 219, 0.32)', reqId++);
console.log(taosResult);
wsRows = await wsSql.query('SELECT * FROM meters', reqId++);
let meta = wsRows.getMeta();
console.log("wsRow:meta:=>", meta);
while (await wsRows.next()) {
let result = wsRows.getData();
console.log('queryRes.Scan().then=>', result);
}
}
catch (err) {
console.error(err.code, err.message);
}
finally {
if (wsRows) {
await wsRows.close();
}
if (wsSql) {
await wsSql.close();
}
taos.destroy();
}
}
test()

View File

@ -0,0 +1,143 @@
const taos = require("@tdengine/websocket");
// ANCHOR: createConnect
async function createConnect() {
let dsn = 'ws://localhost:6041';
let conf = new taos.WSConfig(dsn);
conf.setUser('root');
conf.setPwd('taosdata');
conf.setDb('power');
return await taos.sqlConnect(conf);
}
// ANCHOR_END: createConnect
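// Each snippet below opens its own connection through createConnect()
// and closes it in a finally block, so the steps run independently.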
// ANCHOR: create_db_and_table
async function createDbAndTable() {
let wsSql = null;
try {
wsSql = await createConnect();
await wsSql.exec('CREATE DATABASE IF NOT EXISTS power ' +
'KEEP 3650 DURATION 10 BUFFER 16 WAL_LEVEL 1;');
await wsSql.exec('USE power');
await wsSql.exec('CREATE STABLE IF NOT EXISTS meters ' +
'(_ts timestamp, current float, voltage int, phase float) ' +
'TAGS (location binary(64), groupId int);');
let taosResult = await wsSql.exec('DESCRIBE meters');
console.log(taosResult);
} catch (err) {
console.error(err.code, err.message);
} finally {
if (wsSql) {
await wsSql.close();
}
}
}
// ANCHOR_END: create_db_and_table
// ANCHOR: insertData
async function insertData() {
let wsSql = null;
try {
wsSql = await createConnect();
let insertQuery = "INSERT INTO " +
"power.d1001 USING power.meters (location, groupId) TAGS('California.SanFrancisco', 2) " +
"VALUES " +
"(NOW + 1a, 10.30000, 219, 0.31000) " +
"(NOW + 2a, 12.60000, 218, 0.33000) " +
"(NOW + 3a, 12.30000, 221, 0.31000) " +
"power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) " +
"VALUES " +
"(NOW + 1a, 10.30000, 218, 0.25000) ";
let taosResult = await wsSql.exec(insertQuery);
console.log(taosResult);
} catch (err) {
console.error(err.code, err.message);
} finally {
if (wsSql) {
await wsSql.close();
}
}
}
// ANCHOR_END: insertData
// ANCHOR: queryData
async function queryData() {
let wsRows = null;
let wsSql = null;
try {
wsSql = await createConnect();
wsRows = await wsSql.query('select * from meters');
let meta = wsRows.getMeta();
console.log("wsRow:meta:=>", meta);
while (await wsRows.next()) {
let result = wsRows.getData();
console.log('queryRes.Scan().then=>', result);
}
}
catch (err) {
console.error(err.code, err.message);
}
finally {
if (wsRows) {
await wsRows.close();
}
if (wsSql) {
await wsSql.close();
}
}
}
// ANCHOR_END: queryData
// ANCHOR: sqlWithReqid
async function sqlWithReqid() {
let insertQuery = "INSERT INTO " +
"power.d1001 USING power.meters (location, groupId) TAGS('California.SanFrancisco', 2) " +
"VALUES " +
"(NOW + 1a, 10.30000, 219, 0.31000) " +
"(NOW + 2a, 12.60000, 218, 0.33000) " +
"(NOW + 3a, 12.30000, 221, 0.31000) " +
"power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) " +
"VALUES " +
"(NOW + 1a, 10.30000, 218, 0.25000) ";
let wsRows = null;
let wsSql = null;
try {
wsSql = await createConnect();
let taosResult = await wsSql.exec(insertQuery, 1);
wsRows = await wsSql.query('select * from meters', 2);
let meta = wsRows.getMeta();
console.log("wsRow:meta:=>", meta);
while (await wsRows.next()) {
let result = wsRows.getData();
console.log('queryRes.Scan().then=>', result);
}
}
catch (err) {
console.error(err.code, err.message);
}
finally {
if (wsRows) {
await wsRows.close();
}
if (wsSql) {
await wsSql.close();
}
}
}
// ANCHOR_END: sqlWithReqid
async function test() {
await createDbAndTable();
await insertData();
await queryData();
await sqlWithReqid();
taos.destroy();
}
test()

Some files were not shown because too many files have changed in this diff