Merge branch 'v3.0' into fix/long_query
|
@ -128,3 +128,4 @@ tools/NEWS
|
||||||
tools/COPYING
|
tools/COPYING
|
||||||
tools/BUGS
|
tools/BUGS
|
||||||
tools/taos-tools
|
tools/taos-tools
|
||||||
|
tools/taosws-rs
|
||||||
|
|
|
@ -17,11 +17,12 @@ include(${TD_SUPPORT_DIR}/cmake.platform)
|
||||||
include(${TD_SUPPORT_DIR}/cmake.define)
|
include(${TD_SUPPORT_DIR}/cmake.define)
|
||||||
include(${TD_SUPPORT_DIR}/cmake.options)
|
include(${TD_SUPPORT_DIR}/cmake.options)
|
||||||
include(${TD_SUPPORT_DIR}/cmake.version)
|
include(${TD_SUPPORT_DIR}/cmake.version)
|
||||||
include(${TD_SUPPORT_DIR}/cmake.install)
|
|
||||||
|
|
||||||
# contrib
|
# contrib
|
||||||
add_subdirectory(contrib)
|
add_subdirectory(contrib)
|
||||||
|
|
||||||
|
set_property(GLOBAL PROPERTY GLOBAL_DEPENDS_NO_CYCLES OFF)
|
||||||
|
|
||||||
# api
|
# api
|
||||||
add_library(api INTERFACE)
|
add_library(api INTERFACE)
|
||||||
target_include_directories(api INTERFACE "include/client")
|
target_include_directories(api INTERFACE "include/client")
|
||||||
|
@ -36,8 +37,7 @@ add_subdirectory(source)
|
||||||
add_subdirectory(tools)
|
add_subdirectory(tools)
|
||||||
add_subdirectory(utils)
|
add_subdirectory(utils)
|
||||||
add_subdirectory(examples/c)
|
add_subdirectory(examples/c)
|
||||||
|
include(${TD_SUPPORT_DIR}/cmake.install)
|
||||||
|
|
||||||
# docs
|
# docs
|
||||||
add_subdirectory(docs/doxgen)
|
add_subdirectory(docs/doxgen)
|
||||||
|
|
||||||
# tests (TODO)
|
|
||||||
|
|
|
@ -79,7 +79,7 @@ def pre_test(){
|
||||||
rm -rf debug
|
rm -rf debug
|
||||||
mkdir debug
|
mkdir debug
|
||||||
cd debug
|
cd debug
|
||||||
cmake .. > /dev/null
|
cmake .. -DBUILD_TEST=true > /dev/null
|
||||||
make -j4> /dev/null
|
make -j4> /dev/null
|
||||||
|
|
||||||
'''
|
'''
|
||||||
|
|
10
Jenkinsfile2
|
@ -173,7 +173,7 @@ def pre_test_build_mac() {
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
cd ${WK}/debug
|
cd ${WK}/debug
|
||||||
cmake ..
|
cmake .. -DBUILD_TEST=true
|
||||||
make -j8
|
make -j8
|
||||||
'''
|
'''
|
||||||
sh '''
|
sh '''
|
||||||
|
@ -218,12 +218,12 @@ def pre_test_win(){
|
||||||
if (env.CHANGE_URL =~ /\/TDengine\//) {
|
if (env.CHANGE_URL =~ /\/TDengine\//) {
|
||||||
bat '''
|
bat '''
|
||||||
cd %WIN_INTERNAL_ROOT%
|
cd %WIN_INTERNAL_ROOT%
|
||||||
git pull
|
git pull origin ''' + env.CHANGE_TARGET + '''
|
||||||
'''
|
'''
|
||||||
bat '''
|
bat '''
|
||||||
cd %WIN_COMMUNITY_ROOT%
|
cd %WIN_COMMUNITY_ROOT%
|
||||||
git remote prune origin
|
git remote prune origin
|
||||||
git pull
|
git pull origin ''' + env.CHANGE_TARGET + '''
|
||||||
'''
|
'''
|
||||||
bat '''
|
bat '''
|
||||||
cd %WIN_COMMUNITY_ROOT%
|
cd %WIN_COMMUNITY_ROOT%
|
||||||
|
@ -236,7 +236,7 @@ def pre_test_win(){
|
||||||
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
|
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
|
||||||
bat '''
|
bat '''
|
||||||
cd %WIN_INTERNAL_ROOT%
|
cd %WIN_INTERNAL_ROOT%
|
||||||
git pull
|
git pull origin ''' + env.CHANGE_TARGET + '''
|
||||||
'''
|
'''
|
||||||
bat '''
|
bat '''
|
||||||
cd %WIN_INTERNAL_ROOT%
|
cd %WIN_INTERNAL_ROOT%
|
||||||
|
@ -302,7 +302,7 @@ def pre_test_build_win() {
|
||||||
set CL=/MP8
|
set CL=/MP8
|
||||||
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> cmake"
|
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> cmake"
|
||||||
time /t
|
time /t
|
||||||
cmake .. -G "NMake Makefiles JOM" || exit 7
|
cmake .. -G "NMake Makefiles JOM" -DBUILD_TEST=true || exit 7
|
||||||
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> jom -j 6"
|
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> jom -j 6"
|
||||||
time /t
|
time /t
|
||||||
jom -j 6 || exit 8
|
jom -j 6 || exit 8
|
||||||
|
|
34
README-CN.md
|
@ -39,9 +39,9 @@ TDengine 是一款开源、高性能、云原生的时序数据库 (Time-Series
|
||||||
|
|
||||||
# 构建
|
# 构建
|
||||||
|
|
||||||
TDengine 目前可以在 Linux、 Windows 等平台上安装和运行。任何 OS 的应用也可以选择 taosAdapter 的 RESTful 接口连接服务端 taosd。CPU 支持 X64/ARM64,后续会支持 MIPS64、Alpha64、ARM32、RISC-V 等 CPU 架构。
|
TDengine 目前可以在 Linux、 Windows、macOS 等平台上安装和运行。任何 OS 的应用也可以选择 taosAdapter 的 RESTful 接口连接服务端 taosd。CPU 支持 X64/ARM64,后续会支持 MIPS64、Alpha64、ARM32、RISC-V 等 CPU 架构。
|
||||||
|
|
||||||
用户可根据需求选择通过源码、[容器](https://docs.taosdata.com/get-started/docker/)、[安装包](https://docs.taosdata.com/get-started/package/)或[Kubenetes](https://docs.taosdata.com/deployment/k8s/)来安装。本快速指南仅适用于通过源码安装。
|
用户可根据需求选择通过源码、[容器](https://docs.taosdata.com/get-started/docker/)、[安装包](https://docs.taosdata.com/get-started/package/)或[Kubernetes](https://docs.taosdata.com/deployment/k8s/)来安装。本快速指南仅适用于通过源码安装。
|
||||||
|
|
||||||
TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBenchmark(曾命名为 taosdemo)和 taosdump 两个软件。默认 TDengine 编译不包含 taosTools, 您可以在编译 TDengine 时使用`cmake .. -DBUILD_TOOLS=true` 来同时编译 taosTools。
|
TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBenchmark(曾命名为 taosdemo)和 taosdump 两个软件。默认 TDengine 编译不包含 taosTools, 您可以在编译 TDengine 时使用`cmake .. -DBUILD_TOOLS=true` 来同时编译 taosTools。
|
||||||
|
|
||||||
|
@ -104,6 +104,12 @@ sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgco
|
||||||
sudo yum config-manager --set-enabled Powertools
|
sudo yum config-manager --set-enabled Powertools
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### macOS
|
||||||
|
|
||||||
|
```
|
||||||
|
brew install argp-standalone pkgconfig
|
||||||
|
```
|
||||||
|
|
||||||
### 设置 golang 开发环境
|
### 设置 golang 开发环境
|
||||||
|
|
||||||
TDengine 包含数个使用 Go 语言开发的组件,比如taosAdapter, 请参考 golang.org 官方文档设置 go 开发环境。
|
TDengine 包含数个使用 Go 语言开发的组件,比如taosAdapter, 请参考 golang.org 官方文档设置 go 开发环境。
|
||||||
|
@ -210,14 +216,14 @@ cmake .. -G "NMake Makefiles"
|
||||||
nmake
|
nmake
|
||||||
```
|
```
|
||||||
|
|
||||||
<!-- ### macOS 系统
|
### macOS 系统
|
||||||
|
|
||||||
安装 Xcode 命令行工具和 cmake. 在 Catalina 和 Big Sur 操作系统上,需要安装 XCode 11.4+ 版本。
|
安装 XCode 命令行工具和 cmake. 在 Catalina 和 Big Sur 操作系统上,需要安装 XCode 11.4+ 版本。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
mkdir debug && cd debug
|
mkdir debug && cd debug
|
||||||
cmake .. && cmake --build .
|
cmake .. && cmake --build .
|
||||||
``` -->
|
```
|
||||||
|
|
||||||
# 安装
|
# 安装
|
||||||
|
|
||||||
|
@ -263,6 +269,24 @@ nmake install
|
||||||
sudo make install
|
sudo make install
|
||||||
```
|
```
|
||||||
|
|
||||||
|
用户可以在[文件目录结构](https://docs.taosdata.com/reference/directory/)中了解更多在操作系统中生成的目录或文件。
|
||||||
|
|
||||||
|
从源代码安装也会为 TDengine 配置服务管理 ,用户也可以选择[从安装包中安装](https://docs.taosdata.com/get-started/package/)。
|
||||||
|
|
||||||
|
安装成功后,可以在应用程序中双击 TDengine 图标启动服务,或者在终端中启动 TDengine 服务:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
launchctl start com.tdengine.taosd
|
||||||
|
```
|
||||||
|
|
||||||
|
用户可以使用 TDengine CLI 来连接 TDengine 服务,在终端中,输入:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
taos
|
||||||
|
```
|
||||||
|
|
||||||
|
如果 TDengine CLI 连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印出错误消息。
|
||||||
|
|
||||||
## 快速运行
|
## 快速运行
|
||||||
|
|
||||||
如果不希望以服务方式运行 TDengine,也可以在终端中直接运行它。也即在生成完成后,执行以下命令(在 Windows 下,生成的可执行文件会带有 .exe 后缀,例如会名为 taosd.exe ):
|
如果不希望以服务方式运行 TDengine,也可以在终端中直接运行它。也即在生成完成后,执行以下命令(在 Windows 下,生成的可执行文件会带有 .exe 后缀,例如会名为 taosd.exe ):
|
||||||
|
|
57
README.md
|
@ -15,25 +15,25 @@
|
||||||
[](https://coveralls.io/github/taosdata/TDengine?branch=develop)
|
[](https://coveralls.io/github/taosdata/TDengine?branch=develop)
|
||||||
[](https://bestpractices.coreinfrastructure.org/projects/4201)
|
[](https://bestpractices.coreinfrastructure.org/projects/4201)
|
||||||
|
|
||||||
English | [简体中文](README-CN.md) | [Lean more about TSDB](https://tdengine.com/tsdb)
|
English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine.com) | [Learn more about TSDB](https://tdengine.com/tsdb/)
|
||||||
|
|
||||||
# What is TDengine?
|
# What is TDengine?
|
||||||
|
|
||||||
TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-seires databases with the following advantages:
|
TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-series databases with the following advantages:
|
||||||
|
|
||||||
- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
|
- **[High Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
|
||||||
|
|
||||||
- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
|
- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
|
||||||
|
|
||||||
- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
|
- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
|
||||||
|
|
||||||
- **[Ease of Use](https://docs.tdengine.com/get-started/docker/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
|
- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
|
||||||
|
|
||||||
- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
|
- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
|
||||||
|
|
||||||
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub. There is an active developer community, and over 139k running instances worldwide.
|
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub. There is an active developer community, and over 139k running instances worldwide.
|
||||||
|
|
||||||
For a full list of TDengine competitive advantages, please [check here](https://tdengine.com/tdengine/)
|
For a full list of TDengine competitive advantages, please [check here](https://tdengine.com/tdengine/). The easiest way to experience TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
|
||||||
|
|
||||||
# Documentation
|
# Documentation
|
||||||
|
|
||||||
|
@ -41,7 +41,7 @@ For user manual, system design and architecture, please refer to [TDengine Docum
|
||||||
|
|
||||||
# Building
|
# Building
|
||||||
|
|
||||||
At the moment, TDengine server supports running on Linux and Windows systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future.
|
At the moment, TDengine server supports running on Linux/Windows/macOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future.
|
||||||
|
|
||||||
You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to installing from source.
|
You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to installing from source.
|
||||||
|
|
||||||
|
@ -105,6 +105,12 @@ If the PowerTools installation fails, you can try to use:
|
||||||
sudo yum config-manager --set-enabled powertools
|
sudo yum config-manager --set-enabled powertools
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### macOS
|
||||||
|
|
||||||
|
```
|
||||||
|
brew install argp-standalone pkgconfig
|
||||||
|
```
|
||||||
|
|
||||||
### Setup golang environment
|
### Setup golang environment
|
||||||
|
|
||||||
TDengine includes a few components like taosAdapter developed by Go language. Please refer to golang.org official documentation for golang environment setup.
|
TDengine includes a few components like taosAdapter developed by Go language. Please refer to golang.org official documentation for golang environment setup.
|
||||||
|
@ -213,14 +219,14 @@ cmake .. -G "NMake Makefiles"
|
||||||
nmake
|
nmake
|
||||||
```
|
```
|
||||||
|
|
||||||
<!-- ### On macOS platform
|
### On macOS platform
|
||||||
|
|
||||||
Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur.
|
Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur.
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
mkdir debug && cd debug
|
mkdir debug && cd debug
|
||||||
cmake .. && cmake --build .
|
cmake .. && cmake --build .
|
||||||
``` -->
|
```
|
||||||
|
|
||||||
# Installing
|
# Installing
|
||||||
|
|
||||||
|
@ -232,9 +238,9 @@ After building successfully, TDengine can be installed by
|
||||||
sudo make install
|
sudo make install
|
||||||
```
|
```
|
||||||
|
|
||||||
Users can find more information about directories installed on the system in the [directory and files](https://docs.taosdata.com/reference/directory/) section.
|
Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section.
|
||||||
|
|
||||||
Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.taosdata.com/get-started/package/) for it.
|
Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it.
|
||||||
|
|
||||||
To start the service after installation, in a terminal, use:
|
To start the service after installation, in a terminal, use:
|
||||||
|
|
||||||
|
@ -258,7 +264,7 @@ After building successfully, TDengine can be installed by:
|
||||||
nmake install
|
nmake install
|
||||||
```
|
```
|
||||||
|
|
||||||
<!--
|
|
||||||
## On macOS platform
|
## On macOS platform
|
||||||
|
|
||||||
After building successfully, TDengine can be installed by:
|
After building successfully, TDengine can be installed by:
|
||||||
|
@ -266,7 +272,24 @@ After building successfully, TDengine can be installed by:
|
||||||
```bash
|
```bash
|
||||||
sudo make install
|
sudo make install
|
||||||
```
|
```
|
||||||
-->
|
|
||||||
|
Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section.
|
||||||
|
|
||||||
|
Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it.
|
||||||
|
|
||||||
|
To start the service after installation, double-click the /applications/TDengine to start the program, or in a terminal, use:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
launchctl start com.tdengine.taosd
|
||||||
|
```
|
||||||
|
|
||||||
|
Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
taos
|
||||||
|
```
|
||||||
|
|
||||||
|
If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown.
|
||||||
|
|
||||||
## Quick Run
|
## Quick Run
|
||||||
|
|
||||||
|
@ -321,7 +344,11 @@ TDengine provides abundant developing tools for users to develop on TDengine. Fo
|
||||||
|
|
||||||
Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to the project.
|
Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to the project.
|
||||||
|
|
||||||
# Join TDengine User Community
|
# Join the TDengine Community
|
||||||
|
|
||||||
- Join [TDengine Discord Channel](https://discord.com/invite/VZdSuUg4pS?utm_id=discord)
|
For more information about TDengine, you can follow us on social media and join our Discord server:
|
||||||
- Join wechat group by adding WeChat “tdengine”
|
|
||||||
|
- [Discord](https://discord.com/invite/VZdSuUg4pS)
|
||||||
|
- [Twitter](https://twitter.com/TaosData)
|
||||||
|
- [LinkedIn](https://www.linkedin.com/company/tdengine/)
|
||||||
|
- [YouTube](https://www.youtube.com/channel/UCmp-1U6GS_3V3hjir6Uq5DQ)
|
||||||
|
|
|
@ -100,6 +100,8 @@ IF (TD_WINDOWS)
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}")
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}")
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")
|
||||||
|
|
||||||
|
SET(JEMALLOC_ENABLED OFF)
|
||||||
|
|
||||||
ELSE ()
|
ELSE ()
|
||||||
IF (${TD_DARWIN})
|
IF (${TD_DARWIN})
|
||||||
set(CMAKE_MACOSX_RPATH 0)
|
set(CMAKE_MACOSX_RPATH 0)
|
||||||
|
@ -117,8 +119,8 @@ ELSE ()
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
|
||||||
MESSAGE(STATUS "Will compile with Address Sanitizer!")
|
MESSAGE(STATUS "Will compile with Address Sanitizer!")
|
||||||
ELSE ()
|
ELSE ()
|
||||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=0")
|
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=0")
|
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
MESSAGE("System processor ID: ${CMAKE_SYSTEM_PROCESSOR}")
|
MESSAGE("System processor ID: ${CMAKE_SYSTEM_PROCESSOR}")
|
||||||
|
|
|
@ -2,6 +2,12 @@
|
||||||
# Deps options
|
# Deps options
|
||||||
# =========================================================
|
# =========================================================
|
||||||
|
|
||||||
|
option(
|
||||||
|
BUILD_TEST
|
||||||
|
"If build unit tests using googletest"
|
||||||
|
OFF
|
||||||
|
)
|
||||||
|
|
||||||
IF(${TD_WINDOWS})
|
IF(${TD_WINDOWS})
|
||||||
|
|
||||||
MESSAGE("build pthread Win32")
|
MESSAGE("build pthread Win32")
|
||||||
|
@ -46,12 +52,6 @@ IF(${TD_WINDOWS})
|
||||||
ON
|
ON
|
||||||
)
|
)
|
||||||
|
|
||||||
option(
|
|
||||||
BUILD_TEST
|
|
||||||
"If build unit tests using googletest"
|
|
||||||
ON
|
|
||||||
)
|
|
||||||
|
|
||||||
option(
|
option(
|
||||||
TDENGINE_3
|
TDENGINE_3
|
||||||
"TDengine 3.x for taos-tools"
|
"TDengine 3.x for taos-tools"
|
||||||
|
@ -65,28 +65,8 @@ IF(${TD_WINDOWS})
|
||||||
)
|
)
|
||||||
|
|
||||||
ELSEIF (TD_DARWIN_64)
|
ELSEIF (TD_DARWIN_64)
|
||||||
|
IF(${BUILD_TEST})
|
||||||
add_definitions(-DCOMPILER_SUPPORTS_CXX13)
|
add_definitions(-DCOMPILER_SUPPORTS_CXX13)
|
||||||
option(
|
|
||||||
BUILD_TEST
|
|
||||||
"If build unit tests using googletest"
|
|
||||||
ON
|
|
||||||
)
|
|
||||||
ELSE ()
|
|
||||||
include(CheckCXXCompilerFlag)
|
|
||||||
CHECK_CXX_COMPILER_FLAG("-std=c++13" COMPILER_SUPPORTS_CXX13)
|
|
||||||
IF(${COMPILER_SUPPORTS_CXX13})
|
|
||||||
add_definitions(-DCOMPILER_SUPPORTS_CXX13)
|
|
||||||
option(
|
|
||||||
BUILD_TEST
|
|
||||||
"If build unit tests using googletest"
|
|
||||||
ON
|
|
||||||
)
|
|
||||||
ELSE ()
|
|
||||||
option(
|
|
||||||
BUILD_TEST
|
|
||||||
"If build unit tests using googletest"
|
|
||||||
OFF
|
|
||||||
)
|
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
|
|
|
@ -45,10 +45,19 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
|
||||||
ADD_DEFINITIONS("-DDARWIN -Wno-tautological-pointer-compare")
|
ADD_DEFINITIONS("-DDARWIN -Wno-tautological-pointer-compare")
|
||||||
|
|
||||||
MESSAGE("Current system processor is ${CMAKE_SYSTEM_PROCESSOR}.")
|
MESSAGE("Current system processor is ${CMAKE_SYSTEM_PROCESSOR}.")
|
||||||
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86_64")
|
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64")
|
||||||
MESSAGE("Current system arch is 64")
|
MESSAGE("Current system arch is arm64")
|
||||||
SET(TD_DARWIN_64 TRUE)
|
SET(TD_DARWIN_64 TRUE)
|
||||||
|
SET(TD_DARWIN_ARM64 TRUE)
|
||||||
ADD_DEFINITIONS("-D_TD_DARWIN_64")
|
ADD_DEFINITIONS("-D_TD_DARWIN_64")
|
||||||
|
ADD_DEFINITIONS("-D_TD_DARWIN_ARM64")
|
||||||
|
ENDIF ()
|
||||||
|
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86_64")
|
||||||
|
MESSAGE("Current system arch is x86_64")
|
||||||
|
SET(TD_DARWIN_64 TRUE)
|
||||||
|
SET(TD_DARWIN_X64 TRUE)
|
||||||
|
ADD_DEFINITIONS("-D_TD_DARWIN_64")
|
||||||
|
ADD_DEFINITIONS("-D_TD_DARWIN_X64")
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
ADD_DEFINITIONS("-DHAVE_UNISTD_H")
|
ADD_DEFINITIONS("-DHAVE_UNISTD_H")
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
IF (DEFINED VERNUMBER)
|
IF (DEFINED VERNUMBER)
|
||||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||||
ELSE ()
|
ELSE ()
|
||||||
SET(TD_VER_NUMBER "3.0.1.1")
|
SET(TD_VER_NUMBER "3.0.1.5")
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
IF (DEFINED VERCOMPATIBLE)
|
IF (DEFINED VERCOMPATIBLE)
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
# taosadapter
|
# taosadapter
|
||||||
ExternalProject_Add(taosadapter
|
ExternalProject_Add(taosadapter
|
||||||
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
||||||
GIT_TAG 71e7ccf
|
GIT_TAG a11131c
|
||||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
||||||
BINARY_DIR ""
|
BINARY_DIR ""
|
||||||
#BUILD_IN_SOURCE TRUE
|
#BUILD_IN_SOURCE TRUE
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
# taos-tools
|
# taos-tools
|
||||||
ExternalProject_Add(taos-tools
|
ExternalProject_Add(taos-tools
|
||||||
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
|
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
|
||||||
GIT_TAG e7270c9
|
GIT_TAG f9c1d32
|
||||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
|
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
|
||||||
BINARY_DIR ""
|
BINARY_DIR ""
|
||||||
#BUILD_IN_SOURCE TRUE
|
#BUILD_IN_SOURCE TRUE
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
# taosws-rs
|
# taosws-rs
|
||||||
ExternalProject_Add(taosws-rs
|
ExternalProject_Add(taosws-rs
|
||||||
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
|
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
|
||||||
GIT_TAG e771403
|
GIT_TAG 0373a70
|
||||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
|
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
|
||||||
BINARY_DIR ""
|
BINARY_DIR ""
|
||||||
#BUILD_IN_SOURCE TRUE
|
#BUILD_IN_SOURCE TRUE
|
||||||
|
|
|
@ -37,6 +37,11 @@ if(${BUILD_WITH_ICONV})
|
||||||
cat("${TD_SUPPORT_DIR}/iconv_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
cat("${TD_SUPPORT_DIR}/iconv_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
# jemalloc
|
||||||
|
if(${JEMALLOC_ENABLED})
|
||||||
|
cat("${TD_SUPPORT_DIR}/jemalloc_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
|
endif()
|
||||||
|
|
||||||
# msvc regex
|
# msvc regex
|
||||||
if(${BUILD_MSVCREGEX})
|
if(${BUILD_MSVCREGEX})
|
||||||
cat("${TD_SUPPORT_DIR}/msvcregex_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
cat("${TD_SUPPORT_DIR}/msvcregex_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
|
@ -258,6 +263,19 @@ if(${BUILD_PTHREAD})
|
||||||
target_link_libraries(pthread INTERFACE libpthreadVC3)
|
target_link_libraries(pthread INTERFACE libpthreadVC3)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
# jemalloc
|
||||||
|
if(${JEMALLOC_ENABLED})
|
||||||
|
include(ExternalProject)
|
||||||
|
ExternalProject_Add(jemalloc
|
||||||
|
PREFIX "jemalloc"
|
||||||
|
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
|
||||||
|
BUILD_IN_SOURCE 1
|
||||||
|
CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --disable-initial-exec-tls --with-malloc-conf='background_thread:true,metadata_thp:auto'
|
||||||
|
BUILD_COMMAND ${MAKE}
|
||||||
|
)
|
||||||
|
INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)
|
||||||
|
endif()
|
||||||
|
|
||||||
# crashdump
|
# crashdump
|
||||||
if(${BUILD_CRASHDUMP})
|
if(${BUILD_CRASHDUMP})
|
||||||
add_executable(dumper "crashdump/dumper/dumper.c")
|
add_executable(dumper "crashdump/dumper/dumper.c")
|
||||||
|
|
|
@ -23,8 +23,8 @@ The major features are listed below:
|
||||||
4. [Stream Processing](../develop/stream/): Not only is the continuous query is supported, but TDengine also supports event driven stream processing, so Flink or Spark is not needed for time-series data processing.
|
4. [Stream Processing](../develop/stream/): Not only is the continuous query is supported, but TDengine also supports event driven stream processing, so Flink or Spark is not needed for time-series data processing.
|
||||||
5. [Data Subscription](../develop/tmq/): Application can subscribe a table or a set of tables. API is the same as Kafka, but you can specify filter conditions.
|
5. [Data Subscription](../develop/tmq/): Application can subscribe a table or a set of tables. API is the same as Kafka, but you can specify filter conditions.
|
||||||
6. Visualization
|
6. Visualization
|
||||||
- Supports seamless integration with [Grafana](../third-party/grafana/) for visualization.
|
- Supports seamless integration with [Grafana](../third-party/grafana/).
|
||||||
- Supports seamless integration with Google Data Studio.
|
- Supports seamless integration with [Google Data Studio](../third-party/google-data-studio/).
|
||||||
7. Cluster
|
7. Cluster
|
||||||
- Supports [cluster](../deployment/) with the capability of increasing processing power by adding more nodes.
|
- Supports [cluster](../deployment/) with the capability of increasing processing power by adding more nodes.
|
||||||
- Supports [deployment on Kubernetes](../deployment/k8s/).
|
- Supports [deployment on Kubernetes](../deployment/k8s/).
|
||||||
|
@ -33,7 +33,7 @@ The major features are listed below:
|
||||||
- Provides [monitoring](../operation/monitor) on running instances of TDengine.
|
- Provides [monitoring](../operation/monitor) on running instances of TDengine.
|
||||||
- Provides many ways to [import](../operation/import) and [export](../operation/export) data.
|
- Provides many ways to [import](../operation/import) and [export](../operation/export) data.
|
||||||
9. Tools
|
9. Tools
|
||||||
- Provides an interactive [Command-line Interface (CLI)](../reference/taos-shell) for management, maintenance and ad-hoc queries.
|
- Provides an interactive [Command Line Interface (CLI)](../reference/taos-shell) for management, maintenance and ad-hoc queries.
|
||||||
- Provides a tool [taosBenchmark](../reference/taosbenchmark/) for testing the performance of TDengine.
|
- Provides a tool [taosBenchmark](../reference/taosbenchmark/) for testing the performance of TDengine.
|
||||||
10. Programming
|
10. Programming
|
||||||
- Provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages.
|
- Provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages.
|
||||||
|
|
|
@ -3,11 +3,27 @@ sidebar_label: Docker
|
||||||
title: Quick Install on Docker
|
title: Quick Install on Docker
|
||||||
---
|
---
|
||||||
|
|
||||||
This document describes how to install TDengine in a Docker container and perform queries and inserts. To get started with TDengine in a non-containerized environment, see [Quick Install](../../get-started/package). If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
|
This document describes how to install TDengine in a Docker container and perform queries and inserts.
|
||||||
|
|
||||||
|
- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
|
||||||
|
- To get started with TDengine in a non-containerized environment, see [Quick Install from Package](../../get-started/package).
|
||||||
|
- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
|
||||||
|
|
||||||
## Run TDengine
|
## Run TDengine
|
||||||
|
|
||||||
If Docker is already installed on your computer, run the following command:
|
If Docker is already installed on your computer, pull the latest TDengine Docker container image:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
docker pull tdengine/tdengine:latest
|
||||||
|
```
|
||||||
|
|
||||||
|
Or the container image of specific version:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
docker pull tdengine/tdengine:3.0.1.4
|
||||||
|
```
|
||||||
|
|
||||||
|
And then run the following command:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
|
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
|
||||||
|
@ -46,7 +62,7 @@ taos>
|
||||||
|
|
||||||
After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
|
After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
|
||||||
|
|
||||||
Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) in a Linux or Windows terminal.
|
Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) in a terminal.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
taosBenchmark
|
taosBenchmark
|
||||||
|
|
|
@ -7,13 +7,17 @@ import Tabs from "@theme/Tabs";
|
||||||
import TabItem from "@theme/TabItem";
|
import TabItem from "@theme/TabItem";
|
||||||
import PkgListV3 from "/components/PkgListV3";
|
import PkgListV3 from "/components/PkgListV3";
|
||||||
|
|
||||||
For information about installing TDengine on Docker, see [Quick Install on Docker](../../get-started/docker). If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
|
This document describes how to install TDengine on Linux/Windows/macOS and perform queries and inserts.
|
||||||
|
|
||||||
|
- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
|
||||||
|
- To get started with TDengine on Docker, see [Quick Install on Docker](../../get-started/docker).
|
||||||
|
- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
|
||||||
|
|
||||||
The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface (CLI, taos), and some tools. Note that taosAdapter supports Linux only. In addition to connectors for multiple languages, TDengine also provides a [REST API](../../reference/rest-api) through [taosAdapter](../../reference/taosadapter).
|
The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface (CLI, taos), and some tools. Note that taosAdapter supports Linux only. In addition to connectors for multiple languages, TDengine also provides a [REST API](../../reference/rest-api) through [taosAdapter](../../reference/taosadapter).
|
||||||
|
|
||||||
The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download the Lite package that includes only `taosd` and the C/C++ connector.
|
The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download the Lite package that includes only `taosd` and the C/C++ connector.
|
||||||
|
|
||||||
The TDengine Community Edition is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.tz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on 64-bit Windows.
|
The TDengine Community Edition is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.tz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on x64 Windows and x64/m1 macOS.
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
|
@ -107,11 +111,18 @@ Note: TDengine only supports Windows Server 2016/2019 and Windows 10/11 on the W
|
||||||
<PkgListV3 type={3}/>
|
<PkgListV3 type={3}/>
|
||||||
2. Run the downloaded package to install TDengine.
|
2. Run the downloaded package to install TDengine.
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem label="macOS" value="macos">
|
||||||
|
|
||||||
|
1. Download the macOS installation package.
|
||||||
|
<PkgListV3 type={7}/>
|
||||||
|
2. Run the downloaded package to install TDengine. If the installation is blocked, you can right-click or ctrl-click on the installation package and select `Open`.
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
:::info
|
:::info
|
||||||
For information about TDengine releases, see [Release History](../../releases).
|
For information about TDengine other releases, check [Release History](../../releases/tdengine).
|
||||||
:::
|
:::
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
|
@ -168,18 +179,47 @@ The following `systemctl` commands can help you manage TDengine service:
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
|
## Command Line Interface (CLI)
|
||||||
|
|
||||||
|
You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, you can execute `taos` in terminal.
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
|
|
||||||
<TabItem label="Windows" value="windows">
|
<TabItem label="Windows" value="windows">
|
||||||
|
|
||||||
After the installation is complete, run `C:\TDengine\taosd.exe` to start TDengine Server.
|
After the installation is complete, run `C:\TDengine\taosd.exe` to start TDengine Server.
|
||||||
|
|
||||||
|
## Command Line Interface (CLI)
|
||||||
|
|
||||||
|
You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, you can run `taos.exe` in the `C:\TDengine` directory of the Windows terminal to start the TDengine command line.
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
</Tabs>
|
|
||||||
|
<TabItem label="macOS" value="macos">
|
||||||
|
|
||||||
|
After the installation is complete, double-click the /applications/TDengine to start the program, or run `launchctl start com.tdengine.taosd` to start TDengine Server.
|
||||||
|
|
||||||
|
The following `launchctl` commands can help you manage TDengine service:
|
||||||
|
|
||||||
|
- Start TDengine Server: `launchctl start com.tdengine.taosd`
|
||||||
|
|
||||||
|
- Stop TDengine Server: `launchctl stop com.tdengine.taosd`
|
||||||
|
|
||||||
|
- Check TDengine Server status: `launchctl list | grep taosd`
|
||||||
|
|
||||||
|
:::info
|
||||||
|
|
||||||
|
- The `launchctl` command does not require _root_ privileges. You don't need to use the `sudo` command.
|
||||||
|
- The first content returned by the `launchctl list | grep taosd` command is the PID of the program, if '-' indicates that the TDengine service is not running.
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
## Command Line Interface (CLI)
|
## Command Line Interface (CLI)
|
||||||
|
|
||||||
You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, you can execute `taos` in the Linux terminal where TDengine is installed, or you can run `taos.exe` in the `C:\TDengine` directory of the Windows terminal where TDengine is installed to start the TDengine command line.
|
You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, you can execute `taos` in terminal.
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
taos
|
taos
|
||||||
|
@ -209,13 +249,13 @@ SELECT * FROM t;
|
||||||
Query OK, 2 row(s) in set (0.003128s)
|
Query OK, 2 row(s) in set (0.003128s)
|
||||||
```
|
```
|
||||||
|
|
||||||
You can also can monitor the deployment status, add and remove user accounts, and manage running instances. You can run the TDengine CLI on either Linux or Windows machines. For more information, see [TDengine CLI](../../reference/taos-shell/).
|
You can also can monitor the deployment status, add and remove user accounts, and manage running instances. You can run the TDengine CLI on either machines. For more information, see [TDengine CLI](../../reference/taos-shell/).
|
||||||
|
|
||||||
## Test data insert performance
|
## Test data insert performance
|
||||||
|
|
||||||
After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
|
After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
|
||||||
|
|
||||||
Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) in a Linux or Windows terminal.
|
Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) in a terminal.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
taosBenchmark
|
taosBenchmark
|
||||||
|
|
|
@ -3,9 +3,9 @@ title: Get Started
|
||||||
description: This article describes how to install TDengine and test its performance.
|
description: This article describes how to install TDengine and test its performance.
|
||||||
---
|
---
|
||||||
|
|
||||||
The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter).
|
You can install and run TDengine on Linux/Windows/macOS machines as well as Docker containers. You can also deploy TDengine as a managed service with TDengine Cloud.
|
||||||
|
|
||||||
You can install and run TDengine on Linux and Windows machines as well as Docker containers.
|
The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter).
|
||||||
|
|
||||||
```mdx-code-block
|
```mdx-code-block
|
||||||
import DocCardList from '@theme/DocCardList';
|
import DocCardList from '@theme/DocCardList';
|
||||||
|
|
|
@ -1,8 +1,7 @@
|
||||||
```csharp title="Native Connection"
|
```csharp title="Native Connection"
|
||||||
{{#include docs/examples/csharp/ConnectExample.cs}}
|
{{#include docs/examples/csharp/connect/Program.cs}}
|
||||||
```
|
```
|
||||||
|
|
||||||
:::info
|
```csharp title="WebSocket Connection"
|
||||||
C# connector supports only native connection for now.
|
{{#include docs/examples/csharp/wsConnect/Program.cs}}
|
||||||
|
```
|
||||||
:::
|
|
||||||
|
|
|
@ -15,10 +15,12 @@ import ConnCSNative from "./_connect_cs.mdx";
|
||||||
import ConnC from "./_connect_c.mdx";
|
import ConnC from "./_connect_c.mdx";
|
||||||
import ConnR from "./_connect_r.mdx";
|
import ConnR from "./_connect_r.mdx";
|
||||||
import ConnPHP from "./_connect_php.mdx";
|
import ConnPHP from "./_connect_php.mdx";
|
||||||
import InstallOnWindows from "../../14-reference/03-connector/_linux_install.mdx";
|
import InstallOnLinux from "../../14-reference/03-connector/_linux_install.mdx";
|
||||||
import InstallOnLinux from "../../14-reference/03-connector/_windows_install.mdx";
|
import InstallOnWindows from "../../14-reference/03-connector/_windows_install.mdx";
|
||||||
|
import InstallOnMacOS from "../../14-reference/03-connector/_macos_install.mdx";
|
||||||
import VerifyLinux from "../../14-reference/03-connector/_verify_linux.mdx";
|
import VerifyLinux from "../../14-reference/03-connector/_verify_linux.mdx";
|
||||||
import VerifyWindows from "../../14-reference/03-connector/_verify_windows.mdx";
|
import VerifyWindows from "../../14-reference/03-connector/_verify_windows.mdx";
|
||||||
|
import VerifyMacOS from "../../14-reference/03-connector/_verify_macos.mdx";
|
||||||
|
|
||||||
Any application running on any platform can access TDengine through the REST API provided by TDengine. For information, see [REST API](/reference/rest-api/). Applications can also use the connectors for various programming languages, including C/C++, Java, Python, Go, Node.js, C#, and Rust, to access TDengine. These connectors support connecting to TDengine clusters using both native interfaces (taosc). Some connectors also support connecting over a REST interface. Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.
|
Any application running on any platform can access TDengine through the REST API provided by TDengine. For information, see [REST API](/reference/rest-api/). Applications can also use the connectors for various programming languages, including C/C++, Java, Python, Go, Node.js, C#, and Rust, to access TDengine. These connectors support connecting to TDengine clusters using both native interfaces (taosc). Some connectors also support connecting over a REST interface. Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.
|
||||||
|
|
||||||
|
@ -44,10 +46,13 @@ If you are choosing to use the native connection and the the application is not
|
||||||
|
|
||||||
<Tabs defaultValue="linux" groupId="os">
|
<Tabs defaultValue="linux" groupId="os">
|
||||||
<TabItem value="linux" label="Linux">
|
<TabItem value="linux" label="Linux">
|
||||||
<InstallOnWindows />
|
<InstallOnLinux />
|
||||||
</TabItem>
|
</TabItem>
|
||||||
<TabItem value="windows" label="Windows">
|
<TabItem value="windows" label="Windows">
|
||||||
<InstallOnLinux />
|
<InstallOnWindows />
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="macos" label="MacOS">
|
||||||
|
<InstallOnMacOS />
|
||||||
</TabItem>
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
|
@ -62,6 +67,9 @@ After the above installation and configuration are done and making sure TDengine
|
||||||
<TabItem value="windows" label="Windows">
|
<TabItem value="windows" label="Windows">
|
||||||
<VerifyWindows />
|
<VerifyWindows />
|
||||||
</TabItem>
|
</TabItem>
|
||||||
|
<TabItem value="macos" label="MacOS">
|
||||||
|
<VerifyMacOS />
|
||||||
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
## Install Connectors
|
## Install Connectors
|
||||||
|
@ -138,9 +146,9 @@ Node.js connector provides different ways of establishing connections by providi
|
||||||
|
|
||||||
1. Install Node.js Native Connector
|
1. Install Node.js Native Connector
|
||||||
|
|
||||||
```
|
```
|
||||||
npm install @tdengine/client
|
npm install @tdengine/client
|
||||||
```
|
```
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
It's recommend to use Node whose version is between `node-v12.8.0` and `node-v13.0.0`.
|
It's recommend to use Node whose version is between `node-v12.8.0` and `node-v13.0.0`.
|
||||||
|
@ -148,9 +156,9 @@ It's recommend to use Node whose version is between `node-v12.8.0` and `node-v13
|
||||||
|
|
||||||
2. Install Node.js REST Connector
|
2. Install Node.js REST Connector
|
||||||
|
|
||||||
```
|
```
|
||||||
npm install @tdengine/rest
|
npm install @tdengine/rest
|
||||||
```
|
```
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
<TabItem label="C#" value="csharp">
|
<TabItem label="C#" value="csharp">
|
||||||
|
|
|
@ -6,8 +6,6 @@ The data model employed by TDengine is similar to that of a relational database.
|
||||||
|
|
||||||
Note: before you read this chapter, please make sure you have already read through [Key Concepts](/concept/), since TDengine introduces new concepts like "one table for one [data collection point](/concept/#data-collection-point)" and "[super table](/concept/#super-table-stable)".
|
Note: before you read this chapter, please make sure you have already read through [Key Concepts](/concept/), since TDengine introduces new concepts like "one table for one [data collection point](/concept/#data-collection-point)" and "[super table](/concept/#super-table-stable)".
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Create Database
|
## Create Database
|
||||||
|
|
||||||
The characteristics of time-series data from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For e.g. days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. For TDengine to operate with the best performance, we strongly recommend that you create and configure different databases for data with different characteristics. This allows you, for example, to set up different storage and retention policies. When creating a database, there are a lot of parameters that can be configured such as, the days to keep data, the number of replicas, the size of the cache, time precision, the minimum and maximum number of rows in each data block, whether compression is enabled, the time range of the data in single data file and so on. An example is shown as follows:
|
The characteristics of time-series data from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For e.g. days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. For TDengine to operate with the best performance, we strongly recommend that you create and configure different databases for data with different characteristics. This allows you, for example, to set up different storage and retention policies. When creating a database, there are a lot of parameters that can be configured such as, the days to keep data, the number of replicas, the size of the cache, time precision, the minimum and maximum number of rows in each data block, whether compression is enabled, the time range of the data in single data file and so on. An example is shown as follows:
|
||||||
|
@ -17,10 +15,11 @@ CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 WAL_LEVEL 1;
|
||||||
```
|
```
|
||||||
|
|
||||||
In the above SQL statement:
|
In the above SQL statement:
|
||||||
|
|
||||||
- a database named "power" is created
|
- a database named "power" is created
|
||||||
- the data in it is retained for 365 days, which means that data older than 365 days will be deleted automatically
|
- the data in it is retained for 365 days, which means that data older than 365 days will be deleted automatically
|
||||||
- a new data file will be created every 10 days
|
- a new data file will be created every 10 days
|
||||||
- the size of the write cache pool on each vnode is 16 MB
|
- the size of the write cache pool on each VNode is 16 MB
|
||||||
- the number of vgroups is 100
|
- the number of vgroups is 100
|
||||||
- WAL is enabled but fsync is disabled For more details please refer to [Database](/taos-sql/database).
|
- WAL is enabled but fsync is disabled For more details please refer to [Database](/taos-sql/database).
|
||||||
|
|
||||||
|
|
|
@ -34,11 +34,12 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
|
|
||||||
- All the data in `tag_set` will be converted to nchar type automatically .
|
- All the data in `tag_set` will be converted to NCHAR type automatically .
|
||||||
- Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double.
|
- Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double.
|
||||||
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h).
|
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h).
|
||||||
|
- You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpul,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
|
||||||
:::
|
- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.(smlDataFormat in taos.cfg default to false after version of 3.0.1.3)
|
||||||
|
:::
|
||||||
|
|
||||||
For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
|
For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
|
||||||
|
|
||||||
|
@ -64,3 +65,11 @@ For more details please refer to [InfluxDB Line Protocol](https://docs.influxdat
|
||||||
<CLine />
|
<CLine />
|
||||||
</TabItem>
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
|
## Query Examples
|
||||||
|
|
||||||
|
If you want query the data of `location=California.LosAngeles,groupid=2`,here is the query SQL:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT * FROM meters WHERE location = "California.LosAngeles" AND groupid = 2;
|
||||||
|
```
|
||||||
|
|
|
@ -24,7 +24,7 @@ A single line of text is used in OpenTSDB line protocol to represent one row of
|
||||||
- `metric` will be used as the STable name.
|
- `metric` will be used as the STable name.
|
||||||
- `timestamp` is the timestamp of current row of data. The time precision will be determined automatically based on the length of the timestamp. Second and millisecond time precision are supported.
|
- `timestamp` is the timestamp of current row of data. The time precision will be determined automatically based on the length of the timestamp. Second and millisecond time precision are supported.
|
||||||
- `value` is a metric which must be a numeric value, The corresponding column name is "value".
|
- `value` is a metric which must be a numeric value, The corresponding column name is "value".
|
||||||
- The last part is the tag set separated by spaces, all tags will be converted to nchar type automatically.
|
- The last part is the tag set separated by spaces, all tags will be converted to NCHAR type automatically.
|
||||||
|
|
||||||
For example:
|
For example:
|
||||||
|
|
||||||
|
@ -32,7 +32,8 @@ For example:
|
||||||
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
|
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
|
||||||
```
|
```
|
||||||
|
|
||||||
Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.
|
- The default child table name is generated by rules. You can configure smlChildTableName in taos.cfg to specify child table names, for example, `smlChildTableName=tname`. You can insert `meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
|
||||||
|
Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.
|
||||||
|
|
||||||
## Examples
|
## Examples
|
||||||
|
|
||||||
|
@ -64,10 +65,10 @@ taos> use test;
|
||||||
Database changed.
|
Database changed.
|
||||||
|
|
||||||
taos> show stables;
|
taos> show stables;
|
||||||
name | created_time | columns | tags | tables |
|
name |
|
||||||
============================================================================================
|
=================================
|
||||||
meters.current | 2022-03-30 17:04:10.877 | 2 | 2 | 2 |
|
meters.current |
|
||||||
meters.voltage | 2022-03-30 17:04:10.882 | 2 | 2 | 2 |
|
meters.voltage |
|
||||||
Query OK, 2 row(s) in set (0.002544s)
|
Query OK, 2 row(s) in set (0.002544s)
|
||||||
|
|
||||||
taos> select tbname, * from `meters.current`;
|
taos> select tbname, * from `meters.current`;
|
||||||
|
@ -79,3 +80,11 @@ taos> select tbname, * from `meters.current`;
|
||||||
t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco |
|
t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco |
|
||||||
Query OK, 4 row(s) in set (0.005399s)
|
Query OK, 4 row(s) in set (0.005399s)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Query Examples
|
||||||
|
|
||||||
|
If you want to query the data where `location=California.LosAngeles groupid=3`, here is the query SQL:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
|
||||||
|
```
|
||||||
|
|
|
@ -46,10 +46,10 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
|
|
||||||
- In JSON protocol, strings will be converted to nchar type and numeric values will be converted to double type.
|
- In JSON protocol, strings will be converted to NCHAR type and numeric values will be converted to double type.
|
||||||
- Only data in array format is accepted and so an array must be used even if there is only one row.
|
- Only data in array format is accepted and so an array must be used even if there is only one row.
|
||||||
|
- The default child table name is generated by rules. You can configure smlChildTableName in taos.cfg to specify child table names, for example, `smlChildTableName=tname`. You can insert `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
## Examples
|
## Examples
|
||||||
|
|
||||||
|
@ -81,10 +81,10 @@ taos> use test;
|
||||||
Database changed.
|
Database changed.
|
||||||
|
|
||||||
taos> show stables;
|
taos> show stables;
|
||||||
name | created_time | columns | tags | tables |
|
name |
|
||||||
============================================================================================
|
=================================
|
||||||
meters.current | 2022-03-29 16:05:25.193 | 2 | 2 | 1 |
|
meters.current |
|
||||||
meters.voltage | 2022-03-29 16:05:25.200 | 2 | 2 | 1 |
|
meters.voltage |
|
||||||
Query OK, 2 row(s) in set (0.001954s)
|
Query OK, 2 row(s) in set (0.001954s)
|
||||||
|
|
||||||
taos> select * from `meters.current`;
|
taos> select * from `meters.current`;
|
||||||
|
@ -94,3 +94,11 @@ taos> select * from `meters.current`;
|
||||||
2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
|
2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
|
||||||
Query OK, 2 row(s) in set (0.004076s)
|
Query OK, 2 row(s) in set (0.004076s)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Query Examples
|
||||||
|
|
||||||
|
If you want to query the data where "tags": {"location": "California.LosAngeles", "groupid": 1}, here is the query SQL:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 1;
|
||||||
|
```
|
||||||
|
|
|
@ -23,9 +23,9 @@ From the perspective of application program, you need to consider:
|
||||||
3. The distribution of data to be written across tables or sub-tables. Writing to a single table in one batch is more efficient than writing to multiple tables in one batch.
|
3. The distribution of data to be written across tables or sub-tables. Writing to a single table in one batch is more efficient than writing to multiple tables in one batch.
|
||||||
|
|
||||||
4. Data Writing Protocol.
|
4. Data Writing Protocol.
|
||||||
- Prameter binding mode is more efficient than SQL because it doesn't have the cost of parsing SQL.
|
- Parameter binding mode is more efficient than SQL because it doesn't have the cost of parsing SQL.
|
||||||
- Writing to known existing tables is more efficient than wirting to uncertain tables in automatic creating mode because the later needs to check whether the table exists or not before actually writing data into it
|
- Writing to known existing tables is more efficient than writing to uncertain tables in automatic creating mode because the latter needs to check whether the table exists or not before actually writing data into it.
|
||||||
- Writing in SQL is more efficient than writing in schemaless mode because schemaless writing creats table automatically and may alter table schema
|
- Writing in SQL is more efficient than writing in schemaless mode because schemaless writing creates tables automatically and may alter table schemas.
|
||||||
|
|
||||||
Application programs need to take care of the above factors and try to take advantage of them. The application program should write to a single table in each write batch. The batch size needs to be tuned to a proper value on a specific system. The number of concurrent connections needs to be tuned to a proper value too to achieve the best writing throughput.
|
Application programs need to take care of the above factors and try to take advantage of them. The application program should write to a single table in each write batch. The batch size needs to be tuned to a proper value on a specific system. The number of concurrent connections needs to be tuned to a proper value too to achieve the best writing throughput.
|
||||||
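As a rough sketch of the batching advice above (using the `meters`/`d1001` example schema that appears elsewhere in these docs; the values are made up), a single INSERT statement can carry many rows for one sub-table:

```sql
-- One batch, one sub-table: parsing and network overhead is paid once
-- for the whole batch instead of once per row.
INSERT INTO d1001 VALUES
    ('2018-10-03 14:38:05.000', 10.3, 219, 0.31)
    ('2018-10-03 14:38:15.000', 12.6, 218, 0.33)
    ('2018-10-03 14:38:25.000', 12.3, 221, 0.31);
```

In a real program the batch would typically hold hundreds to thousands of rows, matching the batch size parameter discussed below.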
|
|
||||||
|
@ -37,7 +37,7 @@ Application programs need to read data from data source then write into TDengine
|
||||||
2. The speed of data generation from a single data source is much higher than the speed of a single writing thread. The purpose of the message queue in this case is to provide a buffer so that data is not lost and multiple writing threads can get data from the buffer.
|
2. The speed of data generation from a single data source is much higher than the speed of a single writing thread. The purpose of the message queue in this case is to provide a buffer so that data is not lost and multiple writing threads can get data from the buffer.
|
||||||
3. The data for a single table comes from multiple data sources. In this case the purpose of message queues is to combine the data for a single table together to improve the write efficiency.
|
3. The data for a single table comes from multiple data sources. In this case the purpose of message queues is to combine the data for a single table together to improve the write efficiency.
|
||||||
|
|
||||||
If the data source is Kafka, then the appication program is a consumer of Kafka, you can benefit from some kafka features to achieve high performance writing:
|
If the data source is Kafka, then the application program is a consumer of Kafka, and you can benefit from some Kafka features to achieve high performance writing:
|
||||||
|
|
||||||
1. Put the data for a table in a single partition of a single topic so that it's easier to put the data for each table together and write in batches
|
1. Put the data for a table in a single partition of a single topic so that it's easier to put the data for each table together and write in batches
|
||||||
2. Subscribe to multiple topics to accumulate data together.
|
2. Subscribe to multiple topics to accumulate data together.
|
||||||
|
@ -56,7 +56,7 @@ This section will introduce the sample programs to demonstrate how to write into
|
||||||
|
|
||||||
### Scenario
|
### Scenario
|
||||||
|
|
||||||
Below are the scenario for the sample programs of high performance wrting.
|
Below is the scenario for the sample programs of high performance writing.
|
||||||
|
|
||||||
- The application program reads data from a data source; the sample program simulates a data source by generating data
|
- The application program reads data from a data source; the sample program simulates a data source by generating data
|
||||||
- The speed of a single writing thread is much slower than the speed of generating data, so the program starts multiple writing threads; each thread establishes a connection to TDengine and has a message queue of fixed size.
|
- The speed of a single writing thread is much slower than the speed of generating data, so the program starts multiple writing threads; each thread establishes a connection to TDengine and has a message queue of fixed size.
|
||||||
|
@ -80,7 +80,7 @@ The sample programs assume the source data is for all the different sub tables i
|
||||||
| ---------------- | ----------------------------------------------------------------------------------------------------- |
|
| ---------------- | ----------------------------------------------------------------------------------------------------- |
|
||||||
| FastWriteExample | Main Program |
|
| FastWriteExample | Main Program |
|
||||||
| ReadTask | Read data from simulated data source and put into a queue according to the hash value of table name |
|
| ReadTask | Read data from simulated data source and put into a queue according to the hash value of table name |
|
||||||
| WriteTask | Read data from Queue, compose a wirte batch and write into TDengine |
|
| WriteTask | Read data from Queue, compose a write batch and write into TDengine |
|
||||||
| MockDataSource | Generate data for some sub tables of super table meters |
|
| MockDataSource | Generate data for some sub tables of super table meters |
|
||||||
| SQLWriter | WriteTask uses this class to compose SQL, create table automatically, check SQL length and write data |
|
| SQLWriter | WriteTask uses this class to compose SQL, create table automatically, check SQL length and write data |
|
||||||
| StmtWriter | Write in Parameter binding mode (Not finished yet) |
|
| StmtWriter | Write in Parameter binding mode (Not finished yet) |
|
||||||
|
@ -95,16 +95,16 @@ The main Program is responsible for:
|
||||||
1. Create message queues
|
1. Create message queues
|
||||||
2. Start writing threads
|
2. Start writing threads
|
||||||
3. Start reading threads
|
3. Start reading threads
|
||||||
4. Otuput writing speed every 10 seconds
|
4. Output writing speed every 10 seconds
|
||||||
|
|
||||||
The main program provides 4 parameters for tuning:
|
The main program provides 4 parameters for tuning:
|
||||||
|
|
||||||
1. The number of reading threads, default value is 1
|
1. The number of reading threads, default value is 1
|
||||||
2. The number of writing threads, default alue is 2
|
2. The number of writing threads, default value is 2
|
||||||
3. The total number of tables in the generated data, default value is 1000. These tables are distributed evenly across all writing threads. If the number of tables is very large, creating them at the start will take a long time.
|
3. The total number of tables in the generated data, default value is 1000. These tables are distributed evenly across all writing threads. If the number of tables is very large, creating them at the start will take a long time.
|
||||||
4. The batch size of single write, default value is 3,000
|
4. The batch size of single write, default value is 3,000
|
||||||
|
|
||||||
The capacity of message queue also impacts performance and can be tuned by modifying program. Normally it's always better to have a larger message queue. A larger message queue means lower possibility of being blocked when enqueueing and higher throughput. But a larger message queue consumes more memory space. The default value used in the sample programs is already big enoug.
|
The capacity of the message queue also impacts performance and can be tuned by modifying the program. Normally it's better to have a larger message queue: a larger message queue means a lower possibility of being blocked when enqueueing and higher throughput, but it also consumes more memory space. The default value used in the sample programs is already big enough.
|
||||||
|
|
||||||
```java
|
```java
|
||||||
{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java}}
|
{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java}}
|
||||||
|
@ -179,7 +179,7 @@ TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
|
||||||
|
|
||||||
**Launch in IDE**
|
**Launch in IDE**
|
||||||
|
|
||||||
1. Clone TDengine repolitory
|
1. Clone TDengine repository
|
||||||
```
|
```
|
||||||
git clone git@github.com:taosdata/TDengine.git --depth 1
|
git clone git@github.com:taosdata/TDengine.git --depth 1
|
||||||
```
|
```
|
||||||
|
@ -282,7 +282,7 @@ Sample programs in Python uses multi-process and cross-process message queues.
|
||||||
| run_read_task Function | Read data and distribute to message queues |
|
| run_read_task Function | Read data and distribute to message queues |
|
||||||
| MockDataSource Class | Simulate data source, return next 1,000 rows of each table |
|
| MockDataSource Class | Simulate data source, return next 1,000 rows of each table |
|
||||||
| run_write_task Function | Read as much data as possible from the message queue and write in batches |
|
| run_write_task Function | Read as much data as possible from the message queue and write in batches |
|
||||||
| SQLWriter Class | Write in SQL and create table utomatically |
|
| SQLWriter Class | Write in SQL and create table automatically |
|
||||||
| StmtWriter Class | Write in parameter binding mode (not finished yet) |
|
| StmtWriter Class | Write in parameter binding mode (not finished yet) |
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
|
@ -292,7 +292,7 @@ Sample programs in Python uses multi-process and cross-process message queues.
|
||||||
|
|
||||||
1. Monitoring process, initializes the database and calculates writing speed
|
1. Monitoring process, initializes the database and calculates writing speed
|
||||||
2. Reading process (n), reads data from data source
|
2. Reading process (n), reads data from data source
|
||||||
3. Writing process (m), wirtes data into TDengine
|
3. Writing process (m), writes data into TDengine
|
||||||
|
|
||||||
`main` function provides 5 parameters:
|
`main` function provides 5 parameters:
|
||||||
|
|
||||||
|
@ -311,7 +311,7 @@ Sample programs in Python uses multi-process and cross-process message queues.
|
||||||
<details>
|
<details>
|
||||||
<summary>run_monitor_process</summary>
|
<summary>run_monitor_process</summary>
|
||||||
|
|
||||||
Monitoring process initilizes database and monitoring writing speed.
|
The monitoring process initializes the database and monitors the writing speed.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
{{#include docs/examples/python/fast_write_example.py:monitor}}
|
{{#include docs/examples/python/fast_write_example.py:monitor}}
|
||||||
|
@ -372,7 +372,7 @@ SQLWriter class encapsulates the logic of composing SQL and writing data. Please
|
||||||
|
|
||||||
<summary>Launch Sample Program in Python</summary>
|
<summary>Launch Sample Program in Python</summary>
|
||||||
|
|
||||||
1. Prerequisities
|
1. Prerequisites
|
||||||
|
|
||||||
- TDengine client driver has been installed
|
- TDengine client driver has been installed
|
||||||
- Python3 has been installed, with version >= 3.8
|
- Python3 has been installed, with version >= 3.8
|
||||||
|
|
|
@ -1,3 +1,3 @@
|
||||||
```csharp
|
```csharp
|
||||||
{{#include docs/examples/csharp/InfluxDBLineExample.cs}}
|
{{#include docs/examples/csharp/influxdbLine/Program.cs}}
|
||||||
```
|
```
|
||||||
|
|
|
@ -1,3 +1,3 @@
|
||||||
```csharp
|
```csharp
|
||||||
{{#include docs/examples/csharp/OptsJsonExample.cs}}
|
{{#include docs/examples/csharp/optsJSON/Program.cs}}
|
||||||
```
|
```
|
||||||
|
|
|
@ -1,3 +1,3 @@
|
||||||
```csharp
|
```csharp
|
||||||
{{#include docs/examples/csharp/OptsTelnetExample.cs}}
|
{{#include docs/examples/csharp/optsTelnet/Program.cs}}
|
||||||
```
|
```
|
||||||
|
|
|
@ -1,3 +1,3 @@
|
||||||
```csharp
|
```csharp
|
||||||
{{#include docs/examples/csharp/SQLInsertExample.cs}}
|
{{#include docs/examples/csharp/sqlInsert/Program.cs}}
|
||||||
```
|
```
|
||||||
|
|
|
@ -1,3 +1,3 @@
|
||||||
```csharp
|
```csharp
|
||||||
{{#include docs/examples/csharp/StmtInsertExample.cs}}
|
{{#include docs/examples/csharp/stmtInsert/Program.cs}}
|
||||||
```
|
```
|
||||||
|
|
|
@ -1,3 +1,3 @@
|
||||||
```csharp
|
```csharp
|
||||||
{{#include docs/examples/csharp/QueryExample.cs}}
|
{{#include docs/examples/csharp/query/Program.cs}}
|
||||||
```
|
```
|
||||||
|
|
|
@ -1,3 +1,3 @@
|
||||||
```csharp
|
```csharp
|
||||||
{{#include docs/examples/csharp/AsyncQueryExample.cs}}
|
{{#include docs/examples/csharp/asyncQuery/Program.cs}}
|
||||||
```
|
```
|
||||||
|
|
|
@ -1,3 +1,3 @@
|
||||||
```csharp
|
```csharp
|
||||||
{{#include docs/examples/csharp/SubscribeDemo.cs}}
|
{{#include docs/examples/csharp/subscribe/Program.cs}}
|
||||||
```
|
```
|
|
@ -3,6 +3,7 @@ title: Developer Guide
|
||||||
---
|
---
|
||||||
|
|
||||||
Before creating an application to process time-series data with TDengine, consider the following:
|
Before creating an application to process time-series data with TDengine, consider the following:
|
||||||
|
|
||||||
1. Choose the method to connect to TDengine. TDengine offers a REST API that can be used with any programming language. It also has connectors for a variety of languages.
|
1. Choose the method to connect to TDengine. TDengine offers a REST API that can be used with any programming language. It also has connectors for a variety of languages.
|
||||||
2. Design the data model based on your own use cases. Consider the main [concepts](/concept/) of TDengine, including "one table per data collection point" and the supertable. Learn about static labels, collected metrics, and subtables. Depending on the characteristics of your data and your requirements, you decide to create one or more databases and design a supertable schema that fit your data.
|
2. Design the data model based on your own use cases. Consider the main [concepts](/concept/) of TDengine, including "one table per data collection point" and the supertable. Learn about static labels, collected metrics, and subtables. Depending on the characteristics of your data and your requirements, you decide to create one or more databases and design a supertable schema that fit your data.
|
||||||
3. Decide how you will insert data. TDengine supports writing using standard SQL, but also supports schemaless writing, so that data can be written directly without creating tables manually.
|
3. Decide how you will insert data. TDengine supports writing using standard SQL, but also supports schemaless writing, so that data can be written directly without creating tables manually.
|
||||||
|
|
|
@ -1,67 +1,67 @@
|
||||||
---
|
---
|
||||||
sidebar_label: Data Types
|
sidebar_label: Data Types
|
||||||
title: Data Types
|
title: Data Types
|
||||||
description: "TDengine supports a variety of data types including timestamp, float, JSON and many others."
|
description: 'TDengine supports a variety of data types including timestamp, float, JSON and many others.'
|
||||||
---
|
---
|
||||||
|
|
||||||
## Timestamp
|
## Timestamp
|
||||||
|
|
||||||
When using TDengine to store and query data, the most important part of the data is timestamp. Timestamp must be specified when creating and inserting data rows. Timestamp must follow the rules below:
|
When using TDengine to store and query data, the most important part of the data is timestamp. Timestamp must be specified when creating and inserting data rows. Timestamp must follow the rules below:
|
||||||
|
|
||||||
- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`
|
- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`.
|
||||||
- Internal function `now` can be used to get the current timestamp on the client side
|
- Internal function `NOW` can be used to get the current timestamp on the client side.
|
||||||
- The current timestamp of the client side is applied when `now` is used to insert data
|
- The current timestamp of the client side is applied when `NOW` is used to insert data.
|
||||||
- Epoch Time: timestamp can also be a long integer number, representing the number of seconds, milliseconds or nanoseconds, depending on the time precision, since UTC 1970-01-01 00:00:00.
|
- Epoch Time: timestamp can also be a long integer number, representing the number of seconds, milliseconds or nanoseconds, depending on the time precision, since UTC 1970-01-01 00:00:00.
|
||||||
- Add/subtract operations can be carried out on timestamps. For example `now-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
|
- Add/subtract operations can be carried out on timestamps. For example `NOW-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `SELECT * FROM t1 WHERE ts > NOW-2w AND ts <= NOW-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
|
||||||
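For example, assuming the `meters` supertable used in the rest of these docs, a down-sampling query over calendar-month windows could look like the sketch below:

```sql
-- 1n is one calendar month: window boundaries follow the calendar
-- rather than a fixed number of days
SELECT AVG(current) FROM meters INTERVAL(1n);
```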
|
|
||||||
Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanoseconds.
|
Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanoseconds.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE DATABASE db_name PRECISION 'ns';
|
CREATE DATABASE db_name PRECISION 'ns';
|
||||||
```
|
```
|
||||||
|
|
||||||
## Data Types
|
## Data Types
|
||||||
|
|
||||||
In TDengine, the data types below can be used when specifying a column or tag.
|
In TDengine, the data types below can be used when specifying a column or tag.
|
||||||
|
|
||||||
| # | **type** | **Bytes** | **Description** |
|
| # | **type** | **Bytes** | **Description** |
|
||||||
| --- | :-------: | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
| --- | :--------------: | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported |
|
| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported. |
|
||||||
| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1] |
|
| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1]. |
|
||||||
| 3 | INT UNSIGNED| 4| unsigned integer, the value range is [0, 2^32-1]
|
| 3 | INT UNSIGNED | 4 | Unsigned integer, the value range is [0, 2^32-1]. |
|
||||||
| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1] |
|
| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1]. |
|
||||||
| 5 | BIGINT UNSIGNED | 8 | unsigned long integer, the value range is [0, 2^64-1] |
|
| 5 | BIGINT UNSIGNED | 8 | Unsigned long integer, the value range is [0, 2^64-1]. |
|
||||||
| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] |
|
| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38]. |
|
||||||
| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] |
|
| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308]. |
|
||||||
| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
|
| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
|
||||||
| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767] |
|
| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767]. |
|
||||||
| 10 | INT UNSIGNED| 2| unsigned integer, the value range is [0, 65535]|
|
| 10 | SMALLINT UNSIGNED | 2 | Unsigned short integer, the value range is [0, 65535]. |
|
||||||
| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127] |
|
| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127]. |
|
||||||
| 12 | TINYINT UNSIGNED | 1 | unsigned single-byte integer, the value range is [0, 255] |
|
| 12 | TINYINT UNSIGNED | 1 | Unsigned single-byte integer, the value range is [0, 255]. |
|
||||||
| 13 | BOOL | 1 | Bool, the value range is {true, false} |
|
| 13 | BOOL | 1 | Bool, the value range is {true, false}. |
|
||||||
| 14 | NCHAR | User Defined| Multi-Byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
|
| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
|
||||||
| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type |
|
| 15 | JSON | | JSON type can only be used on tags. A tag of JSON type must be the only tag of its table and cannot be used together with tags of any other type. |
|
||||||
| 16 | VARCHAR | User-defined | Alias of BINARY |
|
| 16 | VARCHAR | User-defined | Alias of BINARY |
|
||||||
|
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
- TDengine is case insensitive and treats any characters in the sql command as lower case by default, case sensitive strings must be quoted with single quotes.
|
|
||||||
- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
|
- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
|
||||||
- The length of BINARY can be up to 16374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'`
|
- The length of BINARY can be up to 16,374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with a backslash, like `\'`.
|
||||||
- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
|
- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
|
||||||
|
|
||||||
:::
|
:::
|
||||||
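A minimal sketch of the length rules above (the table and column names are hypothetical):

```sql
-- BINARY(20) holds up to 20 single-byte characters; NCHAR(10) holds up to
-- 10 multi-byte characters at 4 bytes each (40 bytes of fixed storage)
CREATE TABLE sensor_info (ts TIMESTAMP, dev_id BINARY(20), note NCHAR(10));
```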
|
|
||||||
|
|
||||||
## Constants
|
## Constants
|
||||||
|
|
||||||
TDengine supports a variety of constants:
|
TDengine supports a variety of constants:
|
||||||
|
|
||||||
| # | **Syntax** | **Type** | **Description** |
|
| # | **Syntax** | **Type** | **Description** |
|
||||||
| --- | :-------: | --------- | -------------------------------------- |
|
| --- | :-----------------------------------------------: | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 1 | [{+ \| -}]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. |
|
| 1 | [{+ \| -}]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. |
|
||||||
| 2 | 123.45 | DOUBLE | Floating-point literals are of type DOUBLE. Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used. |
|
| 2 | 123.45 | DOUBLE | Floating-point literals are of type DOUBLE. Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used. |
|
||||||
| 3 | 1.2E3 | DOUBLE | Literals in scientific notation are of type DOUBLE. |
|
| 3 | 1.2E3 | DOUBLE | Literals in scientific notation are of type DOUBLE. |
|
||||||
| 4 | 'abc' | BINARY | Content enclosed in single quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal single quote inside the string must be escaped with a backslash (\'). |
|
| 4 | 'abc' | BINARY | Content enclosed in single quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal single quote inside the string must be escaped with a backslash `\'`. |
|
||||||
| 5 | 'abc' | BINARY | Content enclosed in double quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal double quote inside the string must be escaped with a backslash (\"). |
|
| 5 | "abc" | BINARY | Content enclosed in double quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal double quote inside the string must be escaped with a backslash `\"`. |
|
||||||
| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. |
|
| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. |
|
||||||
| 7 | {TRUE \| FALSE} | BOOL | Boolean literals are of type BOOL. |
|
| 7 | {TRUE \| FALSE} | BOOL | Boolean literals are of type BOOL. |
|
||||||
| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | The preceding characters indicate null literals. These can be used with any data type. |
|
| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | The preceding characters indicate null literals. These can be used with any data type. |
|
||||||
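For instance, the TIMESTAMP literal form can be used in a filter condition (the table name is hypothetical):

```sql
-- The string after the TIMESTAMP keyword is parsed using the
-- precision configured for the current database
SELECT * FROM d1001 WHERE ts > TIMESTAMP '2022-02-02 02:02:02.222';
```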
|
|
|
@ -49,6 +49,55 @@ The preceding SQL statement can be used in migration scenarios. It returns the C
|
||||||
DESCRIBE [db_name.]stb_name;
|
DESCRIBE [db_name.]stb_name;
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### View tag information for all child tables in the supertable
|
||||||
|
|
||||||
|
```
|
||||||
|
taos> SHOW TABLE TAGS FROM st1;
|
||||||
|
tbname | id | loc |
|
||||||
|
======================================================================
|
||||||
|
st1s1 | 1 | beijing |
|
||||||
|
st1s2 | 2 | shanghai |
|
||||||
|
st1s3 | 3 | guangzhou |
|
||||||
|
Query OK, 3 rows in database (0.004455s)
|
||||||
|
```
|
||||||
|
|
||||||
|
The first column of the returned result set is the subtable name, and the subsequent columns are the tag columns.
|
||||||
|
|
||||||
|
If you already know the name of the tag column, you can use the following statement to get the value of the specified tag column.
|
||||||
|
|
||||||
|
```
|
||||||
|
taos> SELECT DISTINCT TBNAME, id FROM st1;
|
||||||
|
tbname | id |
|
||||||
|
===============================================
|
||||||
|
st1s1 | 1 |
|
||||||
|
st1s2 | 2 |
|
||||||
|
st1s3 | 3 |
|
||||||
|
Query OK, 3 rows in database (0.002891s)
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that DISTINCT and TBNAME in the SELECT statement are essential; TDengine optimizes the statement based on them, so the tag values are returned correctly and quickly whether the table holds no data or a large amount of data.
|
||||||
|
|
||||||
|
### View the tag information of a subtable
|
||||||
|
|
||||||
|
```
|
||||||
|
taos> SHOW TAGS FROM st1s1;
|
||||||
|
table_name | db_name | stable_name | tag_name | tag_type | tag_value |
|
||||||
|
============================================================================================================
|
||||||
|
st1s1 | test | st1 | id | INT | 1 |
|
||||||
|
st1s1 | test | st1 | loc | VARCHAR(20) | beijing |
|
||||||
|
Query OK, 2 rows in database (0.003684s)
|
||||||
|
```
|
||||||
|
|
||||||
|
Similarly, you can also use the SELECT statement to query the value of the specified tag column.
|
||||||
|
|
||||||
|
```
|
||||||
|
taos> SELECT DISTINCT TBNAME, id, loc FROM st1s1;
|
||||||
|
tbname | id | loc |
|
||||||
|
==================================================
|
||||||
|
st1s1 | 1 | beijing |
|
||||||
|
Query OK, 1 rows in database (0.001884s)
|
||||||
|
```
|
||||||
|
|
||||||
## Drop STable
|
## Drop STable
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
|
@ -16,6 +16,8 @@ INSERT INTO
|
||||||
[(field1_name, ...)]
|
[(field1_name, ...)]
|
||||||
VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
|
VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
|
||||||
...];
|
...];
|
||||||
|
|
||||||
|
INSERT INTO tb_name [(field1_name, ...)] subquery
|
||||||
```
|
```
|
||||||
|
|
||||||
**Timestamps**
|
**Timestamps**
|
||||||
|
@ -37,7 +39,7 @@ INSERT INTO
|
||||||
|
|
||||||
4. The FILE clause inserts tags or data from a comma-separated values (CSV) file. Do not include headers in your CSV files.
|
4. The FILE clause inserts tags or data from a comma-separated values (CSV) file. Do not include headers in your CSV files.
|
||||||
|
|
||||||
5. A single INSERT statement can write data to multiple tables.
|
5. A single `INSERT ... VALUES` statement and `INSERT ... FILE` statement can write data to multiple tables.
|
||||||
|
|
||||||
6. The INSERT statement is fully parsed before being executed, so that if any element of the statement fails, the entire statement will fail. For example, the following statement will not create a table because the latter part of the statement is invalid:
|
6. The INSERT statement is fully parsed before being executed, so that if any element of the statement fails, the entire statement will fail. For example, the following statement will not create a table because the latter part of the statement is invalid:
|
||||||
|
|
||||||
|
@ -47,6 +49,8 @@ INSERT INTO
|
||||||
|
|
||||||
7. However, an INSERT statement that writes data to multiple subtables can succeed for some tables and fail for others. This situation is caused because vnodes perform write operations independently of each other. One vnode failing to write data does not affect the ability of other vnodes to write successfully.
|
7. However, an INSERT statement that writes data to multiple subtables can succeed for some tables and fail for others. This situation is caused because vnodes perform write operations independently of each other. One vnode failing to write data does not affect the ability of other vnodes to write successfully.
|
||||||
|
|
||||||
|
8. Data from TDengine can be inserted into a specified table using the `INSERT ... subquery` statement. Arbitrary query statements are supported. This syntax can only be used for subtables and normal tables, and does not support automatic table creation.
|
||||||
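A minimal sketch of the `INSERT ... subquery` form described above (the table names are hypothetical; both tables must already exist with compatible schemas, since automatic table creation is not supported here):

```sql
-- Copy the last day of rows from d1001 into the existing table d1002
INSERT INTO d1002 SELECT * FROM d1001 WHERE ts > NOW - 1d;
```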
|
|
||||||
## Insert a Record
|
## Insert a Record
|
||||||
|
|
||||||
Single row or multiple rows specified with VALUES can be inserted into a specific table. A single row is inserted using the below statement.
|
Single row or multiple rows specified with VALUES can be inserted into a specific table. A single row is inserted using the below statement.
|
||||||
|
@ -104,11 +108,11 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('202
|
||||||
|
|
||||||
## Insert Rows From A File
|
## Insert Rows From A File
|
||||||
|
|
||||||
Besides using `VALUES` to insert one or multiple rows, the data to be inserted can also be prepared in a CSV file with comma as separator and each field value quoted by single quotes. Table definition is not required in the CSV file. For example, if file "/tmp/csvfile.csv" contains the below data:
|
Besides using `VALUES` to insert one or multiple rows, the data to be inserted can also be prepared in a CSV file with commas as separators, in which timestamp and string field values are quoted with single quotes. Table definition is not required in the CSV file. For example, if file "/tmp/csvfile.csv" contains the below data:
|
||||||
|
|
||||||
```
|
```
|
||||||
'2021-07-13 14:07:34.630', '10.2', '219', '0.32'
|
'2021-07-13 14:07:34.630', 10.2, 219, 0.32
|
||||||
'2021-07-13 14:07:35.779', '10.15', '217', '0.33'
|
'2021-07-13 14:07:35.779', 10.15, 217, 0.33
|
||||||
```
|
```
|
||||||
|
|
||||||
Then data in this file can be inserted by the SQL statement below:
|
Then data in this file can be inserted by the SQL statement below:
|
||||||
|
|
|
@ -11,7 +11,7 @@ SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW
|
||||||
SELECT [DISTINCT] select_list
|
SELECT [DISTINCT] select_list
|
||||||
from_clause
|
from_clause
|
||||||
[WHERE condition]
|
[WHERE condition]
|
||||||
[PARTITION BY tag_list]
|
[partition_by_clause]
|
||||||
[window_clause]
|
[window_clause]
|
||||||
[group_by_clause]
|
[group_by_clause]
|
||||||
[order_by_clause]
|
[order_by_clause]
|
||||||
|
@ -52,6 +52,9 @@ window_clause: {
|
||||||
| STATE_WINDOW(col)
|
| STATE_WINDOW(col)
|
||||||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
||||||
|
|
||||||
|
partition_by_clause:
|
||||||
|
PARTITION BY expr [, expr] ...
|
||||||
|
|
||||||
group_by_clause:
|
group_by_clause:
|
||||||
GROUP BY expr [, expr] ... HAVING condition
|
GROUP BY expr [, expr] ... HAVING condition
|
||||||
|
|
||||||
|
@ -66,9 +69,9 @@ order_expr:
|
||||||
|
|
||||||
A query can be performed on some or all columns. Data and tag columns can all be included in the SELECT list.
|
A query can be performed on some or all columns. Data and tag columns can all be included in the SELECT list.
|
||||||
|
|
||||||
## Wildcards
|
### Wildcards
|
||||||
|
|
||||||
You can use an asterisk (\*) as a wildcard character to indicate all columns. For standard tables, the asterisk indicates only data columns. For supertables and subtables, tag columns are also included.
|
You can use an asterisk (\*) as a wildcard character to indicate all columns. For normal tables or sub-tables, the asterisk indicates only data columns. For supertables, tag columns are also included when using asterisk (\*).
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT * FROM d1001;
|
SELECT * FROM d1001;
|
||||||
|
@ -136,6 +139,8 @@ taos> SELECT ts, ts AS primary_key_ts FROM d1001;
|
||||||
|
|
||||||
### Pseudocolumns
|
### Pseudocolumns
|
||||||
|
|
||||||
|
**Pseudocolumn:** A pseudo-column behaves like a table column but is not actually stored in the table. You can select from pseudo-columns, but you cannot insert, update, or delete their values. A pseudo-column is also similar to a function without arguments. This section describes these pseudo-columns:
|
||||||
|
|
||||||
**TBNAME**
|
**TBNAME**
|
||||||
The TBNAME pseudocolumn in a supertable contains the names of subtables within the supertable.
|
The TBNAME pseudocolumn in a supertable contains the names of subtables within the supertable.
|
||||||
|
|
||||||
|
@ -179,6 +184,14 @@ In TDengine, the first column of all tables must be a timestamp. This column is
|
||||||
select _rowts, max(current) from meters;
|
select _rowts, max(current) from meters;
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**\_IROWTS**
|
||||||
|
|
||||||
|
The \_IROWTS pseudocolumn can only be used with INTERP function. This pseudocolumn can be used to retrieve the corresponding timestamp column associated with the interpolation results.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
select _irowts, interp(current) from meters range('2020-01-01 10:00:00', '2020-01-01 10:30:00') every(1s) fill(linear);
|
||||||
|
```
|
||||||
|
|
||||||
## Query Objects
|
## Query Objects
|
||||||
|
|
||||||
`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query.
|
`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query.
|
||||||
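For instance, a sub-query in the FROM clause might look like the following sketch (using the `meters` example schema):

```sql
-- Aggregate over the result set produced by the inner query
SELECT AVG(voltage) FROM (SELECT * FROM meters WHERE groupid = 2);
```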
|
|
|
@ -13,7 +13,7 @@ Single row functions return a result for each row.
|
||||||
#### ABS
|
#### ABS
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
ABS(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The absolute value of a specific field.
|
**Description**: The absolute value of a specific field.
|
||||||
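With the simplified syntax above, the function is written as an expression inside an ordinary query. For example, assuming the `meters` schema used elsewhere in these docs:

```sql
SELECT ABS(current) FROM meters;
```

The same pattern applies to the other single-row functions in this section.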
|
@ -31,7 +31,7 @@ SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
#### ACOS
|
#### ACOS
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
ACOS(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The arc cosine of a specific field.
|
**Description**: The arc cosine of a specific field.
|
||||||
|
@ -49,7 +49,7 @@ SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
#### ASIN
|
#### ASIN
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
ASIN(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The arc sine of a specific field.
|
**Description**: The arc sine of a specific field.
|
||||||
|
@ -68,7 +68,7 @@ SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
#### ATAN
|
#### ATAN
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
ATAN(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The arc tangent of a specific field.
|
**Description**: The arc tangent of a specific field.
|
||||||
|
@ -87,7 +87,7 @@ SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
#### CEIL
|
#### CEIL
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
CEIL(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The rounded up value of a specific field
|
**Description**: The rounded up value of a specific field
|
||||||
|
@ -105,7 +105,7 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||||
#### COS
|
#### COS
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
COS(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The cosine of a specific field.
|
**Description**: The cosine of a specific field.
|
||||||
|
@ -123,7 +123,7 @@ SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
#### FLOOR
|
#### FLOOR
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
FLOOR(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The rounded down value of a specific field
|
**Description**: The rounded down value of a specific field
|
||||||
|
@ -132,7 +132,7 @@ SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||||
#### LOG
|
#### LOG
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT LOG(field_name[, base]) FROM { tb_name | stb_name } [WHERE clause]
|
LOG(expr [, base])
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The logarithm of a specific field with `base` as the radix. If you do not enter a base, the natural logarithm of the field is returned.
|
**Description**: The logarithm of a specific field with `base` as the radix. If you do not enter a base, the natural logarithm of the field is returned.
|
||||||
|
@ -151,7 +151,7 @@ SELECT LOG(field_name[, base]) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
#### POW
|
#### POW
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
|
POW(expr, power)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The power of a specific field with `power` as the exponent.
|
**Description**: The power of a specific field with `power` as the exponent.
|
||||||
|
@ -170,7 +170,7 @@ SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
#### ROUND
|
#### ROUND
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
ROUND(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The rounded value of a specific field.
|
**Description**: The rounded value of a specific field.
|
||||||
|
@ -180,7 +180,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||||
#### SIN
|
#### SIN
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
SIN(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The sine of a specific field.
|
**Description**: The sine of a specific field.
|
||||||
|
@ -198,7 +198,7 @@ SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
#### SQRT
|
#### SQRT
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
SQRT(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The square root of a specific field.
|
**Description**: The square root of a specific field.
|
||||||
|
@ -216,7 +216,7 @@ SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
#### TAN
|
#### TAN
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
TAN(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The tangent of a specific field.
|
**Description**: The tangent of a specific field.
|
||||||
|
@ -238,7 +238,7 @@ Concatenation functions take strings as input and produce string or numeric valu
|
||||||
#### CHAR_LENGTH
|
#### CHAR_LENGTH
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
CHAR_LENGTH(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The length in number of characters of a string
|
**Description**: The length in number of characters of a string
|
||||||
|
@ -254,7 +254,7 @@ SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
#### CONCAT
|
#### CONCAT
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
|
CONCAT(expr1, expr2 [, expr] ...)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The concatenation result of two or more strings
|
**Description**: The concatenation result of two or more strings
|
||||||
|
@ -271,7 +271,7 @@ SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHER
|
||||||
#### CONCAT_WS
|
#### CONCAT_WS
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
|
CONCAT_WS(separator_expr, expr1, expr2 [, expr] ...)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The concatenation result of two or more strings with separator
|
**Description**: The concatenation result of two or more strings with separator
|
||||||
|
@ -288,7 +288,7 @@ SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | st
|
||||||
#### LENGTH
|
#### LENGTH
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
LENGTH(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The length in bytes of a string
|
**Description**: The length in bytes of a string
|
||||||
|
@ -305,7 +305,7 @@ SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
#### LOWER
|
#### LOWER
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
LOWER(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: Convert the input string to lower case
|
**Description**: Convert the input string to lower case
|
||||||
|
@ -322,7 +322,7 @@ SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
#### LTRIM
|
#### LTRIM
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
LTRIM(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: Remove the left leading blanks of a string
|
**Description**: Remove the left leading blanks of a string
|
||||||
|
@ -339,7 +339,7 @@ SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
#### RTRIM
|
#### RTRIM
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
RTRIM(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: Remove the right trailing blanks of a string
|
**Description**: Remove the right trailing blanks of a string
|
||||||
|
@ -356,7 +356,7 @@ SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
#### SUBSTR
|
#### SUBSTR
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
|
SUBSTR(expr, pos [, len])
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The sub-string starting from `pos` with length of `len` from the original string `str` - If `len` is not specified, it means from `pos` to the end.
|
**Description**: The sub-string of `expr` starting from `pos` with length `len`. If `len` is not specified, the sub-string extends from `pos` to the end.
|
||||||
|
@ -373,7 +373,7 @@ SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
#### UPPER
|
#### UPPER
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
UPPER(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: Convert the input string to upper case
|
**Description**: Convert the input string to upper case
|
||||||
|
@ -394,10 +394,10 @@ Conversion functions change the data type of a value.
|
||||||
#### CAST
|
#### CAST
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
|
CAST(expr AS type_name)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: Convert the input data `expression` into the type specified by `type_name`. This function can be used only in SELECT statements.
|
**Description**: Convert the input data `expr` into the type specified by `type_name`. This function can be used only in SELECT statements.
|
||||||
|
|
||||||
**Return value type**: The type specified by parameter `type_name`
|
**Return value type**: The type specified by parameter `type_name`
|
||||||
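For example, a timestamp column can be converted to its numeric epoch value (assuming the example table d1001):

```sql
SELECT CAST(ts AS BIGINT) FROM d1001;
```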
|
|
||||||
|
@ -418,7 +418,7 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||||
#### TO_ISO8601
|
#### TO_ISO8601
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause];
|
TO_ISO8601(expr [, timezone])
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The ISO8601 date/time format converted from a UNIX timestamp, plus the timezone. You can specify any time zone with the timezone parameter. If you do not enter this parameter, the time zone on the client is used.
|
**Description**: The ISO8601 date/time format converted from a UNIX timestamp, plus the timezone. You can specify any time zone with the timezone parameter. If you do not enter this parameter, the time zone on the client is used.
|
||||||
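For example (assuming the example table d1001):

```sql
-- Render each row's timestamp in ISO8601 format, using the client time zone
SELECT TO_ISO8601(ts) FROM d1001;
```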
|
@ -441,7 +441,7 @@ SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause];
|
||||||
#### TO_JSON
|
#### TO_JSON
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT TO_JSON(str_literal) FROM { tb_name | stb_name } [WHERE clause];
|
TO_JSON(str_literal)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: Converts a string into JSON.
|
**Description**: Converts a string into JSON.
|
||||||
|
#### TO_UNIXTIMESTAMP

```sql
TO_UNIXTIMESTAMP(expr)
```

**Description**: UNIX timestamp converted from a string of date/time format.
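A sketch of the inverse conversion, using a datetime string literal (the value is illustrative):

```sql
SELECT TO_UNIXTIMESTAMP('2022-08-27 10:00:00.000');
```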
## Time and Date Functions

All functions that return the current time, such as `NOW`, `TODAY`, and `TIMEZONE`, are calculated only once per statement even if they appear multiple times.

#### NOW

```sql
NOW()
```

**Description**: The current time of the client side system.
#### TIMEDIFF

```sql
TIMEDIFF(expr1, expr2 [, time_unit])
```

**Description**: The difference between two timestamps, rounded to the time unit specified by `time_unit`.
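A sketch with two datetime string literals and an explicit unit (the values are illustrative):

```sql
SELECT TIMEDIFF('2022-08-28 00:00:00', '2022-08-27 12:00:00', 1h);
```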
#### TIMETRUNCATE

```sql
TIMETRUNCATE(expr, time_unit)
```

**Description**: Truncate the input timestamp with unit specified by `time_unit`.
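For instance, a sketch that truncates a literal timestamp to the start of its day (the value is illustrative):

```sql
SELECT TIMETRUNCATE('2022-08-27 10:23:45', 1d);
```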
#### TIMEZONE

```sql
TIMEZONE()
```

**Description**: The timezone of the client side system.
#### TODAY

```sql
TODAY()
```

**Description**: The timestamp of 00:00:00 of the client side system.
## Aggregate Functions

TDengine supports the following aggregate functions:

### APERCENTILE

```sql
APERCENTILE(expr, p [, algo_type])

algo_type: {
    "default"
  | "t-digest"
}
```

**Description**: Similar to `PERCENTILE`, but an approximate result is returned.
**Applicable table types**: standard tables and supertables

**Explanations**:

- _p_ is in range [0,100]; when _p_ is 0, the result is the same as the MIN function, and when _p_ is 100, the result is the same as MAX.
- `algo_type` can only be `default` or `t-digest`. Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
- The approximation result of the `t-digest` algorithm is sensitive to input data order. For example, when querying a STable with a different input data order there might be minor differences in the calculated results.
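A sketch of both algorithm choices, assuming a hypothetical supertable `meters` with a `current` column:

```sql
SELECT APERCENTILE(current, 99) FROM meters;
SELECT APERCENTILE(current, 99, "t-digest") FROM meters;
```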
### AVG

```sql
AVG(expr)
```

**Description**: The average value of the specified fields.
### COUNT

```sql
COUNT({* | expr})
```

**Description**: The number of records in the specified fields.
If you input a specific column, the number of non-null values in the column is returned.

### ELAPSED

```sql
ELAPSED(ts_primary_key [, time_unit])
```

**Description**: The `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with an `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without an `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Note that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
**Applicable tables**: table, STable, and the outer query of a nested query

**Explanations**:

- The `ts_primary_key` parameter can only be the first column of a table, i.e. the timestamp primary key.
- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit. The time unit specified by `time_unit` can be:
  1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
- It can be used with `INTERVAL` to get the valid time length of each time window. Note that the return value is the same as the time window for all time windows except for the first and the last time window.
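A hedged sketch combining `ELAPSED` with a time window; `meters` is a hypothetical table whose first column is the timestamp `ts`:

```sql
SELECT ELAPSED(ts, 1s) FROM meters
WHERE ts >= '2022-08-27 00:00:00' AND ts < '2022-08-28 00:00:00'
INTERVAL(1m);
```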
### LEASTSQUARES

```sql
LEASTSQUARES(expr, start_val, step_val)
```

**Description**: The linear regression function of the specified column and the timestamp column (primary key); `start_val` is the initial value and `step_val` is the step value.
### SPREAD

```sql
SPREAD(expr)
```

**Description**: The difference between the max and the min of a specific column.
### STDDEV

```sql
STDDEV(expr)
```

**Description**: Standard deviation of a specific column in a table or STable.
### SUM

```sql
SUM(expr)
```

**Description**: The sum of a specific column in a table or STable.
### HYPERLOGLOG

```sql
HYPERLOGLOG(expr)
```

**Description**: The cardinality (number of distinct values) of a specific column, estimated with the HyperLogLog algorithm, which keeps memory usage under control even when the data volume is huge.
### HISTOGRAM

```sql
HISTOGRAM(expr, bin_type, bin_description, normalized)
```

**Description**: Returns the count of data points in user-specified ranges.
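A sketch using the `linear_bin` bin type, assuming a hypothetical table `meters` with a `voltage` column; the bin parameters are illustrative:

```sql
SELECT HISTOGRAM(voltage, 'linear_bin',
  '{"start": 200, "width": 20, "count": 4, "infinity": false}', 0)
FROM meters;
```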
### PERCENTILE

```sql
PERCENTILE(expr, p)
```

**Description**: The value whose rank in a specific column matches the specified percentage. If such a value matching the specified percentage doesn't exist in the column, an interpolated value is returned.
**Applicable table types**: table only

**More explanations**: _p_ is in range [0,100]; when _p_ is 0, the result is the same as the MIN function, and when _p_ is 100, the result is the same as MAX.

## Selection Functions
Selection functions return one or more results depending on the input data.

### BOTTOM

```sql
BOTTOM(expr, k)
```

**Description**: The least _k_ values of a specific column in a table or STable. If a value occurs multiple times in the column and including every occurrence would exceed the limit _k_, a subset of the tied rows is returned at random.
### FIRST

```sql
FIRST(expr)
```

**Description**: The first non-null value of a specific column in a table or STable.
### INTERP

```sql
INTERP(expr)
```

**Description**: Returns the value that matches the specified time slice, if one exists; otherwise an interpolated value is returned.
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists, an interpolated value is returned based on the `FILL` parameter.
- The input data of `INTERP` is the value of the specified column, and a `where` clause can be used to filter the original data. If no `where` condition is specified, all original data is the input.
- `INTERP` must be used along with the `RANGE`, `EVERY`, and `FILL` keywords.
- The output time range of `INTERP` is specified by the `RANGE(timestamp1, timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `EVERY` parameter. The parameter `EVERY` must be an integer, with no quotes, with a time unit of: b (nanosecond), u (microsecond), a (millisecond), s (second), m (minute), h (hour), d (day), or w (week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
- Interpolation is performed based on the `FILL` parameter.
- `INTERP` can only be used to interpolate in a single timeline, so it must be used with `partition by tbname` when applied to a STable; a usage sketch follows this list.
- The pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported in version 3.0.1.4 and later).
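A hedged sketch of the full clause set, assuming a hypothetical supertable `meters` with a `current` column:

```sql
SELECT _irowts, INTERP(current) FROM meters
PARTITION BY tbname
RANGE('2022-08-27 10:00:00', '2022-08-27 10:01:00')
EVERY(5s)
FILL(LINEAR);
```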
### LAST

```sql
LAST(expr)
```

**Description**: The last non-NULL value of a specific column in a table or STable.
### LAST_ROW

```sql
LAST_ROW(expr)
```

**Description**: The last row of a table or STable.
### MAX

```sql
MAX(expr)
```

**Description**: The maximum value of a specific column of a table or STable.
### MIN

```sql
MIN(expr)
```

**Description**: The minimum value of a specific column in a table or STable.
### MODE

```sql
MODE(expr)
```

**Description**: The value which has the highest frequency of occurrence. If there are multiple values with the highest frequency of occurrence, one of them is returned at random.

**Return value type**: Same as the input data
### SAMPLE

```sql
SAMPLE(expr, k)
```

**Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,1000].
This function cannot be used in expression calculation.

### TAIL

```sql
TAIL(expr, k, offset_val)
```

**Description**: The next _k_ rows are returned after skipping the last `offset_val` rows; NULL values are not ignored. `offset_val` is an optional parameter. When it's not specified, the last _k_ rows are returned. When `offset_val` is used, the effect is the same as `order by ts desc LIMIT k OFFSET offset_val`.
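To make the offset semantics concrete, a sketch on a hypothetical table `meters`: this skips the 2 most recent rows and returns the 5 rows before them.

```sql
SELECT TAIL(current, 5, 2) FROM meters;
```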
### TOP

```sql
TOP(expr, k)
```

**Description**: The greatest _k_ values of a specific column in a table or STable. If a value occurs multiple times in the column and including every occurrence would exceed the limit _k_, a subset of the tied rows is returned at random.
### UNIQUE

```sql
UNIQUE(expr)
```

**Description**: The values that occur the first time in the specified column. The effect is similar to the `distinct` keyword, but it can also be used to match tags or timestamps. The first occurrence of a timestamp or tag is used.
## Time-Series Extensions

TDengine includes extensions to standard SQL that are intended specifically for time-series use cases.

### CSUM

```sql
CSUM(expr)
```

**Description**: The cumulative sum of each row for a specific column. The number of output rows is the same as that of the input rows.
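A sketch of a running total, assuming a hypothetical table `meters` with a `current` column:

```sql
SELECT CSUM(current) FROM meters;
```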
### DERIVATIVE

```sql
DERIVATIVE(expr, time_interval, ignore_negative)

ignore_negative: {
    0
  | 1
}
```

**Description**: The derivative of a specific column. The time range can be specified by parameter `time_interval`; the minimum allowed time range is 1 second (1s). The value of `ignore_negative` can be 0 or 1, and 1 means negative values are ignored.
### DIFF

```sql
DIFF(expr [, ignore_negative])

ignore_negative: {
    0
  | 1
}
```

**Description**: The difference of each row from its previous row for a specific column. `ignore_negative` can be specified as 0 or 1; the default value is 1 if it's not specified. `1` means negative values are ignored.
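A sketch on a hypothetical table `meters`, keeping only non-negative differences:

```sql
SELECT DIFF(current, 1) FROM meters;
```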
### IRATE

```sql
IRATE(expr)
```

**Description**: Instantaneous rate on a specific column. The last two samples in the specified time range are used to calculate the instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values.
### MAVG

```sql
MAVG(expr, k)
```

**Description**: The moving average over _k_ consecutive values of a specific column. If the number of input rows is less than _k_, nothing is returned. The applicable range of _k_ is [1,1000].
### STATECOUNT

```sql
STATECOUNT(expr, oper, val)
```

**Description**: The number of continuous rows satisfying the specified condition for a specific column. The result is shown as an extra column for each row. If the specified condition is evaluated as true, the number is increased by 1; otherwise the number is reset to -1. If the input value is NULL, the corresponding row is skipped.
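A sketch on a hypothetical table `meters` with a `voltage` column; here `'GT'` compares each value against the threshold 220:

```sql
SELECT ts, voltage, STATECOUNT(voltage, 'GT', 220) FROM meters;
```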
### STATEDURATION

```sql
STATEDURATION(expr, oper, val, unit)
```

**Description**: The length of the time range in which all rows satisfy the specified condition for a specific column. The result is shown as an extra column for each row. The length for the first row that satisfies the condition is 0. Next, if the condition is evaluated as true for a row, the time interval between the current row and its previous row is added to the time range; otherwise the time range length is reset to -1. If the value of the column is NULL, the corresponding row is skipped.
### TWA

```sql
TWA(expr)
```

**Description**: Time weighted average of a specific column within a time range.
### Other Rules

- The window clause must occur after the PARTITION BY clause. It cannot be used with a GROUP BY clause.
- SELECT clauses on windows can contain only the following expressions:
  - Constants
  - Aggregate functions
These pseudocolumns occur after the aggregation clause.

1. A huge volume of interpolation output may be returned when using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000.
2. The result set is in ascending order of timestamp when you aggregate by time window.
3. If aggregate by window is used on a STable, the aggregate function is performed on all the rows matching the filter conditions. If `PARTITION BY` is not used in the query, the result set will be returned in strict ascending order of timestamp; otherwise the result set will be returned in the order of ascending timestamp in each group.

:::
In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status value belong to the same status window, and the window closes when the status changes. For example:

```sql
SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
```

To keep only the status windows in which the status is 2, for example:

```sql
SELECT * FROM (SELECT COUNT(*) AS cnt, FIRST(ts) AS fst, status FROM temp_tb_1 STATE_WINDOW(status)) t WHERE status = 2;
```
### Session Window

The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitute 2 time windows, [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
The following list shows all reserved keywords:

### \_

- \_C0
- \_IROWTS
- \_QDURATION
- \_QEND
- \_QSTART
```sql
ALTER ALL DNODES dnode_option

dnode_option: {
    'resetLog'
  | 'balance' 'value'
  | 'monitor' 'value'
  | 'debugFlag' 'value'
  | 'monDebugFlag' 'value'
  | 'vDebugFlag' 'value'
  | 'mDebugFlag' 'value'
  | 'cDebugFlag' 'value'
  | 'httpDebugFlag' 'value'
  | 'qDebugflag' 'value'
  | 'sdbDebugFlag' 'value'
  | 'uDebugFlag' 'value'
  | 'tsdbDebugFlag' 'value'
  | 'sDebugflag' 'value'
  | 'rpcDebugFlag' 'value'
  | 'dDebugFlag' 'value'
  | 'mqttDebugFlag' 'value'
  | 'wDebugFlag' 'value'
  | 'tmrDebugFlag' 'value'
  | 'cqDebugFlag' 'value'
}
```
The parameters that you can modify through this statement are the same as those located in the dnode configuration file. Modifications that you make through this statement take effect immediately, while modifications to the configuration file take effect when the dnode restarts.

`value` is the value of the parameter, which needs to be in character format. For example, to modify the log output level of dnode 1 to debug:

```sql
ALTER DNODE 1 'debugFlag' '143';
```
## Add an Mnode

```sql
CREATE MNODE ON DNODE dnode_id;
```

## Modify Client Configuration

```sql
ALTER LOCAL local_option

local_option: {
    'resetLog'
  | 'rpcDebugFlag' 'value'
  | 'tmrDebugFlag' 'value'
  | 'cDebugFlag' 'value'
  | 'uDebugFlag' 'value'
  | 'debugFlag' 'value'
}
```
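By analogy with the dnode example above — parameter values are again passed in character format — a sketch of raising the local client's log level:

```sql
ALTER LOCAL 'debugFlag' '143';
```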
The parameters that you can modify through this statement are the same as those located in the client configuration file. Modifications that you make through this statement take effect immediately, while modifications to the configuration file take effect when the client restarts.

```sql
SHOW LOCAL VARIABLES;
```
TDengine includes a built-in database named `INFORMATION_SCHEMA` to provide access to database metadata, system information, and status information. Among its advantages:

4. Future versions of TDengine can add new columns to INFORMATION_SCHEMA tables without affecting existing business systems.
5. It is easier for users coming from other database management systems. For example, Oracle users can query data dictionary tables.

:::info

- SHOW statements are still supported for the convenience of existing users.
- Some columns in the system tables may be keywords, and you need to use the escape character `` ` `` when querying them. For example, to query the `vgroups` column in the database `test`:

  ```sql
  select `vgroups` from ins_databases where name = 'test';
  ```

:::

This document introduces the tables of INFORMATION_SCHEMA and their structure.
## INS_DNODES

Provides information about dnodes. Similar to SHOW DNODES.

| #   |   **Column**   | **Data Type** | **Description** |
| --- | :------------: | ------------- | --------------- |
| 1   | vnodes         | SMALLINT      | Current number of vnodes on the dnode. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 2   | support_vnodes | SMALLINT      | Maximum number of vnodes on the dnode |
| 3   | status         | BINARY(10)    | Current status |
| 4   | note           | BINARY(256)   | Reason for going offline or other information |
| 5   | id             | SMALLINT      | Dnode ID |
## INS_MNODES

Provides information about mnodes. Similar to SHOW MNODES.

| 4   | role_time   | TIMESTAMP | Time at which the current role was assumed |
| 5   | create_time | TIMESTAMP | Creation time |

## INS_QNODES

Provides information about qnodes. Similar to SHOW QNODES.
## INS_DATABASES

Provides information about user-created databases. Similar to SHOW DATABASES.

| #   |      **Column**      | **Data Type** | **Description** |
| --- | :------------------: | ------------- | --------------- |
| 1   | name                 | BINARY(32)    | Database name |
| 2   | create_time          | TIMESTAMP     | Creation time |
| 3   | ntables              | INT           | Number of standard tables and subtables (not including supertables) |
| 4   | vgroups              | INT           | Number of vgroups. It should be noted that `vgroups` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6   | replica              | INT           | Number of replicas. It should be noted that `replica` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7   | strict               | BINARY(3)     | Strong consistency. It should be noted that `strict` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 8   | duration             | INT           | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9   | keep                 | INT           | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10  | buffer               | INT           | Write cache size per vnode, in MB. It should be noted that `buffer` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 11  | pagesize             | INT           | Page size for vnode metadata storage engine, in KB. It should be noted that `pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 12  | pages                | INT           | Number of pages per vnode metadata storage engine. It should be noted that `pages` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 13  | minrows              | INT           | Minimum number of records per file block. It should be noted that `minrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 14  | maxrows              | INT           | Maximum number of records per file block. It should be noted that `maxrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 15  | comp                 | INT           | Compression method. It should be noted that `comp` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 16  | precision            | BINARY(2)     | Time precision. It should be noted that `precision` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 17  | status               | BINARY(10)    | Current database status |
| 18  | retentions           | BINARY(60)    | Aggregation interval and retention period. It should be noted that `retentions` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 19  | single_stable        | BOOL          | Whether the database can contain multiple supertables. It should be noted that `single_stable` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 20  | cachemodel           | BINARY(60)    | Caching method for the newest data. It should be noted that `cachemodel` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 21  | cachesize            | INT           | Memory per vnode used for caching the newest data. It should be noted that `cachesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 22  | wal_level            | INT           | WAL level. It should be noted that `wal_level` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 23  | wal_fsync_period     | INT           | Interval at which WAL is written to disk. It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 24  | wal_retention_period | INT           | WAL retention period. It should be noted that `wal_retention_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 25  | wal_retention_size   | INT           | Maximum WAL size. It should be noted that `wal_retention_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 26  | wal_roll_period      | INT           | WAL rotation period. It should be noted that `wal_roll_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 27  | wal_segment_size     | BIGINT        | WAL file size. It should be noted that `wal_segment_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 28  | stt_trigger          | SMALLINT      | The threshold for the number of files to trigger file merging. It should be noted that `stt_trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 29  | table_prefix         | SMALLINT      | The prefix length in the table name that is ignored when distributing a table to a vnode based on table name. It should be noted that `table_prefix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 30  | table_suffix         | SMALLINT      | The suffix length in the table name that is ignored when distributing a table to a vnode based on table name. It should be noted that `table_suffix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 31  | tsdb_pagesize        | INT           | The page size of the internal storage engine, in KB. It should be noted that `tsdb_pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
## INS_FUNCTIONS

Provides information about user-defined functions.

| #   | **Column**  | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1   | name        | BINARY(64)    | Function name |
| 2   | comment     | BINARY(255)   | Function description. It should be noted that `comment` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 3   | aggregate   | INT           | Whether the UDF is an aggregate function. It should be noted that `aggregate` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4   | output_type | BINARY(31)    | Output data type |
| 5   | create_time | TIMESTAMP     | Creation time |
| 6   | code_len    | INT           | Length of the source code |
## INS_STABLES

Provides information about supertables.

| 2   | db_name       | BINARY(64)   | Database that contains the supertable |
| 3   | create_time   | TIMESTAMP    | Creation time |
| 4   | columns       | INT          | Number of columns |
| 5   | tags          | INT          | Number of tags. It should be noted that `tags` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6   | last_update   | TIMESTAMP    | Last updated time |
| 7   | table_comment | BINARY(1024) | Table description |
| 8   | watermark     | BINARY(64)   | Window closing time. It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9   | max_delay     | BINARY(64)   | Maximum delay for pushing stream processing results. It should be noted that `max_delay` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10  | rollup        | BINARY(128)  | Rollup aggregate function. It should be noted that `rollup` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_TABLES
Provides information about standard tables and subtables.

| 5   | stable_name   | BINARY(192)  | Supertable name |
| 6   | uid           | BIGINT       | Table ID |
| 7   | vgroup_id     | INT          | Vgroup ID |
| 8   | ttl           | INT          | Table time-to-live. It should be noted that `ttl` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9   | table_comment | BINARY(1024) | Table description |
| 10  | type          | BINARY(20)   | Table type |
## INS_GRANTS

Provides information about TDengine Enterprise Edition permissions.

| #   | **Column**  | **Data Type** | **Description** |
| --- | :---------: | ------------- | --------------- |
| 1   | version     | BINARY(9)     | Whether the deployment is a licensed or trial version |
| 2   | cpu_cores   | BINARY(9)     | CPU cores included in license |
| 3   | dnodes      | BINARY(10)    | Dnodes included in license. It should be noted that `dnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4   | streams     | BINARY(10)    | Streams included in license. It should be noted that `streams` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 5   | users       | BINARY(10)    | Users included in license. It should be noted that `users` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6   | accounts    | BINARY(10)    | Accounts included in license. It should be noted that `accounts` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7   | storage     | BINARY(21)    | Storage space included in license. It should be noted that `storage` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 8   | connections | BINARY(21)    | Client connections included in license. It should be noted that `connections` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9   | databases   | BINARY(11)    | Databases included in license. It should be noted that `databases` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10  | speed       | BINARY(9)     | Write speed specified in license (data points per second) |
| 11  | querytime   | BINARY(9)     | Total query time specified in license |
| 12  | timeseries  | BINARY(21)    | Number of metrics included in license |
## INS_VGROUPS

Provides information about vgroups.

| #   | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | --------------- |
| 1   | vgroup_id  | INT           | Vgroup ID |
| 2   | db_name    | BINARY(32)    | Database name |
| 3   | tables     | INT           | Tables in vgroup. It should be noted that `tables` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4   | status     | BINARY(10)    | Vgroup status |
| 5   | v1_dnode   | INT           | Dnode ID of first vgroup member |
| 6   | v1_status  | BINARY(10)    | Status of first vgroup member |
## INS_CONFIGS

Provides system configuration information.

| #   | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | --------------- |
| 1   | name       | BINARY(32)    | Parameter |
| 2   | value      | BINARY(64)    | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_DNODE_VARIABLES
Provides dnode configuration information.

| #   | **Column** | **Data Type** | **Description** |
| --- | :--------: | ------------- | --------------- |
| 1   | dnode_id   | INT           | Dnode ID |
| 2   | name       | BINARY(32)    | Parameter |
| 3   | value      | BINARY(64)    | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_TOPICS
@ -275,5 +277,5 @@ Provides dnode configuration information.
|
||||||
| 5 | source_db | BINARY(64) | Source database |
|
| 5 | source_db | BINARY(64) | Source database |
|
||||||
| 6 | target_db | BIANRY(64) | Target database |
|
| 6 | target_db | BIANRY(64) | Target database |
|
||||||
| 7 | target_table | BINARY(192) | Target table |
|
| 7 | target_table | BINARY(192) | Target table |
|
||||||
| 8 | watermark | BIGINT | Watermark (see stream processing documentation) |
|
| 8 | watermark | BIGINT | Watermark (see stream processing documentation). It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) |
|
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation). It should be noted that `trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
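The same escaping applies to `watermark` and `trigger`. A hypothetical query, assuming this table is `INFORMATION_SCHEMA.INS_STREAMS`:

```sql
-- `watermark` and `trigger` are reserved keywords and need backticks.
SELECT `watermark`, `trigger` FROM information_schema.ins_streams;
```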
|
|
|
@ -13,14 +13,6 @@ SHOW APPS;
|
||||||
|
|
||||||
Shows all clients (such as applications) that connect to the cluster.
|
Shows all clients (such as applications) that connect to the cluster.
|
||||||
|
|
||||||
## SHOW BNODES
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SHOW BNODES;
|
|
||||||
```
|
|
||||||
|
|
||||||
Shows information about backup nodes (bnodes) in the system.
|
|
||||||
|
|
||||||
## SHOW CLUSTER
|
## SHOW CLUSTER
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
@ -128,14 +120,6 @@ SHOW MNODES;
|
||||||
|
|
||||||
Shows information about mnodes in the system.
|
Shows information about mnodes in the system.
|
||||||
|
|
||||||
## SHOW MODULES
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SHOW MODULES;
|
|
||||||
```
|
|
||||||
|
|
||||||
Shows information about modules installed in the system.
|
|
||||||
|
|
||||||
## SHOW QNODES
|
## SHOW QNODES
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
@ -154,14 +138,6 @@ Shows information about the storage space allowed by the license.
|
||||||
|
|
||||||
Note: TDengine Enterprise Edition only.
|
Note: TDengine Enterprise Edition only.
|
||||||
|
|
||||||
## SHOW SNODES
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SHOW SNODES;
|
|
||||||
```
|
|
||||||
|
|
||||||
Shows information about stream processing nodes (snodes) in the system.
|
|
||||||
|
|
||||||
## SHOW STABLES
|
## SHOW STABLES
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
|
|
@ -16,10 +16,10 @@ You can use the SHOW CONNECTIONS statement to find the conn_id.
|
||||||
## Terminate a Query
|
## Terminate a Query
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SHOW QUERY query_id;
|
KILL QUERY kill_id;
|
||||||
```
|
```
|
||||||
|
|
||||||
You can use the SHOW QUERIES statement to find the query_id.
|
You can use the SHOW QUERIES statement to find the kill_id.
|
||||||
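A sketch of the full workflow; the kill_id shown is hypothetical and must be taken from your own SHOW QUERIES output:

```sql
SHOW QUERIES;
-- Suppose the output lists a query whose kill_id is 5656792:78950838:
KILL QUERY '5656792:78950838';
```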
|
|
||||||
## Terminate a Transaction
|
## Terminate a Transaction
|
||||||
|
|
||||||
|
|
|
@ -11,12 +11,13 @@ description: "This document explains how TDengine SQL has changed in version 3.0
|
||||||
| 1 | VARCHAR | Added | Alias of BINARY.
|
| 1 | VARCHAR | Added | Alias of BINARY.
|
||||||
| 2 | TIMESTAMP literal | Added | TIMESTAMP 'timestamp format' syntax now supported.
|
| 2 | TIMESTAMP literal | Added | TIMESTAMP 'timestamp format' syntax now supported.
|
||||||
| 3 | _ROWTS pseudocolumn | Added | Indicates the primary key. Alias of _C0.
|
| 3 | _ROWTS pseudocolumn | Added | Indicates the primary key. Alias of _C0.
|
||||||
| 4 | INFORMATION_SCHEMA | Added | Database for system metadata containing all schema definitions
|
| 4 | _IROWTS pseudocolumn | Added | Used to retrieve timestamps with INTERP function.
|
||||||
| 5 | PERFORMANCE_SCHEMA | Added | Database for system performance information.
|
| 5 | INFORMATION_SCHEMA | Added | Database for system metadata containing all schema definitions
|
||||||
| 6 | Connection queries | Deprecated | Connection queries are no longer supported. The syntax and interfaces are deprecated.
|
| 6 | PERFORMANCE_SCHEMA | Added | Database for system performance information.
|
||||||
| 7 | Mixed operations | Enhanced | Mixing scalar and vector operations in queries has been enhanced and is supported in all SELECT clauses.
|
| 7 | Connection queries | Deprecated | Connection queries are no longer supported. The syntax and interfaces are deprecated.
|
||||||
| 8 | Tag operations | Added | Tag columns can be used in queries and clauses like data columns.
|
| 8 | Mixed operations | Enhanced | Mixing scalar and vector operations in queries has been enhanced and is supported in all SELECT clauses.
|
||||||
| 9 | Timeline clauses and time functions in supertables | Enhanced | When PARTITION BY is not used, data in supertables is merged into a single timeline.
|
| 9 | Tag operations | Added | Tag columns can be used in queries and clauses like data columns.
|
||||||
|
| 10 | Timeline clauses and time functions in supertables | Enhanced | When PARTITION BY is not used, data in supertables is merged into a single timeline.
|
||||||
|
|
||||||
## SQL Syntax
|
## SQL Syntax
|
||||||
|
|
||||||
|
|
|
@ -6,7 +6,7 @@ title: Problem Diagnostics
|
||||||
|
|
||||||
When a TDengine client is unable to access a TDengine server, the network connection between the client side and the server side must be checked to find the root cause and resolve problems.
|
When a TDengine client is unable to access a TDengine server, the network connection between the client side and the server side must be checked to find the root cause and resolve problems.
|
||||||
|
|
||||||
Diagnostics for network connections can be executed between Linux and Linux or between Linux and Windows.
|
Diagnostics for network connections can be executed between any two of Linux, Windows, and macOS.
|
||||||
|
|
||||||
Diagnostic steps:
|
Diagnostic steps:
|
||||||
|
|
||||||
|
|
|
@ -67,7 +67,7 @@ The following return value results indicate that the verification passed.
|
||||||
## HTTP request URL format
|
## HTTP request URL format
|
||||||
|
|
||||||
```text
|
```text
|
||||||
http://<fqdn>:<port>/rest/sql/[db_name]
|
http://<fqdn>:<port>/rest/sql/[db_name][?tz=timezone]
|
||||||
```
|
```
|
||||||
|
|
||||||
Parameter Description:
|
Parameter Description:
|
||||||
|
@ -75,6 +75,7 @@ Parameter Description:
|
||||||
- fqdn: FQDN or IP address of any host in the cluster.
|
- fqdn: FQDN or IP address of any host in the cluster.
|
||||||
- port: httpPort configuration item in the configuration file, default is 6041.
|
- port: httpPort configuration item in the configuration file, default is 6041.
|
||||||
- db_name: Optional parameter that specifies the default database name for the executed SQL command.
|
- db_name: Optional parameter that specifies the default database name for the executed SQL command.
|
||||||
|
- tz: Optional parameter that specifies the timezone of the returned time, following the IANA Time Zone rules, e.g. `America/New_York`.
|
||||||
|
|
||||||
For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:6041` and sets the default database name to `test`.
|
For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:6041` and sets the default database name to `test`.
|
||||||
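For instance, a URL that also pins the returned timestamps to a specific timezone might look like the following sketch (host, database, and timezone are illustrative):

```text
http://h1.taos.com:6041/rest/sql/test?tz=America/New_York
```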
|
|
||||||
|
@ -97,13 +98,13 @@ The HTTP request's BODY is a complete SQL command, and the data table in the SQL
|
||||||
Use `curl` to initiate an HTTP request with a custom authentication method, with the following syntax.
|
Use `curl` to initiate an HTTP request with a custom authentication method, with the following syntax.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
|
curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone]
|
||||||
```
|
```
|
||||||
|
|
||||||
or
|
or
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
|
curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone]
|
||||||
```
|
```
|
||||||
|
|
||||||
where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.g. `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`.
|
where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.g. `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`.
|
||||||
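As a concrete sketch against a local taosAdapter (the SQL statement is illustrative; `cm9vdDp0YW9zZGF0YQ==` is the Base64 encoding of `root:taosdata` noted above):

```bash
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \
  -d "select server_version()" \
  "http://localhost:6041/rest/sql"
```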
|
@ -123,7 +124,7 @@ where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.
|
||||||
|
|
||||||
### HTTP body structure
|
### HTTP body structure
|
||||||
|
|
||||||
#### Successful Operation
|
#### Successful Insert Operation
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
|
@ -143,7 +144,7 @@ Description:
|
||||||
- rows: (`int`) Only returns `1`.
|
- rows: (`int`) Only returns `1`.
|
||||||
- data: (`[][]any`) Returns the number of rows affected.
|
- data: (`[][]any`) Returns the number of rows affected.
|
||||||
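A plausible response body for a successful insert, consistent with the fields described above (all values are illustrative):

```json
{
  "code": 0,
  "column_meta": [["affected_rows", "INT", 4]],
  "rows": 1,
  "data": [[3]]
}
```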
|
|
||||||
#### Successful Query
|
#### Successful Query Operation
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
|
|
|
@ -13,11 +13,13 @@ After TDengine server or client installation, `taos.h` is located at
|
||||||
|
|
||||||
- Linux:`/usr/local/taos/include`
|
- Linux:`/usr/local/taos/include`
|
||||||
- Windows:`C:\TDengine\include`
|
- Windows:`C:\TDengine\include`
|
||||||
|
- macOS:`/usr/local/include`
|
||||||
|
|
||||||
The dynamic libraries for the TDengine client driver are located at:
|
The dynamic libraries for the TDengine client driver are located at:
|
||||||
|
|
||||||
- Linux: `/usr/local/taos/driver/libtaos.so`
|
- Linux: `/usr/local/taos/driver/libtaos.so`
|
||||||
- Windows: `C:\TDengine\taos.dll`
|
- Windows: `C:\TDengine\taos.dll`
|
||||||
|
- macOS: `/usr/local/lib/libtaos.dylib`
|
||||||
|
|
||||||
## Supported platforms
|
## Supported platforms
|
||||||
|
|
||||||
|
@ -119,7 +121,7 @@ This section shows sample code for standard access methods to TDengine clusters
|
||||||
|
|
||||||
:::info
|
:::info
|
||||||
More example code and downloads are available at [GitHub](https://github.com/taosdata/TDengine/tree/develop/examples/c).
|
More example code and downloads are available at [GitHub](https://github.com/taosdata/TDengine/tree/develop/examples/c).
|
||||||
You can find it in the installation directory under the `examples/c` path. This directory has a makefile and can be compiled under Linux by executing `make` directly.
|
You can find it in the installation directory under the `examples/c` path. This directory has a makefile and can be compiled under Linux/macOS by executing `make` directly.
|
||||||
**Hint:** When compiling in an ARM environment, please remove `-msse4.2` from the makefile. This option is only supported on the x64/x86 hardware platforms.
|
**Hint:** When compiling in an ARM environment, please remove `-msse4.2` from the makefile. This option is only supported on the x64/x86 hardware platforms.
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
|
@ -109,7 +109,7 @@ TDengine's JDBC URL specification format is:
|
||||||
|
|
||||||
For establishing connections, native connections differ slightly from REST connections.
|
For establishing connections, native connections differ slightly from REST connections.
|
||||||
|
|
||||||
<Tabs defaultValue="native">
|
<Tabs defaultValue="rest">
|
||||||
<TabItem value="native" label="native connection">
|
<TabItem value="native" label="native connection">
|
||||||
|
|
||||||
```java
|
```java
|
||||||
|
@ -120,13 +120,13 @@ Connection conn = DriverManager.getConnection(jdbcUrl);
|
||||||
|
|
||||||
In the above example, TSDBDriver, which uses a JDBC native connection, establishes a connection to a hostname `taosdemo.com`, port `6030` (the default port for TDengine), and a database named `test`. In this URL, the user name `user` is specified as `root`, and the `password` is `taosdata`.
|
In the above example, TSDBDriver, which uses a JDBC native connection, establishes a connection to a hostname `taosdemo.com`, port `6030` (the default port for TDengine), and a database named `test`. In this URL, the user name `user` is specified as `root`, and the `password` is `taosdata`.
|
||||||
|
|
||||||
Note: With JDBC native connections, taos-jdbcdriver relies on the client driver (`libtaos.so` on Linux; `taos.dll` on Windows).
|
Note: With JDBC native connections, taos-jdbcdriver relies on the client driver (`libtaos.so` on Linux; `taos.dll` on Windows; `libtaos.dylib` on macOS).
|
||||||
|
|
||||||
The configuration parameters in the URL are as follows:
|
The configuration parameters in the URL are as follows:
|
||||||
|
|
||||||
- user: Log in to the TDengine username. The default value is 'root'.
|
- user: Log in to the TDengine username. The default value is 'root'.
|
||||||
- password: User login password, the default value is 'taosdata'.
|
- password: User login password, the default value is 'taosdata'.
|
||||||
- cfgdir: client configuration file directory path, default '/etc/taos' on Linux OS, 'C:/TDengine/cfg' on Windows OS.
|
- cfgdir: client configuration file directory path, default '/etc/taos' on Linux OS, 'C:/TDengine/cfg' on Windows OS, '/etc/taos' on macOS.
|
||||||
- charset: The character set used by the client, the default value is the system character set.
|
- charset: The character set used by the client, the default value is the system character set.
|
||||||
- locale: Client locale, by default, use the system's current locale.
|
- locale: Client locale, by default, use the system's current locale.
|
||||||
- timezone: The time zone used by the client, the default value is the system's current time zone.
|
- timezone: The time zone used by the client, the default value is the system's current time zone.
|
||||||
|
@ -172,7 +172,7 @@ In the above example, JDBC uses the client's configuration file to establish a c
|
||||||
|
|
||||||
In TDengine, as long as one node in firstEp and secondEp is valid, the connection to the cluster can be established normally.
|
In TDengine, as long as one node in firstEp and secondEp is valid, the connection to the cluster can be established normally.
|
||||||
|
|
||||||
The configuration file here refers to the configuration file on the machine where the application that calls the JDBC Connector is located, the default path is `/etc/taos/taos.cfg` on Linux, and the default path is `C://TDengine/cfg/taos.cfg` on Windows.
|
The configuration file here refers to the configuration file on the machine where the application that calls the JDBC Connector is located. The default path is `/etc/taos/taos.cfg` on Linux and macOS, and `C:/TDengine/cfg/taos.cfg` on Windows.
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
<TabItem value="rest" label="REST connection">
|
<TabItem value="rest" label="REST connection">
|
||||||
|
@ -261,7 +261,7 @@ The configuration parameters in properties are as follows.
|
||||||
- TSDBDriver.PROPERTY_KEY_PASSWORD: user login password, default value 'taosdata'.
|
- TSDBDriver.PROPERTY_KEY_PASSWORD: user login password, default value 'taosdata'.
|
||||||
- TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true: pull the result set in batch when executing query; false: pull the result set row by row. The default value is: false.
|
- TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true: pull the result set in batch when executing query; false: pull the result set row by row. The default value is: false.
|
||||||
- TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true: when executing executeBatch of Statement, if there is a SQL execution failure in the middle, continue to execute the following sql. false: no longer execute any statement after the failed SQL. The default value is: false.
|
- TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true: when executing executeBatch of Statement, if there is a SQL execution failure in the middle, continue to execute the following sql. false: no longer execute any statement after the failed SQL. The default value is: false.
|
||||||
- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: only works when using JDBC native connection. Client configuration file directory path, default value `/etc/taos` on Linux OS, default value `C:/TDengine/cfg` on Windows OS.
|
- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: only works when using JDBC native connection. Client configuration file directory path, default value `/etc/taos` on Linux OS, default value `C:/TDengine/cfg` on Windows OS, default value `/etc/taos` on macOS.
|
||||||
- TSDBDriver.PROPERTY_KEY_CHARSET: In the character set used by the client, the default value is the system character set.
|
- TSDBDriver.PROPERTY_KEY_CHARSET: In the character set used by the client, the default value is the system character set.
|
||||||
- TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale.
|
- TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale.
|
||||||
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. In the time zone used by the client, the default value is the system's current time zone.
|
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. In the time zone used by the client, the default value is the system's current time zone.
|
||||||
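A minimal sketch that combines several of these properties (the endpoint, database, and credentials are hypothetical):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import com.taosdata.jdbc.TSDBDriver;

public class PropsConnect {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        props.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
        props.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
        // Hypothetical endpoint and database name.
        Connection conn = DriverManager.getConnection(
                "jdbc:TAOS://localhost:6030/test?user=root&password=taosdata", props);
        System.out.println("connected: " + !conn.isClosed());
        conn.close();
    }
}
```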
|
@ -896,7 +896,7 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
|
||||||
|
|
||||||
**Cause**: The program did not find the dependent native library `taos`.
|
**Cause**: The program did not find the dependent native library `taos`.
|
||||||
|
|
||||||
**Solution**: On Windows you can copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32` directory, on Linux the following soft link will be created `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` will work.
|
**Solution**: On Windows, you can copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32` directory; on Linux, creating the soft link `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` will work; on macOS, the soft link should be `/usr/local/lib/libtaos.dylib`.
|
||||||
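For example, on Linux (replace `x.x.x.x` with the version actually installed):

```bash
# Create the soft link, then verify that it resolves.
ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so
ls -l /usr/lib/libtaos.so
```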
|
|
||||||
3. java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
|
3. java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
|
||||||
|
|
||||||
|
|
|
@ -7,7 +7,6 @@ title: TDengine Go Connector
|
||||||
import Tabs from '@theme/Tabs';
|
import Tabs from '@theme/Tabs';
|
||||||
import TabItem from '@theme/TabItem';
|
import TabItem from '@theme/TabItem';
|
||||||
|
|
||||||
import Preparition from "./_preparation.mdx"
|
|
||||||
import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx"
|
import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx"
|
||||||
import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx"
|
import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx"
|
||||||
import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx"
|
import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx"
|
||||||
|
@ -113,7 +112,7 @@ username:password@protocol(address)/dbname?param=value
|
||||||
```
|
```
|
||||||
### Connecting via connector
|
### Connecting via connector
|
||||||
|
|
||||||
<Tabs defaultValue="native">
|
<Tabs defaultValue="rest">
|
||||||
<TabItem value="native" label="native connection">
|
<TabItem value="native" label="native connection">
|
||||||
|
|
||||||
_taosSql_ implements Go's `database/sql/driver` interface via cgo. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply introducing the driver.
|
_taosSql_ implements Go's `database/sql/driver` interface via cgo. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply introducing the driver.
|
||||||
|
@ -176,6 +175,37 @@ func main() {
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
</TabItem>
|
</TabItem>
|
||||||
|
<TabItem value="WebSocket" label="WebSocket connection">
|
||||||
|
|
||||||
|
_taosWS_ implements Go's `database/sql/driver` interface via WebSocket. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply introducing the driver (driver-go minimum version 3.0.2).
|
||||||
|
|
||||||
|
Use `taosWS` as the `driverName` and a correct [DSN](#DSN) as the `dataSourceName`. The DSN supports the following parameters.
|
||||||
|
|
||||||
|
* `writeTimeout` The timeout to send data via WebSocket.
|
||||||
|
* `readTimeout` The timeout to receive response data via WebSocket.
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
_ "github.com/taosdata/driver-go/v3/taosWS"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
var taosUri = "root:taosdata@ws(localhost:6041)/"
|
||||||
|
taos, err := sql.Open("taosWS", taosUri)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("failed to connect TDengine, err:", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer taos.Close() // close the connection pool on exit; without this, taos would be declared and unused
|
||||||
|
}
|
||||||
|
```
|
||||||
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
## Usage examples
|
## Usage examples
|
||||||
|
@ -331,7 +361,7 @@ Creates consumer group.
|
||||||
|
|
||||||
* `func (c *Consumer) Subscribe(topics []string) error`
|
* `func (c *Consumer) Subscribe(topics []string) error`
|
||||||
|
|
||||||
Subscribes to a topic.
|
Subscribes to topics.
|
||||||
|
|
||||||
* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
|
* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
|
||||||
|
|
||||||
|
@ -409,6 +439,30 @@ Close consumer.
|
||||||
|
|
||||||
Closes the parameter binding.
|
Closes the parameter binding.
|
||||||
|
|
||||||
|
### Subscribe via WebSocket
|
||||||
|
|
||||||
|
* `func NewConsumer(config *Config) (*Consumer, error)`
|
||||||
|
|
||||||
|
Creates consumer group.
|
||||||
|
|
||||||
|
* `func (c *Consumer) Subscribe(topic []string) error`
|
||||||
|
|
||||||
|
Subscribes to topics.
|
||||||
|
|
||||||
|
* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
|
||||||
|
|
||||||
|
Polls for messages within the given timeout.
|
||||||
|
|
||||||
|
* `func (c *Consumer) Commit(messageID uint64) error`
|
||||||
|
|
||||||
|
Commits the message with the given `messageID`.
|
||||||
|
|
||||||
|
* `func (c *Consumer) Close() error`
|
||||||
|
|
||||||
|
Close consumer.
|
||||||
|
|
||||||
|
For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
|
||||||
|
|
||||||
## API Reference
|
## API Reference
|
||||||
|
|
||||||
For the full API, see the [driver-go documentation](https://pkg.go.dev/github.com/taosdata/driver-go/v3).
|
For the full API, see the [driver-go documentation](https://pkg.go.dev/github.com/taosdata/driver-go/v3).
|
||||||
|
|
|
@ -55,16 +55,6 @@ taos = "*"
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
|
|
||||||
<TabItem value="native" label="native connection only">
|
|
||||||
|
|
||||||
In `cargo.toml`, add [taos][taos] and enable the native feature:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[dependencies]
|
|
||||||
taos = { version = "*", default-features = false, features = ["native"] }
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
<TabItem value="rest" label="Websocket only">
|
<TabItem value="rest" label="Websocket only">
|
||||||
|
|
||||||
In `cargo.toml`, add [taos][taos] and enable the ws feature:
|
In `cargo.toml`, add [taos][taos] and enable the ws feature:
|
||||||
|
@ -75,6 +65,18 @@ taos = { version = "*", default-features = false, features = ["ws"] }
|
||||||
```
|
```
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
|
|
||||||
|
<TabItem value="native" label="native connection only">
|
||||||
|
|
||||||
|
In `cargo.toml`, add [taos][taos] and enable the native feature:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[dependencies]
|
||||||
|
taos = { version = "*", default-features = false, features = ["native"] }
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
## Establishing a connection
|
## Establishing a connection
|
||||||
|
@ -116,7 +118,7 @@ The parameters are described as follows:
|
||||||
- **protocol**: Specify which connection method to use. For example, `taos+ws://localhost:6041` uses Websocket to establish connections.
|
- **protocol**: Specify which connection method to use. For example, `taos+ws://localhost:6041` uses Websocket to establish connections.
|
||||||
- **username/password**: Username and password used to create connections.
|
- **username/password**: Username and password used to create connections.
|
||||||
- **host/port**: Specifies the server and port to establish a connection. If you do not specify a hostname or port, native connections default to `localhost:6030` and Websocket connections default to `localhost:6041`.
|
- **host/port**: Specifies the server and port to establish a connection. If you do not specify a hostname or port, native connections default to `localhost:6030` and Websocket connections default to `localhost:6041`.
|
||||||
- **database**: Specify the default database to connect to.
|
- **database**: Specify the default database to connect to. It's optional.
|
||||||
- **params**: Optional parameters.
|
- **params**: Optional parameters.
|
||||||
|
|
||||||
A sample DSN description string is as follows:
|
A sample DSN description string is as follows:
|
||||||
|
|
|
@ -32,7 +32,7 @@ We recommend using the latest version of `taospy`, regardless of the version of
|
||||||
|
|
||||||
### Preparation
|
### Preparation
|
||||||
|
|
||||||
1. Install Python. Python >= 3.6 is recommended. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
|
1. Install Python. Python >= 3.7 is recommended. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
|
||||||
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.
|
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.
|
||||||
If you use a native connection, you will also need to [Install Client Driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.
|
If you use a native connection, you will also need to [Install Client Driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.
|
||||||
|
|
||||||
|
@ -80,7 +80,7 @@ pip3 install git+https://github.com/taosdata/taos-connector-python.git
|
||||||
|
|
||||||
### Verify
|
### Verify
|
||||||
|
|
||||||
<Tabs groupId="connect" default="native">
|
<Tabs defaultValue="rest">
|
||||||
<TabItem value="native" label="native connection">
|
<TabItem value="native" label="native connection">
|
||||||
|
|
||||||
For native connection, you need to verify that both the client driver and the Python connector itself are installed correctly. The client driver and Python connector have been installed properly if you can successfully import the `taos` module. In the Python Interactive Shell, you can type:
|
For native connection, you need to verify that both the client driver and the Python connector itself are installed correctly. The client driver and Python connector have been installed properly if you can successfully import the `taos` module. In the Python Interactive Shell, you can type:
|
||||||
|
@ -118,10 +118,10 @@ Requirement already satisfied: taospy in c:\users\username\appdata\local\program
|
||||||
|
|
||||||
Before establishing a connection with the connector, we recommend testing the connectivity of the local TDengine CLI to the TDengine cluster.
|
Before establishing a connection with the connector, we recommend testing the connectivity of the local TDengine CLI to the TDengine cluster.
|
||||||
|
|
||||||
<Tabs>
|
<Tabs defaultValue="rest">
|
||||||
<TabItem value="native" label="native connection">
|
<TabItem value="native" label="native connection">
|
||||||
|
|
||||||
Ensure that the TDengine instance is up and that the FQDN of the machines in the cluster (the FQDN defaults to hostname if you are starting a standalone version) can be resolved locally, by testing with the `ping` command.
|
Ensure that the TDengine instance is up and that the FQDN of the machines in the cluster (the FQDN defaults to hostname if you are starting a stand-alone version) can be resolved locally, by testing with the `ping` command.
|
||||||
|
|
||||||
```
|
```
|
||||||
ping <FQDN>
|
ping <FQDN>
|
||||||
|
@ -173,7 +173,7 @@ If the test is successful, it will output the server version information, e.g.
|
||||||
|
|
||||||
The following example code assumes that TDengine is installed locally and that the default configuration is used for both FQDN and serverPort.
|
The following example code assumes that TDengine is installed locally and that the default configuration is used for both FQDN and serverPort.
|
||||||
|
|
||||||
<Tabs>
|
<Tabs defaultValue="rest">
|
||||||
<TabItem value="native" label="native connection" groupId="connect">
|
<TabItem value="native" label="native connection" groupId="connect">
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
@ -186,7 +186,7 @@ All arguments of the `connect()` function are optional keyword arguments. The fo
|
||||||
- `user` : The TDengine user name. The default value is `root`.
|
- `user` : The TDengine user name. The default value is `root`.
|
||||||
- `password` : TDengine user password. The default value is `taosdata`.
|
- `password` : TDengine user password. The default value is `taosdata`.
|
||||||
- `port` : The starting port of the data node to connect to, i.e., the serverPort configuration. The default value is 6030, which will only take effect if the host parameter is provided.
|
- `port` : The starting port of the data node to connect to, i.e., the serverPort configuration. The default value is 6030, which will only take effect if the host parameter is provided.
|
||||||
- `config` : The path to the client configuration file. On Windows systems, the default is `C:\TDengine\cfg`. The default is `/etc/taos/` on Linux systems.
|
- `config` : The path to the client configuration file. On Windows systems, the default is `C:\TDengine\cfg`. The default is `/etc/taos/` on Linux/macOS.
|
||||||
- `timezone` : The timezone used to convert the TIMESTAMP data in the query results to python `datetime` objects. The default is the local timezone.
|
- `timezone` : The timezone used to convert the TIMESTAMP data in the query results to python `datetime` objects. The default is the local timezone.
|
||||||
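A minimal sketch combining these arguments (the values shown are the documented defaults and are illustrative):

```python
import taos

# Every keyword argument is optional; host and port are shown with their defaults.
conn = taos.connect(
    host="localhost",
    user="root",
    password="taosdata",
    port=6030,
)
print(conn.server_info)  # prints the server version string
conn.close()
```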
|
|
||||||
:::warning
|
:::warning
|
||||||
|
@ -219,7 +219,7 @@ All arguments to the `connect()` function are optional keyword arguments. The fo
|
||||||
|
|
||||||
### Basic Usage
|
### Basic Usage
|
||||||
|
|
||||||
<Tabs default="native" groupId="connect">
|
<Tabs defaultValue="rest">
|
||||||
<TabItem value="native" label="native connection">
|
<TabItem value="native" label="native connection">
|
||||||
|
|
||||||
##### TaosConnection class
|
##### TaosConnection class
|
||||||
|
@ -289,7 +289,7 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
|
||||||
|
|
||||||
### Used with pandas
|
### Used with pandas
|
||||||
|
|
||||||
<Tabs default="native" groupId="connect">
|
<Tabs defaultValue="rest">
|
||||||
<TabItem value="native" label="native connection">
|
<TabItem value="native" label="native connection">
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
|
|
@ -85,7 +85,7 @@ If using ARM64 Node.js on Windows 10 ARM, you must add "Visual C++ compilers and
|
||||||
|
|
||||||
### Install via npm
|
### Install via npm
|
||||||
|
|
||||||
<Tabs defaultValue="install_native">
|
<Tabs defaultValue="install_rest">
|
||||||
<TabItem value="install_native" label="Install native connector">
|
<TabItem value="install_native" label="Install native connector">
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
@ -124,7 +124,7 @@ node nodejsChecker.js host=localhost
|
||||||
|
|
||||||
Please choose to use one of the connectors.
|
Please choose to use one of the connectors.
|
||||||
|
|
||||||
<Tabs defaultValue="native">
|
<Tabs defaultValue="rest">
|
||||||
<TabItem value="native" label="native connection">
|
<TabItem value="native" label="native connection">
|
||||||
|
|
||||||
Install and import the `@tdengine/client` package.
|
Install and import the `@tdengine/client` package.
|
||||||
|
|
|
@ -17,7 +17,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx"
|
||||||
|
|
||||||
`TDengine.Connector` is a C# language connector provided by TDengine that allows C# developers to develop C# applications that access TDengine cluster data.
|
`TDengine.Connector` is a C# language connector provided by TDengine that allows C# developers to develop C# applications that access TDengine cluster data.
|
||||||
|
|
||||||
The `TDengine.Connector` connector supports connect to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc. The `TDengine.Connector` currently does not provide a REST connection interface. Developers can write their RESTful application by referring to the [REST API](/reference/rest-api/) documentation.
|
The `TDengine.Connector` connector supports connecting to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc. The `TDengine.Connector` also supports WebSocket; developers can build a connection through a DSN, which supports data writing, querying, parameter binding, etc.
|
||||||
|
|
||||||
This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to TDengine clusters via `TDengine.Connector` to perform basic operations such as data writing and querying.
|
This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to TDengine clusters via `TDengine.Connector` to perform basic operations such as data writing and querying.
|
||||||
|
|
||||||
|
@ -35,6 +35,10 @@ Please refer to [version support list](/reference/connector#version-support)
|
||||||
|
|
||||||
## Supported features
|
## Supported features
|
||||||
|
|
||||||
|
<Tabs defaultValue="native">
|
||||||
|
|
||||||
|
<TabItem value="native" label="Native Connection">
|
||||||
|
|
||||||
1. Connection Management
|
1. Connection Management
|
||||||
2. General Query
|
2. General Query
|
||||||
3. Continuous Query
|
3. Continuous Query
|
||||||
|
@ -42,6 +46,18 @@ Please refer to [version support list](/reference/connector#version-support)
|
||||||
5. Subscription
|
5. Subscription
|
||||||
6. Schemaless
|
6. Schemaless
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
|
||||||
|
<TabItem value="rest" label="WebSocket Connection">
|
||||||
|
|
||||||
|
1. Connection Management
|
||||||
|
2. General Query
|
||||||
|
3. Continuous Query
|
||||||
|
4. Parameter Binding
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
## Installation Steps
|
## Installation Steps
|
||||||
|
|
||||||
### Pre-installation preparation
|
### Pre-installation preparation
|
||||||
|
@ -74,12 +90,18 @@ cp -r src/ myProject
|
||||||
cd myProject
|
cd myProject
|
||||||
dotnet add example.csproj reference src/TDengine.csproj
|
dotnet add example.csproj reference src/TDengine.csproj
|
||||||
```
|
```
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
## Establish a Connection
|
## Establish a Connection
|
||||||
|
|
||||||
``` C#
|
|
||||||
|
<Tabs defaultValue="rest">
|
||||||
|
|
||||||
|
<TabItem value="native" label="Native Connection">
|
||||||
|
|
||||||
|
``` csharp
|
||||||
using TDengineDriver;
|
using TDengineDriver;
|
||||||
|
|
||||||
namespace TDengineExample
|
namespace TDengineExample
|
||||||
|
@ -112,14 +134,62 @@ namespace TDengineExample
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
|
||||||
|
<TabItem value="rest" label="WebSocket Connection">
|
||||||
|
|
||||||
|
The structure of the DSN description string is as follows:
|
||||||
|
|
||||||
|
```text
|
||||||
|
[<protocol>]://[[<username>:<password>@]<host>:<port>][/<database>][?<p1>=<v1>[&<p2>=<v2>]]
|
||||||
|
|------------|---|-----------|-----------|------|------|------------|-----------------------|
|
||||||
|
| protocol | | username | password | host | port | database | params |
|
||||||
|
```
|
||||||
|
|
||||||
|
The parameters are described as follows:
|
||||||
|
|
||||||
|
* **protocol**: Specify which connection method to use (http and ws are supported). For example, `ws://localhost:6041` uses WebSocket to establish connections.
|
||||||
|
* **username/password**: Username and password used to create connections.
|
||||||
|
* **host/port**: Specifies the server and port to establish a connection. Websocket connections default to `localhost:6041`.
|
||||||
|
* **database**: Specify the default database to connect to. It's optional.
|
||||||
|
* **params**: Optional parameters.
|
||||||
|
|
||||||
|
A sample DSN description string is as follows:
|
||||||
|
|
||||||
|
```text
|
||||||
|
ws://localhost:6041/test
|
||||||
|
```
|
||||||
|
|
||||||
|
``` csharp
|
||||||
|
{{#include docs/examples/csharp/wsConnect/Program.cs}}
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
## Usage examples
|
## Usage examples
|
||||||
|
|
||||||
### Write data
|
### Write data
|
||||||
|
|
||||||
#### SQL Write
|
#### SQL Write
|
||||||
|
|
||||||
|
<Tabs defaultValue="rest">
|
||||||
|
|
||||||
|
<TabItem value="native" label="Native Connection">
|
||||||
|
|
||||||
<CSInsert />
|
<CSInsert />
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
|
||||||
|
<TabItem value="rest" label="WebSocket Connection">
|
||||||
|
|
||||||
|
```csharp
|
||||||
|
{{#include docs/examples/csharp/wsInsert/Program.cs}}
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
#### InfluxDB line protocol write
|
#### InfluxDB line protocol write
|
||||||
|
|
||||||
<CSInfluxLine />
|
<CSInfluxLine />
|
||||||
|
@ -132,12 +202,48 @@ namespace TDengineExample
|
||||||
|
|
||||||
<CSOpenTSDBJson />
|
<CSOpenTSDBJson />
|
||||||
|
|
||||||
|
#### Parameter Binding
|
||||||
|
|
||||||
|
<Tabs defaultValue="rest">
|
||||||
|
|
||||||
|
<TabItem value="native" label="Native Connection">
|
||||||
|
|
||||||
|
``` csharp
|
||||||
|
{{#include docs/examples/csharp/stmtInsert/Program.cs}}
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
|
||||||
|
<TabItem value="rest" label="WebSocket Connection">
|
||||||
|
|
||||||
|
```csharp
|
||||||
|
{{#include docs/examples/csharp/wsStmt/Program.cs}}
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
### Query data
|
### Query data
|
||||||
|
|
||||||
#### Synchronous Query
|
#### Synchronous Query
|
||||||
|
|
||||||
|
<Tabs defaultValue="rest">
|
||||||
|
|
||||||
|
<TabItem value="native" label="Native Connection">
|
||||||
|
|
||||||
<CSQuery />
|
<CSQuery />
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
|
||||||
|
<TabItem value="rest" label="WebSocket Connection">
|
||||||
|
|
||||||
|
```csharp
|
||||||
|
{{#include docs/examples/csharp/wsQuery/Program.cs}}
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
#### Asynchronous query
|
#### Asynchronous query
|
||||||
|
|
||||||
<CSAsyncQuery />
|
<CSAsyncQuery />
|
||||||
|
@ -145,18 +251,21 @@ namespace TDengineExample
|
||||||
### More sample programs
|
### More sample programs
|
||||||
|
|
||||||
|Sample program |Sample program description |
|
|Sample program |Sample program description |
|
||||||
|--------------------------------------------------------------------------------------------------------------------|------------ --------------------------------|
|
|--------------------------------------------------------------------------------------------------------------------|--------------------------------------------|
|
||||||
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
|
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
|
||||||
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
|
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
|
||||||
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | Parameter binding with TDengine Connector |
|
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | Parameter binding with TDengine Connector |
|
||||||
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | Schemaless writes with TDengine Connector |
|
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | Schemaless writes with TDengine Connector |
|
||||||
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
|
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
|
||||||
| [TMQ](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | Data subscription with TDengine Connector |
|
| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector |
|
||||||
|
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector |
|
||||||
|
| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | WebSocket parameter binding example |
|
||||||
|
|
||||||
## Important update records
|
## Important update records
|
||||||
|
|
||||||
| TDengine.Connector | Description |
|
| TDengine.Connector | Description |
|
||||||
|--------------------|--------------------------------|
|
|--------------------|--------------------------------|
|
||||||
|
| 3.0.1 | Supports WebSocket and Cloud, with query, insert, and parameter binding functions. |
|
||||||
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
|
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
|
||||||
| 1.0.7 | Fixed TDengine.Query() memory leak. |
|
| 1.0.7 | Fixed TDengine.Query() memory leak. |
|
||||||
| 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. |
|
| 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. |
|
||||||
|
|
|
@ -13,11 +13,13 @@ After TDengine client or server is installed, `taos.h` is located at:
|
||||||
|
|
||||||
- Linux:`/usr/local/taos/include`
|
- Linux:`/usr/local/taos/include`
|
||||||
- Windows:`C:\TDengine\include`
|
- Windows:`C:\TDengine\include`
|
||||||
|
- macOS:`/usr/local/include`
|
||||||
|
|
||||||
TDengine client driver is located at:
|
TDengine client driver is located at:
|
||||||
|
|
||||||
- Linux: `/usr/local/taos/driver/libtaos.so`
|
- Linux: `/usr/local/taos/driver/libtaos.so`
|
||||||
- Windows: `C:\TDengine\taos.dll`
|
- Windows: `C:\TDengine\taos.dll`
|
||||||
|
- macOS:`/usr/local/lib/libtaos.dylib`
|
||||||
|
|
||||||
## Supported Platforms
|
## Supported Platforms
|
||||||
|
|
||||||
|
|
|
@ -4,7 +4,7 @@ import PkgListV3 from "/components/PkgListV3";
|
||||||
|
|
||||||
<PkgListV3 type={1} sys="Linux" />
|
<PkgListV3 type={1} sys="Linux" />
|
||||||
|
|
||||||
[All Downloads](../../releases)
|
[All Downloads](../../releases/tdengine)
|
||||||
|
|
||||||
2. Unzip
|
2. Unzip
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,19 @@
|
||||||
|
import PkgListV3 from "/components/PkgListV3";
|
||||||
|
|
||||||
|
1. Download the client installation package
|
||||||
|
|
||||||
|
<PkgListV3 type={8} sys="macOS" />
|
||||||
|
|
||||||
|
[All Downloads](../../releases/tdengine)
|
||||||
|
|
||||||
|
2. Execute the installer, select the default value as prompted, and complete the installation. If the installation is blocked, you can right-click or ctrl-click on the installation package and select `Open`.
|
||||||
|
3. configure taos.cfg
|
||||||
|
|
||||||
|
Edit the `taos.cfg` file (the full path is `/etc/taos/taos.cfg` by default) and set `firstEp` to the actual End Point of your TDengine server, for example `h1.tdengine.com:6030`.
|
||||||
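A sketch of the relevant line in `taos.cfg`; the endpoint is illustrative, and note that the parameter is spelled `firstEp` in the configuration file:

```text
firstEp    h1.tdengine.com:6030
```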
|
|
||||||
|
:::tip
|
||||||
|
|
||||||
|
1. If the computer does not run the TDengine service but only has the TDengine client driver installed, you only need to configure `firstEp` in `taos.cfg`; there is no need to configure `FQDN`;
|
||||||
|
2. If you encounter the "Unable to resolve FQDN" error, please make sure the FQDN in the `/etc/hosts` file of the current computer is correctly configured, or the DNS service is correctly configured.
|
||||||
|
|
||||||
|
:::
|
|
@ -6,5 +6,6 @@ Since the TDengine client driver is written in C, using the native connection re
|
||||||
|
|
||||||
- libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately.
|
- libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately.
|
||||||
- taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately.
|
- taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately.
|
||||||
|
- libtaos.dylib: After successful installation of TDengine on a macOS system, the dependent macOS version of the client driver `libtaos.dylib` file will be automatically linked to `/usr/local/lib/libtaos.dylib`, which is included in the macOS scannable path and does not need to be specified separately.
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
|
@ -4,11 +4,11 @@ Execute TDengine CLI program `taos` directly from the Linux shell to connect to
|
||||||
$ taos
|
$ taos
|
||||||
|
|
||||||
taos> show databases;
|
taos> show databases;
|
||||||
name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size | wal_roll_period | wal_seg_size |
|
name |
|
||||||
=========================================================================================================================================================================================================================================================================================================================================================================================================================================================================
|
=================================
|
||||||
information_schema | NULL | NULL | 14 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
|
information_schema |
|
||||||
performance_schema | NULL | NULL | 3 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
|
performance_schema |
|
||||||
db | 2022-08-04 14:14:49.385 | 2 | 4 | 1 | off | 14400m | 5254560m,5254560m,5254560m | 96 | 4 | 256 | 100 | 4096 | 2 | ms | ready | NULL | false | none | 1 | 1 | 3000 | 0 | 0 | 0 | 0 |
|
db |
|
||||||
Query OK, 3 rows in database (0.019154s)
|
Query OK, 3 rows in database (0.019154s)
|
||||||
|
|
||||||
taos>
|
taos>
|
||||||
|
|
|
@ -0,0 +1,15 @@
|
||||||
|
Execute TDengine CLI program `taos` directly from the macOS shell to connect to the TDengine service and enter the TDengine CLI interface, as shown in the following example.
|
||||||
|
|
||||||
|
```text
|
||||||
|
$ taos
|
||||||
|
|
||||||
|
taos> show databases;
|
||||||
|
name |
|
||||||
|
=================================
|
||||||
|
information_schema |
|
||||||
|
performance_schema |
|
||||||
|
db |
|
||||||
|
Query OK, 3 rows in database (0.019154s)
|
||||||
|
|
||||||
|
taos>
|
||||||
|
```
|
|
@ -2,12 +2,11 @@ Go to the `C:\TDengine` directory from `cmd` and execute TDengine CLI program `t
|
||||||
|
|
||||||
```text
|
```text
|
||||||
taos> show databases;
|
taos> show databases;
|
||||||
name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size | wal_roll_period | wal_seg_size |
|
name |
|
||||||
=========================================================================================================================================================================================================================================================================================================================================================================================================================================================================
|
=================================
|
||||||
information_schema | NULL | NULL | 14 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
|
information_schema |
|
||||||
performance_schema | NULL | NULL | 3 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
|
performance_schema |
|
||||||
test | 2022-08-04 16:46:40.506 | 2 | 0 | 1 | off | 14400m | 5256000m,5256000m,5256000m | 96 | 4 | 256 |
|
test |
|
||||||
100 | 4096 | 2 | ms | ready | NULL | false | none | 1 | 1 | 3000 | 0 | 0 | 0 | 0 |
|
|
||||||
Query OK, 3 rows in database (0.123000s)
|
Query OK, 3 rows in database (0.123000s)
|
||||||
|
|
||||||
taos>
|
taos>
|
||||||
|
|
|
@ -4,18 +4,20 @@ import PkgListV3 from "/components/PkgListV3";
|
||||||
|
|
||||||
<PkgListV3 type={4} sys="Windows" />
|
<PkgListV3 type={4} sys="Windows" />
|
||||||
|
|
||||||
[All Downloads](../../releases)
|
[All Downloads](../../releases/tdengine)
|
||||||
2. Execute the installer, select the default value as prompted, and complete the installation
|
2. Execute the installer, select the default value as prompted, and complete the installation
|
||||||
3. Installation path
|
3. Installation path
|
||||||
|
|
||||||
The default installation path is C:\TDengine, including the following files (directories).
|
The default installation path is C:\TDengine, including the following files (directories).
|
||||||
|
|
||||||
- _taos.exe_ : TDengine CLI command-line program
|
- _taos.exe_: TDengine CLI command-line program
|
||||||
- _cfg_ : configuration file directory
|
- _taosadapter.exe_: server-side executable that provides RESTful services and accepts writing requests from a variety of other software
|
||||||
|
- _taosBenchmark.exe_: TDengine testing tool
|
||||||
|
- _cfg_: configuration file directory
|
||||||
- _driver_: client driver dynamic link library
|
- _driver_: client driver dynamic link library
|
||||||
- _examples_: sample programs bash/C/C#/go/JDBC/Python/Node.js
|
- _examples_: sample programs bash/C/C#/go/JDBC/Python/Node.js
|
||||||
- _include_: header files
|
- _include_: header files
|
||||||
- _log_ : log file
|
- _log_: log file
|
||||||
- _unins000.exe_: uninstaller
|
- _unins000.exe_: uninstaller
|
||||||
|
|
||||||
4. configure taos.cfg
|
4. configure taos.cfg
|
||||||
|
|
|
@ -8,13 +8,15 @@ TDengine provides a rich set of APIs (application development interface). To fac
|
||||||
|
|
||||||
## Supported platforms
|
## Supported platforms
|
||||||
|
|
||||||
Currently, TDengine's native interface connectors can support platforms such as x64 and ARM hardware platforms and Linux and Windows development environments. The comparison matrix is as follows.
|
Currently, TDengine's native interface connectors support the x64 and ARM hardware platforms and the Linux/Windows/macOS development environments. The comparison matrix is as follows.
|
||||||
|
|
||||||
| **CPU** | **OS** | **Java** | **Python** | **Go** | **Node.js** | **C#** | **Rust** | C/C++ |
|
| **CPU** | **OS** | **Java** | **Python** | **Go** | **Node.js** | **C#** | **Rust** | C/C++ |
|
||||||
| -------------- | --------- | -------- | ---------- | ------ | ----------- | ------ | -------- | ----- |
|
| -------------- | --------- | -------- | ---------- | ------ | ----------- | ------ | -------- | ----- |
|
||||||
| **X86 64bit** | **Linux** | ● | ● | ● | ● | ● | ● | ● |
|
| **X86 64bit** | **Linux** | ● | ● | ● | ● | ● | ● | ● |
|
||||||
| **X86 64bit** | **Win64** | ● | ● | ● | ● | ● | ● | ● |
|
| **X86 64bit** | **Win64** | ● | ● | ● | ● | ● | ● | ● |
|
||||||
|
| **X86 64bit** | **macOS** | ○ | ● | ● | ○ | ○ | ● | ● |
|
||||||
| **ARM64** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
|
| **ARM64** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
|
||||||
|
| **ARM64** | **macOS** | ○ | ● | ● | ○ | ○ | ● | ● |
|
||||||
|
|
||||||
Where ● means the official test verification passed, ○ means the unofficial test verification passed, -- means no assurance.
|
Where ● means the official test verification passed, ○ means the unofficial test verification passed, -- means no assurance.
|
||||||
|
|
||||||
|
@ -72,10 +74,12 @@ The different database framework specifications for various programming language
|
||||||
|
|
||||||
import Tabs from "@theme/Tabs";
|
import Tabs from "@theme/Tabs";
|
||||||
import TabItem from "@theme/TabItem";
|
import TabItem from "@theme/TabItem";
|
||||||
import InstallOnWindows from "./_linux_install.mdx";
|
import InstallOnLinux from "./_linux_install.mdx";
|
||||||
import InstallOnLinux from "./_windows_install.mdx";
|
import InstallOnWindows from "./_windows_install.mdx";
|
||||||
|
import InstallOnMacOS from "./_macos_install.mdx";
|
||||||
import VerifyWindows from "./_verify_windows.mdx";
|
import VerifyWindows from "./_verify_windows.mdx";
|
||||||
import VerifyLinux from "./_verify_linux.mdx";
|
import VerifyLinux from "./_verify_linux.mdx";
|
||||||
|
import VerifyMacOS from "./_verify_macos.mdx";
|
||||||
|
|
||||||
## Install Client Driver
|
## Install Client Driver
|
||||||
|
|
||||||
|
@ -88,10 +92,13 @@ The client driver needs to be installed if you use the native interface connecto
|
||||||
|
|
||||||
<Tabs defaultValue="linux" groupId="os">
|
<Tabs defaultValue="linux" groupId="os">
|
||||||
<TabItem value="linux" label="Linux">
|
<TabItem value="linux" label="Linux">
|
||||||
<InstallOnWindows />
|
<InstallOnLinux />
|
||||||
</TabItem>
|
</TabItem>
|
||||||
<TabItem value="windows" label="Windows">
|
<TabItem value="windows" label="Windows">
|
||||||
<InstallOnLinux />
|
<InstallOnWindows />
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="macos" label="MacOS">
|
||||||
|
<InstallOnMacOS />
|
||||||
</TabItem>
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
|
@ -106,5 +113,8 @@ After completing the above installation and configuration and you have confirmed
|
||||||
<TabItem value="windows" label="Windows">
|
<TabItem value="windows" label="Windows">
|
||||||
<VerifyWindows />
|
<VerifyWindows />
|
||||||
</TabItem>
|
</TabItem>
|
||||||
|
<TabItem value="macos" label="MacOS">
|
||||||
|
<VerifyMacOS />
|
||||||
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
|
|
|
@ -30,7 +30,7 @@ taosAdapter provides the following features.
|
||||||
|
|
||||||
### Install taosAdapter
|
### Install taosAdapter
|
||||||
|
|
||||||
If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TDengine 3.0 released versions](../../releases) to download the TDengine server installation package. If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation.
|
If you use the TDengine server, you don't need additional steps to install taosAdapter: it is bundled with the TDengine server installation package, which you can download from [TDengine 3.0 released versions](../../releases/tdengine). If you need to deploy taosAdapter separately on a server other than the TDengine server, install the full TDengine server package on that server. If you need to build taosAdapter from source code, refer to the [Building taosAdapter](https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation.
|
||||||
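For the build-from-source route just mentioned, a minimal sketch (the branch follows the BUILD.md link above; see that document for the full prerequisites, such as the Go toolchain):

```bash
# fetch the 3.0 branch of taosAdapter and build it with Go
git clone -b 3.0 https://github.com/taosdata/taosadapter.git
cd taosadapter
go build
```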
|
|
||||||
### Start/Stop taosAdapter
|
### Start/Stop taosAdapter
|
||||||
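On systemd-managed Linux installs, taosAdapter typically runs as the `taosadapter` service; a quick start/stop sketch:

```bash
systemctl start taosadapter    # start the service
systemctl status taosadapter   # confirm it is active
systemctl stop taosadapter     # stop the service
```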
|
|
||||||
|
@ -197,6 +197,7 @@ Support InfluxDB query parameters as follows.
|
||||||
- `p` TDengine password
|
- `p` TDengine password
|
||||||
|
|
||||||
Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
|
Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
|
||||||
|
Example: curl --request POST "http://127.0.0.1:6041/influxdb/v1/write?db=test" --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
|
||||||
|
|
||||||
### OpenTSDB
|
### OpenTSDB
|
||||||
|
|
||||||
|
|
|
@ -5,7 +5,7 @@ toc_max_heading_level: 4
|
||||||
description: "taosBenchmark (once called taosdemo ) is a tool for testing the performance of TDengine."
|
description: "taosBenchmark (once called taosdemo ) is a tool for testing the performance of TDengine."
|
||||||
---
|
---
|
||||||
|
|
||||||
## Introduction
|
# Introduction
|
||||||
|
|
||||||
taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDengine products. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. taosBenchmark can be configured to generate user-defined databases, supertables, subtables, and the time series data to populate these for performance benchmarking. taosBenchmark is highly configurable; some of the configurations include the time interval for inserting data, the number of working threads, and the capability to insert disordered data. The installer provides taosdemo as a soft link to taosBenchmark for compatibility with past users.
|
taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDengine products. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. taosBenchmark can be configured to generate user-defined databases, supertables, subtables, and the time series data to populate these for performance benchmarking. taosBenchmark is highly configurable; some of the configurations include the time interval for inserting data, the number of working threads, and the capability to insert disordered data. The installer provides taosdemo as a soft link to taosBenchmark for compatibility with past users.
|
||||||
|
|
||||||
|
@ -23,7 +23,7 @@ There are two ways to install taosBenchmark:
|
||||||
|
|
||||||
taosBenchmark needs to be executed in the terminal of the operating system. It supports two configuration methods: [Command-line arguments](#command-line-arguments-in-detail) and [JSON configuration file](#configuration-file-parameters-in-detail). These two methods are mutually exclusive. Users can use `-f <json file>` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, but not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters.
|
taosBenchmark needs to be executed in the terminal of the operating system. It supports two configuration methods: [Command-line arguments](#command-line-arguments-in-detail) and [JSON configuration file](#configuration-file-parameters-in-detail). These two methods are mutually exclusive. Users can use `-f <json file>` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, but not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters.
|
||||||
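As a sketch of that parameter-free mode (to our understanding it falls back to built-in defaults and writes sample data to the local server):

```bash
# run with built-in defaults; no -f and no other options
taosBenchmark
```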
|
|
||||||
taosBenchmark supports the complete performance testing of TDengine by providing functionally to write, query, and subscribe. These three functions are mutually exclusive, users can only select one of them each time taosBenchmark runs. The query and subscribe functionalities are only configurable using a json configuration file by specifying the parameter `filetype`, while write can be performed through both the command-line and a configuration file. If you want to test the performance of queries or data subscriptionm configure taosBenchmark with the configuration file. You can modify the value of the `filetype` parameter to specify the function that you want to test.
|
taosBenchmark supports complete performance testing of TDengine by providing the functionality to write, query, and subscribe. These three functions are mutually exclusive, and users can only select one of them each time taosBenchmark runs. The query and subscribe functionalities are only configurable using a JSON configuration file by specifying the parameter `filetype`, while write can be performed through both the command line and a configuration file. If you want to test the performance of queries or data subscription, configure taosBenchmark with the configuration file. You can modify the value of the `filetype` parameter to specify the function that you want to test.
|
||||||
|
|
||||||
**Make sure that the TDengine cluster is running correctly before running taosBenchmark.**
|
**Make sure that the TDengine cluster is running correctly before running taosBenchmark.**
|
||||||
|
|
||||||
|
@ -112,6 +112,9 @@ taosBenchmark -f <json file>
|
||||||
- **-u/--user <user\>** :
|
- **-u/--user <user\>** :
|
||||||
User name to connect to the TDengine server. Default is root.
|
User name to connect to the TDengine server. Default is root.
|
||||||
|
|
||||||
|
- **-U/--supplement-insert** :
|
||||||
|
Supplementally insert data without creating the database and tables. Optional; default is off.
|
||||||
|
|
||||||
- **-p/--password <passwd\>** :
|
- **-p/--password <passwd\>** :
|
||||||
The default password to connect to the TDengine server is `taosdata`.
|
The default password to connect to the TDengine server is `taosdata`.
|
||||||
|
|
||||||
|
@ -148,6 +151,9 @@ taosBenchmark -f <json file>
|
||||||
- **-l/--columns <colNum\>** :
|
- **-l/--columns <colNum\>** :
|
||||||
specify the number of columns in the super table. If both this parameter and `-b/--data-type` are set, the final number of columns is the greater of the two. If the number specified by this parameter is greater than the number of columns specified by `-b/--data-type`, the unspecified column types default to INT; for example, with `-l 5 -b float,double` the final columns are `FLOAT,DOUBLE,INT,INT,INT`. If the number of columns specified is less than or equal to the number of columns specified by `-b/--data-type`, the result is the columns and types specified by `-b/--data-type`; e.g., with `-l 3 -b float,double,float,bigint` the final columns are `FLOAT,DOUBLE,FLOAT,BIGINT`.
|
specify the number of columns in the super table. If both this parameter and `-b/--data-type` are set, the final number of columns is the greater of the two. If the number specified by this parameter is greater than the number of columns specified by `-b/--data-type`, the unspecified column types default to INT; for example, with `-l 5 -b float,double` the final columns are `FLOAT,DOUBLE,INT,INT,INT`. If the number of columns specified is less than or equal to the number of columns specified by `-b/--data-type`, the result is the columns and types specified by `-b/--data-type`; e.g., with `-l 3 -b float,double,float,bigint` the final columns are `FLOAT,DOUBLE,FLOAT,BIGINT`.
|
||||||
|
|
||||||
|
- **-L/--partial-col-num <colNum\>** :
|
||||||
|
Specify that only the first <colNum\> columns contain data; the remaining columns are NULL. The default is that all columns have data.
|
||||||
|
|
||||||
- **-A/--tag-type <tagType\>** :
|
- **-A/--tag-type <tagType\>** :
|
||||||
The tag column type of the super table. nchar and binary types can both set the length, for example:
|
The tag column type of the super table. nchar and binary types can both set the length, for example:
|
||||||
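(The original example line falls outside this hunk; a sketch consistent with the description above, quoting the list so the parentheses survive the shell:)

```bash
taosBenchmark -A 'INT,DOUBLE,NCHAR,BINARY(16)'
```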
|
|
||||||
|
@ -231,7 +237,7 @@ The parameters related to database creation are configured in `dbinfo` in the js
|
||||||
|
|
||||||
- **name**: specify the name of the database.
|
- **name**: specify the name of the database.
|
||||||
|
|
||||||
- **drop**: indicate whether to delete the database before inserting. The default is true.
|
- **drop**: indicate whether to delete the database before inserting. The value can be 'yes' or 'no'; 'no' means the database is not dropped. The default is 'yes' (drop the database).
|
||||||
|
|
||||||
#### Stream processing related configuration parameters
|
#### Stream processing related configuration parameters
|
||||||
|
|
||||||
|
@ -334,13 +340,13 @@ The configuration parameters for specifying super table tag columns and data col
|
||||||
|
|
||||||
- **name** : The name of the column. If used together with count, e.g. "name": "current", "count": 3, then the names of the 3 columns are current, current_2, current_3.
|
- **name** : The name of the column. If used together with count, e.g. "name": "current", "count": 3, then the names of the 3 columns are current, current_2, current_3.
|
||||||
|
|
||||||
- **min**: The minimum value of the column/label of the data type.
|
- **min**: The minimum value of the column/label of the data type. Generated values are greater than or equal to the minimum value.
|
||||||
|
|
||||||
- **max**: The maximum value of the column/label of the data type.
|
- **max**: The maximum value of the column/label of the data type. Generated values are less than the maximum value.
|
||||||
|
|
||||||
- **values**: The value field of the nchar/binary column/label, which will be chosen randomly from the values.
|
- **values**: The value field of the nchar/binary column/label, which will be chosen randomly from the values.
|
||||||
|
|
||||||
- **sma**: Insert the column into the BSMA. Enter `yes` or `no`. The default is `no`.
|
- **sma**: Insert the column into the SMA. Enter `yes` or `no`. The default is `no`.
|
||||||
|
|
||||||
#### insertion behavior configuration parameters
|
#### insertion behavior configuration parameters
|
||||||
|
|
||||||
|
|
|
@ -12,7 +12,7 @@ If executed on the TDengine server-side, there is no need for additional install
|
||||||
|
|
||||||
## Execution
|
## Execution
|
||||||
|
|
||||||
To access the TDengine CLI, you can execute `taos` command-line utility from a Linux terminal or Windows terminal.
|
To access the TDengine CLI, you can execute the `taos` command-line utility from a terminal.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
taos
|
taos
|
||||||
|
|
|
@ -5,28 +5,28 @@ description: "List of platforms supported by TDengine server, client, and connec
|
||||||
|
|
||||||
## List of supported platforms for TDengine server
|
## List of supported platforms for TDengine server
|
||||||
|
|
||||||
| | **Windows Server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18/20** |
|
| | **Windows Server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18/20** | **macOS** |
|
||||||
| ------------ | ---------------------------- | ----------------- | ---------------- | ---------------- |
|
| ------------ | ---------------------------- | ----------------- | ---------------- | ---------------- | --------- |
|
||||||
| X64 | ● | ● | ● | ● |
|
| X64 | ● | ● | ● | ● | ● |
|
||||||
| ARM64 | | | ● | |
|
| ARM64 | | | ● | | ● |
|
||||||
|
|
||||||
Note: ● means officially tested and verified, ○ means unofficially tested and verified.
|
Note: ● means officially tested and verified, ○ means unofficially tested and verified.
|
||||||
|
|
||||||
## List of supported platforms for TDengine clients and connectors
|
## List of supported platforms for TDengine clients and connectors
|
||||||
|
|
||||||
TDengine's connector can support a wide range of platforms, including X64/X86/ARM64/ARM32/MIPS/Alpha hardware platforms and Linux/Win64/Win32 development environments.
|
TDengine's connectors support a wide range of platforms, including X64/X86/ARM64/ARM32/MIPS/Alpha hardware platforms and Linux/Win64/Win32/macOS development environments.
|
||||||
|
|
||||||
The comparison matrix is as follows.
|
The comparison matrix is as follows.
|
||||||
|
|
||||||
| **CPU** | **X64 64bit** | **X64 64bit** | **ARM64** |
|
| **CPU** | **X64 64bit** | **X64 64bit** | **ARM64** | **X64 64bit** | **ARM64** |
|
||||||
| ----------- | ------------- | ------------- | --------- |
|
| ----------- | ------------- | ------------- | --------- | ------------- | --------- |
|
||||||
| **OS** | **Linux** | **Win64** | **Linux** |
|
| **OS** | **Linux** | **Win64** | **Linux** | **macOS** | **macOS** |
|
||||||
| **C/C++** | ● | ● | ● |
|
| **C/C++** | ● | ● | ● | ● | ● |
|
||||||
| **JDBC** | ● | ● | ● |
|
| **JDBC** | ● | ● | ● | ○ | ○ |
|
||||||
| **Python** | ● | ● | ● |
|
| **Python** | ● | ● | ● | ● | ● |
|
||||||
| **Go** | ● | ● | ● |
|
| **Go** | ● | ● | ● | ● | ● |
|
||||||
| **NodeJs** | ● | ● | ● |
|
| **NodeJs** | ● | ● | ● | ○ | ○ |
|
||||||
| **C#** | ● | ● | ○ |
|
| **C#** | ● | ● | ○ | ○ | ○ |
|
||||||
| **RESTful** | ● | ● | ● |
|
| **RESTful** | ● | ● | ● | ● | ● |
|
||||||
|
|
||||||
Note: ● means officially tested and verified, ○ means unofficially tested and verified, -- means not verified.
|
Note: ● means officially tested and verified, ○ means unofficially tested and verified, -- means not verified.
|
||||||
|
|
|
@ -25,10 +25,11 @@ The TDengine client taos can be executed in this container to access TDengine us
|
||||||
$ docker exec -it tdengine taos
|
$ docker exec -it tdengine taos
|
||||||
|
|
||||||
taos> show databases;
|
taos> show databases;
|
||||||
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
|
name |
|
||||||
====================================================================================================================================================================================================================================================================================
|
=================================
|
||||||
log | 2022-01-17 13:57:22.270 | 10 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready |
|
information_schema |
|
||||||
Query OK, 1 row(s) in set (0.002843s)
|
performance_schema |
|
||||||
|
Query OK, 2 row(s) in set (0.002843s)
|
||||||
```
|
```
|
||||||
|
|
||||||
The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from outside the container using TDengine CLI or various connectors for complex scenarios.
|
The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from outside the container using TDengine CLI or various connectors for complex scenarios.
|
||||||
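As a sketch of the port-publishing route for those more complex scenarios (6030 is the default native port, 6041 the default REST/taosAdapter port; `tdengine/tdengine` is the official image):

```bash
docker run -d --name tdengine -p 6030:6030 -p 6041:6041 tdengine/tdengine
```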
|
|
|
@ -164,7 +164,7 @@ The parameters described in this document by the effect that they have on the sy
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| -------- | -------------------- |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| 含义 | SMA index optimization policy |
|
| Meaning | SMA index optimization policy |
|
||||||
| Unit | None |
|
| Unit | None |
|
||||||
| Default Value | 0 |
|
| Default Value | 0 |
|
||||||
| Notes |
|
| Notes |
|
||||||
|
@ -177,12 +177,21 @@ The parameters described in this document by the effect that they have on the sy
|
||||||
### maxNumOfDistinctRes
|
### maxNumOfDistinctRes
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------- | --- |
|
| -------- | -------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | The maximum number of distinct rows returned |
|
| Meaning | The maximum number of distinct rows returned |
|
||||||
| Value Range | [100,000 - 100,000,000] |
|
| Value Range | [100,000 - 100,000,000] |
|
||||||
| Default Value | 100,000 |
|
| Default Value | 100,000 |
|
||||||
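The cap above applies to queries of the following shape (a sketch via the CLI's `-s` option; `meters` and `location` are illustrative names):

```bash
taos -s "SELECT DISTINCT location FROM meters;"
```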
|
|
||||||
|
### keepColumnName
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| -------- | -------------------------------- |
|
||||||
|
| Applicable | Client only |
|
||||||
|
| Meaning | Whether the column name returned for the Last, First, or LastRow function contains the function name. |
|
||||||
|
| Value Range | 0 means including the function name, 1 means not including the function name. |
|
||||||
|
| Default Value | 0 |
|
||||||
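A sketch of the effect (assumes a table `meters` with a column `current`): with the default 0 the result column is reported as `last(current)`, and with 1 it is reported as `current`.

```bash
taos -s "SELECT LAST(current) FROM meters;"
```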
|
|
||||||
## Locale Parameters
|
## Locale Parameters
|
||||||
|
|
||||||
### timezone
|
### timezone
|
||||||
|
@ -196,7 +205,7 @@ The parameters described in this document by the effect that they have on the sy
|
||||||
:::info
|
:::info
|
||||||
To handle data insertion and queries from multiple time zones, Unix timestamps are used and stored in TDengine. A timestamp generated at the same moment in any time zone has the same Unix timestamp value. Note that Unix timestamps are converted and recorded on the client side. To make sure the time on the client side can be converted to a Unix timestamp correctly, the time zone must be set properly.
|
To handle data insertion and queries from multiple time zones, Unix timestamps are used and stored in TDengine. A timestamp generated at the same moment in any time zone has the same Unix timestamp value. Note that Unix timestamps are converted and recorded on the client side. To make sure the time on the client side can be converted to a Unix timestamp correctly, the time zone must be set properly.
|
||||||
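A quick illustration with GNU date (Linux) that one instant maps to one Unix timestamp regardless of time zone:

```bash
TZ=UTC date -d "2022-01-01 00:00:00" +%s            # 1640995200
TZ=Asia/Shanghai date -d "2022-01-01 08:00:00" +%s  # 1640995200, the same instant
```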
|
|
||||||
On Linux system, TDengine clients automatically obtain timezone from the host. Alternatively, the timezone can be configured explicitly in configuration file `taos.cfg` like below. For example:
|
On Linux/macOS, TDengine clients automatically obtain the timezone from the host. Alternatively, the timezone can be configured explicitly in the configuration file `taos.cfg`. For example:
|
||||||
|
|
||||||
```
|
```
|
||||||
timezone UTC-8
|
timezone UTC-8
|
||||||
|
@ -239,9 +248,9 @@ To avoid the problems of using time strings, Unix timestamp can be used directly
|
||||||
:::info
|
:::info
|
||||||
A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, and Korean. The characters to be stored in nchar type are first encoded in UCS4-LE before being sent to the server side. Note that the correct encoding is determined by the user. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly.
|
A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, and Korean. The characters to be stored in nchar type are first encoded in UCS4-LE before being sent to the server side. Note that the correct encoding is determined by the user. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly.
|
||||||
|
|
||||||
The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux, or GB18030 or GBK on some systems in Chinese, POSIX in docker, CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE.
|
The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux/macOS (GB18030 or GBK on some Chinese-language systems), POSIX in Docker, and CP936 on Chinese-language Windows. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE.
|
||||||
|
|
||||||
The locale definition standard on Linux is: <Language\>\_<Region\>.<charset\>, for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. The charset indicates how to display the characters. On Linux and Mac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset.
|
The locale definition standard on Linux/macOS is: <Language\>\_<Region\>.<charset\>; for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, and "UTF-8" means charset. The charset indicates how to display the characters. On Linux/macOS, the charset can be set by locale in the system. On Windows systems, another configuration parameter `charset` must be used to configure the charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux/macOS to specify the charset.
|
||||||
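To check which locale and charset the client will inherit on Linux/macOS, the standard `locale` command suffices:

```bash
locale   # prints LANG and LC_* settings, e.g. LANG=zh_CN.UTF-8
```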
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
|
@ -254,9 +263,9 @@ The locale definition standard on Linux is: <Language\>\_<Region\>.<charset\>, f
|
||||||
| Default Value | charset set in the system |
|
| Default Value | charset set in the system |
|
||||||
|
|
||||||
:::info
|
:::info
|
||||||
On Linux, if `charset` is not set in `taos.cfg`, when `taos` is started, the charset is obtained from system locale. If obtaining charset from system locale fails, `taos` would fail to start.
|
On Linux/macOS, if `charset` is not set in `taos.cfg`, when `taos` is started, the charset is obtained from system locale. If obtaining charset from system locale fails, `taos` would fail to start.
|
||||||
|
|
||||||
So on Linux system, if system locale is set properly, it's not necessary to set `charset` in `taos.cfg`. For example:
|
So on Linux/macOS, if system locale is set properly, it's not necessary to set `charset` in `taos.cfg`. For example:
|
||||||
|
|
||||||
```
|
```
|
||||||
locale zh_CN.UTF-8
|
locale zh_CN.UTF-8
|
||||||
|
@ -270,7 +279,7 @@ charset CP936
|
||||||
|
|
||||||
Refer to the documentation for your operating system before changing the charset.
|
Refer to the documentation for your operating system before changing the charset.
|
||||||
|
|
||||||
On a Linux system, if the charset contained in `locale` is not consistent with that set by `charset`, the later setting in the configuration file takes precedence.
|
On Linux/macOS, if the charset contained in `locale` is not consistent with that set by `charset`, the later setting in the configuration file takes precedence.
|
||||||
|
|
||||||
```
|
```
|
||||||
locale zh_CN.UTF-8
|
locale zh_CN.UTF-8
|
||||||
|
@ -325,7 +334,7 @@ The charset that takes effect is UTF-8.
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Maximum number of vnodes per dnode |
|
| Meaning | Maximum number of vnodes per dnode |
|
||||||
| Value Range | 0-4096 |
|
| Value Range | 0-4096 |
|
||||||
| Default Value | 256 |
|
| Default Value | 2x the CPU cores |
|
||||||
|
|
||||||
## Time Parameters
|
## Time Parameters
|
||||||
|
|
||||||
|
@ -666,7 +675,7 @@ To prevent system resource from being exhausted by multiple concurrent streams,
|
||||||
| Meaning | Whether to generate core file when server crashes |
|
| Meaning | Whether to generate core file when server crashes |
|
||||||
| Value Range | 0: false, 1: true |
|
| Value Range | 0: false, 1: true |
|
||||||
| Default Value | 1 |
|
| Default Value | 1 |
|
||||||
| Note | The core file is generated under root directory `systemctl start taosd` is used to start, or under the working directory if `taosd` is started directly on Linux Shell. |
|
| Note | The core file is generated under the root directory if `systemctl start taosd`/`launchctl start com.tdengine.taosd` is used to start taosd, or under the working directory if `taosd` is started directly in a Linux/macOS shell. |
|
||||||
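When no core file appears, an adjacent OS-level limit is worth checking (a generic shell note, not a TDengine parameter):

```bash
ulimit -c            # 0 means core dumps are disabled in this shell
ulimit -c unlimited  # allow core files of unlimited size
```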
|
|
||||||
### udf
|
### udf
|
||||||
|
|
||||||
|
@ -697,152 +706,154 @@ To prevent system resource from being exhausted by multiple concurrent streams,
|
||||||
| 15 | telemetryPort | No | Yes |
|
| 15 | telemetryPort | No | Yes |
|
||||||
| 16 | queryPolicy | No | Yes |
|
| 16 | queryPolicy | No | Yes |
|
||||||
| 17 | querySmaOptimize | No | Yes |
|
| 17 | querySmaOptimize | No | Yes |
|
||||||
| 18 | queryBufferSize | Yes | Yes |
|
| 18 | queryRsmaTolerance | No | Yes |
|
||||||
| 19 | maxNumOfDistinctRes | Yes | Yes |
|
| 19 | queryBufferSize | Yes | Yes |
|
||||||
| 20 | minSlidingTime | Yes | Yes |
|
| 20 | maxNumOfDistinctRes | Yes | Yes |
|
||||||
| 21 | minIntervalTime | Yes | Yes |
|
| 21 | minSlidingTime | Yes | Yes |
|
||||||
| 22 | countAlwaysReturnValue | Yes | Yes |
|
| 22 | minIntervalTime | Yes | Yes |
|
||||||
| 23 | dataDir | Yes | Yes |
|
| 23 | countAlwaysReturnValue | Yes | Yes |
|
||||||
| 24 | minimalDataDirGB | Yes | Yes |
|
| 24 | dataDir | Yes | Yes |
|
||||||
| 25 | supportVnodes | No | Yes |
|
| 25 | minimalDataDirGB | Yes | Yes |
|
||||||
| 26 | tempDir | Yes | Yes |
|
| 26 | supportVnodes | No | Yes |
|
||||||
| 27 | minimalTmpDirGB | Yes | Yes |
|
| 27 | tempDir | Yes | Yes |
|
||||||
| 28 | compressMsgSize | Yes | Yes |
|
| 28 | minimalTmpDirGB | Yes | Yes |
|
||||||
| 29 | compressColData | Yes | Yes |
|
| 29 | compressMsgSize | Yes | Yes |
|
||||||
| 30 | smlChildTableName | Yes | Yes |
|
| 30 | compressColData | Yes | Yes |
|
||||||
| 31 | smlTagName | Yes | Yes |
|
| 31 | smlChildTableName | Yes | Yes |
|
||||||
| 32 | smlDataFormat | No | Yes |
|
| 32 | smlTagName | Yes | Yes |
|
||||||
| 33 | statusInterval | Yes | Yes |
|
| 33 | smlDataFormat | No | Yes |
|
||||||
| 34 | shellActivityTimer | Yes | Yes |
|
| 34 | statusInterval | Yes | Yes |
|
||||||
| 35 | transPullupInterval | No | Yes |
|
| 35 | shellActivityTimer | Yes | Yes |
|
||||||
| 36 | mqRebalanceInterval | No | Yes |
|
| 36 | transPullupInterval | No | Yes |
|
||||||
| 37 | ttlUnit | No | Yes |
|
| 37 | mqRebalanceInterval | No | Yes |
|
||||||
| 38 | ttlPushInterval | No | Yes |
|
| 38 | ttlUnit | No | Yes |
|
||||||
| 39 | numOfTaskQueueThreads | No | Yes |
|
| 39 | ttlPushInterval | No | Yes |
|
||||||
| 40 | numOfRpcThreads | No | Yes |
|
| 40 | numOfTaskQueueThreads | No | Yes |
|
||||||
| 41 | numOfCommitThreads | Yes | Yes |
|
| 41 | numOfRpcThreads | No | Yes |
|
||||||
| 42 | numOfMnodeReadThreads | No | Yes |
|
| 42 | numOfCommitThreads | Yes | Yes |
|
||||||
| 43 | numOfVnodeQueryThreads | No | Yes |
|
| 43 | numOfMnodeReadThreads | No | Yes |
|
||||||
| 44 | numOfVnodeStreamThreads | No | Yes |
|
| 44 | numOfVnodeQueryThreads | No | Yes |
|
||||||
| 45 | numOfVnodeFetchThreads | No | Yes |
|
| 45 | numOfVnodeStreamThreads | No | Yes |
|
||||||
| 46 | numOfVnodeWriteThreads | No | Yes |
|
| 46 | numOfVnodeFetchThreads | No | Yes |
|
||||||
| 47 | numOfVnodeSyncThreads | No | Yes |
|
| 47 | numOfVnodeWriteThreads | No | Yes |
|
||||||
| 48 | numOfQnodeQueryThreads | No | Yes |
|
| 48 | numOfVnodeSyncThreads | No | Yes |
|
||||||
| 49 | numOfQnodeFetchThreads | No | Yes |
|
| 49 | numOfVnodeRsmaThreads | No | Yes |
|
||||||
| 50 | numOfSnodeSharedThreads | No | Yes |
|
| 50 | numOfQnodeQueryThreads | No | Yes |
|
||||||
| 51 | numOfSnodeUniqueThreads | No | Yes |
|
| 51 | numOfQnodeFetchThreads | No | Yes |
|
||||||
| 52 | rpcQueueMemoryAllowed | No | Yes |
|
| 52 | numOfSnodeSharedThreads | No | Yes |
|
||||||
| 53 | logDir | Yes | Yes |
|
| 53 | numOfSnodeUniqueThreads | No | Yes |
|
||||||
| 54 | minimalLogDirGB | Yes | Yes |
|
| 54 | rpcQueueMemoryAllowed | No | Yes |
|
||||||
| 55 | numOfLogLines | Yes | Yes |
|
| 55 | logDir | Yes | Yes |
|
||||||
| 56 | asyncLog | Yes | Yes |
|
| 56 | minimalLogDirGB | Yes | Yes |
|
||||||
| 57 | logKeepDays | Yes | Yes |
|
| 57 | numOfLogLines | Yes | Yes |
|
||||||
| 58 | debugFlag | Yes | Yes |
|
| 58 | asyncLog | Yes | Yes |
|
||||||
| 59 | tmrDebugFlag | Yes | Yes |
|
| 59 | logKeepDays | Yes | Yes |
|
||||||
| 60 | uDebugFlag | Yes | Yes |
|
| 60 | debugFlag | Yes | Yes |
|
||||||
| 61 | rpcDebugFlag | Yes | Yes |
|
| 61 | tmrDebugFlag | Yes | Yes |
|
||||||
| 62 | jniDebugFlag | Yes | Yes |
|
| 62 | uDebugFlag | Yes | Yes |
|
||||||
| 63 | qDebugFlag | Yes | Yes |
|
| 63 | rpcDebugFlag | Yes | Yes |
|
||||||
| 64 | cDebugFlag | Yes | Yes |
|
| 64 | jniDebugFlag | Yes | Yes |
|
||||||
| 65 | dDebugFlag | Yes | Yes |
|
| 65 | qDebugFlag | Yes | Yes |
|
||||||
| 66 | vDebugFlag | Yes | Yes |
|
| 66 | cDebugFlag | Yes | Yes |
|
||||||
| 67 | mDebugFlag | Yes | Yes |
|
| 67 | dDebugFlag | Yes | Yes |
|
||||||
| 68 | wDebugFlag | Yes | Yes |
|
| 68 | vDebugFlag | Yes | Yes |
|
||||||
| 69 | sDebugFlag | Yes | Yes |
|
| 69 | mDebugFlag | Yes | Yes |
|
||||||
| 70 | tsdbDebugFlag | Yes | Yes |
|
| 70 | wDebugFlag | Yes | Yes |
|
||||||
| 71 | tqDebugFlag | No | Yes |
|
| 71 | sDebugFlag | Yes | Yes |
|
||||||
| 72 | fsDebugFlag | Yes | Yes |
|
| 72 | tsdbDebugFlag | Yes | Yes |
|
||||||
| 73 | udfDebugFlag | No | Yes |
|
| 73 | tqDebugFlag | No | Yes |
|
||||||
| 74 | smaDebugFlag | No | Yes |
|
| 74 | fsDebugFlag | Yes | Yes |
|
||||||
| 75 | idxDebugFlag | No | Yes |
|
| 75 | udfDebugFlag | No | Yes |
|
||||||
| 76 | tdbDebugFlag | No | Yes |
|
| 76 | smaDebugFlag | No | Yes |
|
||||||
| 77 | metaDebugFlag | No | Yes |
|
| 77 | idxDebugFlag | No | Yes |
|
||||||
| 78 | timezone | Yes | Yes |
|
| 78 | tdbDebugFlag | No | Yes |
|
||||||
| 79 | locale | Yes | Yes |
|
| 79 | metaDebugFlag | No | Yes |
|
||||||
| 80 | charset | Yes | Yes |
|
| 80 | timezone | Yes | Yes |
|
||||||
| 81 | udf | Yes | Yes |
|
| 81 | locale | Yes | Yes |
|
||||||
| 82 | enableCoreFile | Yes | Yes |
|
| 82 | charset | Yes | Yes |
|
||||||
| 83 | arbitrator | Yes | No |
|
| 83 | udf | Yes | Yes |
|
||||||
| 84 | numOfThreadsPerCore | Yes | No |
|
| 84 | enableCoreFile | Yes | Yes |
|
||||||
| 85 | numOfMnodes | Yes | No |
|
| 85 | arbitrator | Yes | No |
|
||||||
| 86 | vnodeBak | Yes | No |
|
| 86 | numOfThreadsPerCore | Yes | No |
|
||||||
| 87 | balance | Yes | No |
|
| 87 | numOfMnodes | Yes | No |
|
||||||
| 88 | balanceInterval | Yes | No |
|
| 88 | vnodeBak | Yes | No |
|
||||||
| 89 | offlineThreshold | Yes | No |
|
| 89 | balance | Yes | No |
|
||||||
| 90 | role | Yes | No |
|
| 90 | balanceInterval | Yes | No |
|
||||||
| 91 | dnodeNopLoop | Yes | No |
|
| 91 | offlineThreshold | Yes | No |
|
||||||
| 92 | keepTimeOffset | Yes | No |
|
| 92 | role | Yes | No |
|
||||||
| 93 | rpcTimer | Yes | No |
|
| 93 | dnodeNopLoop | Yes | No |
|
||||||
| 94 | rpcMaxTime | Yes | No |
|
| 94 | keepTimeOffset | Yes | No |
|
||||||
| 95 | rpcForceTcp | Yes | No |
|
| 95 | rpcTimer | Yes | No |
|
||||||
| 96 | tcpConnTimeout | Yes | No |
|
| 96 | rpcMaxTime | Yes | No |
|
||||||
| 97 | syncCheckInterval | Yes | No |
|
| 97 | rpcForceTcp | Yes | No |
|
||||||
| 98 | maxTmrCtrl | Yes | No |
|
| 98 | tcpConnTimeout | Yes | No |
|
||||||
| 99 | monitorReplica | Yes | No |
|
| 99 | syncCheckInterval | Yes | No |
|
||||||
| 100 | smlTagNullName | Yes | No |
|
| 100 | maxTmrCtrl | Yes | No |
|
||||||
| 101 | keepColumnName | Yes | No |
|
| 101 | monitorReplica | Yes | No |
|
||||||
| 102 | ratioOfQueryCores | Yes | No |
|
| 102 | smlTagNullName | Yes | No |
|
||||||
| 103 | maxStreamCompDelay | Yes | No |
|
| 103 | keepColumnName | Yes | No |
|
||||||
| 104 | maxFirstStreamCompDelay | Yes | No |
|
| 104 | ratioOfQueryCores | Yes | No |
|
||||||
| 105 | retryStreamCompDelay | Yes | No |
|
| 105 | maxStreamCompDelay | Yes | No |
|
||||||
| 106 | streamCompDelayRatio | Yes | No |
|
| 106 | maxFirstStreamCompDelay | Yes | No |
|
||||||
| 107 | maxVgroupsPerDb | Yes | No |
|
| 107 | retryStreamCompDelay | Yes | No |
|
||||||
| 108 | maxTablesPerVnode | Yes | No |
|
| 108 | streamCompDelayRatio | Yes | No |
|
||||||
| 109 | minTablesPerVnode | Yes | No |
|
| 109 | maxVgroupsPerDb | Yes | No |
|
||||||
| 110 | tableIncStepPerVnode | Yes | No |
|
| 110 | maxTablesPerVnode | Yes | No |
|
||||||
| 111 | cache | Yes | No |
|
| 111 | minTablesPerVnode | Yes | No |
|
||||||
| 112 | blocks | Yes | No |
|
| 112 | tableIncStepPerVnode | Yes | No |
|
||||||
| 113 | days | Yes | No |
|
| 113 | cache | Yes | No |
|
||||||
| 114 | keep | Yes | No |
|
| 114 | blocks | Yes | No |
|
||||||
| 115 | minRows | Yes | No |
|
| 115 | days | Yes | No |
|
||||||
| 116 | maxRows | Yes | No |
|
| 116 | keep | Yes | No |
|
||||||
| 117 | quorum | Yes | No |
|
| 117 | minRows | Yes | No |
|
||||||
| 118 | comp | Yes | No |
|
| 118 | maxRows | Yes | No |
|
||||||
| 119 | walLevel | Yes | No |
|
| 119 | quorum | Yes | No |
|
||||||
| 120 | fsync | Yes | No |
|
| 120 | comp | Yes | No |
|
||||||
| 121 | replica | Yes | No |
|
| 121 | walLevel | Yes | No |
|
||||||
| 122 | partitions | Yes | No |
|
| 122 | fsync | Yes | No |
|
||||||
| 123 | quorum | Yes | No |
|
| 123 | replica | Yes | No |
|
||||||
| 124 | update | Yes | No |
|
| 124 | partitions | Yes | No |
|
||||||
| 125 | cachelast | Yes | No |
|
| 125 | quorum | Yes | No |
|
||||||
| 126 | maxSQLLength | Yes | No |
|
| 126 | update | Yes | No |
|
||||||
| 127 | maxWildCardsLength | Yes | No |
|
| 127 | cachelast | Yes | No |
|
||||||
| 128 | maxRegexStringLen | Yes | No |
|
| 128 | maxSQLLength | Yes | No |
|
||||||
| 129 | maxNumOfOrderedRes | Yes | No |
|
| 129 | maxWildCardsLength | Yes | No |
|
||||||
| 130 | maxConnections | Yes | No |
|
| 130 | maxRegexStringLen | Yes | No |
|
||||||
| 131 | mnodeEqualVnodeNum | Yes | No |
|
| 131 | maxNumOfOrderedRes | Yes | No |
|
||||||
| 132 | http | Yes | No |
|
| 132 | maxConnections | Yes | No |
|
||||||
| 133 | httpEnableRecordSql | Yes | No |
|
| 133 | mnodeEqualVnodeNum | Yes | No |
|
||||||
| 134 | httpMaxThreads | Yes | No |
|
| 134 | http | Yes | No |
|
||||||
| 135 | restfulRowLimit | Yes | No |
|
| 135 | httpEnableRecordSql | Yes | No |
|
||||||
| 136 | httpDbNameMandatory | Yes | No |
|
| 136 | httpMaxThreads | Yes | No |
|
||||||
| 137 | httpKeepAlive | Yes | No |
|
| 137 | restfulRowLimit | Yes | No |
|
||||||
| 138 | enableRecordSql | Yes | No |
|
| 138 | httpDbNameMandatory | Yes | No |
|
||||||
| 139 | maxBinaryDisplayWidth | Yes | No |
|
| 139 | httpKeepAlive | Yes | No |
|
||||||
| 140 | stream | Yes | No |
|
| 140 | enableRecordSql | Yes | No |
|
||||||
| 141 | retrieveBlockingModel | Yes | No |
|
| 141 | maxBinaryDisplayWidth | Yes | No |
|
||||||
| 142 | tsdbMetaCompactRatio | Yes | No |
|
| 142 | stream | Yes | No |
|
||||||
| 143 | defaultJSONStrType | Yes | No |
|
| 143 | retrieveBlockingModel | Yes | No |
|
||||||
| 144 | walFlushSize | Yes | No |
|
| 144 | tsdbMetaCompactRatio | Yes | No |
|
||||||
| 145 | keepTimeOffset | Yes | No |
|
| 145 | defaultJSONStrType | Yes | No |
|
||||||
| 146 | flowctrl | Yes | No |
|
| 146 | walFlushSize | Yes | No |
|
||||||
| 147 | slaveQuery | Yes | No |
|
| 147 | keepTimeOffset | Yes | No |
|
||||||
| 148 | adjustMaster | Yes | No |
|
| 148 | flowctrl | Yes | No |
|
||||||
| 149 | topicBinaryLen | Yes | No |
|
| 149 | slaveQuery | Yes | No |
|
||||||
| 150 | telegrafUseFieldNum | Yes | No |
|
| 150 | adjustMaster | Yes | No |
|
||||||
| 151 | deadLockKillQuery | Yes | No |
|
| 151 | topicBinaryLen | Yes | No |
|
||||||
| 152 | clientMerge | Yes | No |
|
| 152 | telegrafUseFieldNum | Yes | No |
|
||||||
| 153 | sdbDebugFlag | Yes | No |
|
| 153 | deadLockKillQuery | Yes | No |
|
||||||
| 154 | odbcDebugFlag | Yes | No |
|
| 154 | clientMerge | Yes | No |
|
||||||
| 155 | httpDebugFlag | Yes | No |
|
| 155 | sdbDebugFlag | Yes | No |
|
||||||
| 156 | monDebugFlag | Yes | No |
|
| 156 | odbcDebugFlag | Yes | No |
|
||||||
| 157 | cqDebugFlag | Yes | No |
|
| 157 | httpDebugFlag | Yes | No |
|
||||||
| 158 | shortcutFlag | Yes | No |
|
| 158 | monDebugFlag | Yes | No |
|
||||||
| 159 | probeSeconds | Yes | No |
|
| 159 | cqDebugFlag | Yes | No |
|
||||||
| 160 | probeKillSeconds | Yes | No |
|
| 160 | shortcutFlag | Yes | No |
|
||||||
| 161 | probeInterval | Yes | No |
|
| 161 | probeSeconds | Yes | No |
|
||||||
| 162 | lossyColumns | Yes | No |
|
| 162 | probeKillSeconds | Yes | No |
|
||||||
| 163 | fPrecision | Yes | No |
|
| 163 | probeInterval | Yes | No |
|
||||||
| 164 | dPrecision | Yes | No |
|
| 164 | lossyColumns | Yes | No |
|
||||||
| 165 | maxRange | Yes | No |
|
| 165 | fPrecision | Yes | No |
|
||||||
| 166 | range | Yes | No |
|
| 166 | dPrecision | Yes | No |
|
||||||
|
| 167 | maxRange | Yes | No |
|
||||||
|
| 168 | range | Yes | No |
|
||||||
|
|
|
@ -47,9 +47,8 @@ In the schemaless writing data line protocol, each data item in the field_set ne
|
||||||
|
|
||||||
- `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types.
|
- `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types.
|
||||||
|
|
||||||
For example, the following data rows indicate that the t1 label is "3" (NCHAR), the t2 label is "4" (NCHAR), and the t3 label
|
For example, the following data row writes the c1 column as 3 (BIGINT), the c2 column as false (BOOL), the c3 column
|
||||||
is "t3" to the super table named `st` labeled "t3" (NCHAR), write c1 column as 3 (BIGINT), c2 column as false (BOOL), c3 column
|
as "passit" (BINARY), c4 column as 4 (DOUBLE), and the primary key timestamp as 1626006833639000000 to child table with the t1 label as "3" (NCHAR), the t2 label as "4" (NCHAR), and the t3 label as "t3" (NCHAR) and the super table named `st`.
|
||||||
is "passit" (BINARY), c4 column is 4 (DOUBLE), and the primary key timestamp is 1626006833639000000 in one row.
|
|
||||||
|
|
||||||
```json
|
```json
|
||||||
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
|
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
|
||||||
|
@ -69,7 +68,7 @@ Schemaless writes process row data according to the following principles.
|
||||||
|
|
||||||
Note that tag_key1, tag_key2 are not in the original order in which the user entered the tags; they are sorted in ascending order by tag name string. Therefore, tag_key1 is not necessarily the first tag entered in the line protocol.
|
Note that tag_key1, tag_key2 are not in the original order in which the user entered the tags; they are sorted in ascending order by tag name string. Therefore, tag_key1 is not necessarily the first tag entered in the line protocol.
|
||||||
The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t_" is a fixed prefix that every table generated by this mapping relationship has.
|
The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t_" is a fixed prefix that every table generated by this mapping relationship has.
|
||||||
You can configure smlChildTableName to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpul,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
|
You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
|
||||||
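A minimal sketch of that configuration; note that `taos.cfg` elsewhere in this document uses space-separated `key value` entries, and the path assumes a default Linux install:

```bash
# let the tag named "tname" supply the child-table name
echo "smlChildTableName tname" >> /etc/taos/taos.cfg
```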
|
|
||||||
2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
|
2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
|
||||||
3. If the subtable obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the subtable name determined in steps 1 or 2.
|
3. If the subtable obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the subtable name determined in steps 1 or 2.
|
||||||
|
@ -78,7 +77,7 @@ You can configure smlChildTableName to specify table names, for example, `smlChi
|
||||||
NULL.
|
NULL.
|
||||||
6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
|
6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
|
||||||
7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
|
7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
|
||||||
8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat to false. Otherwise, data will be written out of order and a database error will occur.
|
8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat in taos.cfg defaults to false as of version 3.0.1.3.)
|
||||||
|
|
||||||
:::tip
|
:::tip
|
||||||
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed
|
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed
|
||||||
|
|
|
@ -51,5 +51,6 @@ port: 8125
|
||||||
Start StatsD after adding the following (assuming the config file is modified to config.js)
|
Start StatsD after adding the following (assuming the config file is modified to config.js)
|
||||||
|
|
||||||
```
|
```
|
||||||
|
npm install
|
||||||
node stats.js config.js &
|
node stats.js config.js &
|
||||||
```
|
```
|
||||||
|
|
|
@ -22,5 +22,4 @@ An example is as follows.
|
||||||
username = "root"
|
username = "root"
|
||||||
password = "taosdata"
|
password = "taosdata"
|
||||||
data_format = "influx"
|
data_format = "influx"
|
||||||
influx_max_line_bytes = 250
|
|
||||||
```
|
```
|
||||||
|
|
|
@ -30,21 +30,20 @@ After restarting Prometheus, you can refer to the following example to verify th
|
||||||
|
|
||||||
```
|
```
|
||||||
taos> show databases;
|
taos> show databases;
|
||||||
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
|
name |
|
||||||
====================================================================================================================================================================================================================================================================================
|
=================================
|
||||||
test | 2022-04-12 08:07:58.756 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
|
information_schema |
|
||||||
log | 2022-04-20 07:19:50.260 | 2 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
|
performance_schema |
|
||||||
prometheus_data | 2022-04-20 07:21:09.202 | 158 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
|
prometheus_data |
|
||||||
db | 2022-04-15 06:37:08.512 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
|
Query OK, 3 row(s) in set (0.000585s)
|
||||||
Query OK, 4 row(s) in set (0.000585s)
|
|
||||||
|
|
||||||
taos> use prometheus_data;
|
taos> use prometheus_data;
|
||||||
Database changed.
|
Database changed.
|
||||||
|
|
||||||
taos> show stables;
|
taos> show stables;
|
||||||
name | created_time | columns | tags | tables |
|
name |
|
||||||
============================================================================================
|
=================================
|
||||||
metrics | 2022-04-20 07:21:09.209 | 2 | 1 | 1389 |
|
metrics |
|
||||||
Query OK, 1 row(s) in set (0.000487s)
|
Query OK, 1 row(s) in set (0.000487s)
|
||||||
|
|
||||||
taos> select * from metrics limit 10;
|
taos> select * from metrics limit 10;
|
||||||
|
@ -89,3 +88,7 @@ VALUE TIMESTAMP
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
- TDengine automatically creates unique IDs for sub-table names according to its naming rule.
|
||||||
|
:::
|
||||||
|
|
|
@ -15,6 +15,7 @@ To write Telegraf data to TDengine requires the following preparations.
|
||||||
- The TDengine cluster is deployed and functioning properly
|
- The TDengine cluster is deployed and functioning properly
|
||||||
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
|
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details. A quick connectivity check is sketched after this list.
|
||||||
- Telegraf has been installed. Please refer to the [official documentation](https://docs.influxdata.com/telegraf/v1.22/install/) for Telegraf installation.
|
- Telegraf has been installed. Please refer to the [official documentation](https://docs.influxdata.com/telegraf/v1.22/install/) for Telegraf installation.
|
||||||
|
- Telegraf collects the running status measurements of the current system. You can enable [input plugins](https://docs.influxdata.com/telegraf/v1.22/plugins/) to have Telegraf ingest data in [other formats](https://docs.influxdata.com/telegraf/v1.24/data_formats/input/) and then forward it to TDengine.
|
||||||
|
|
||||||
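The quick connectivity check referenced in the taosAdapter item above (default REST port 6041 and default credentials; adjust for your deployment):

```bash
curl -u root:taosdata -d "show databases;" http://127.0.0.1:6041/rest/sql
```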
## Configuration steps
|
## Configuration steps
|
||||||
<Telegraf />
|
<Telegraf />
|
||||||
|
@ -31,26 +32,27 @@ Use TDengine CLI to verify Telegraf correctly writing data to TDengine and read
|
||||||
|
|
||||||
```
|
```
|
||||||
taos> show databases;
|
taos> show databases;
|
||||||
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
|
name |
|
||||||
====================================================================================================================================================================================================================================================================================
|
=================================
|
||||||
telegraf | 2022-04-20 08:47:53.488 | 22 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
|
information_schema |
|
||||||
log | 2022-04-20 07:19:50.260 | 9 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
|
performance_schema |
|
||||||
Query OK, 2 row(s) in set (0.002401s)
|
telegraf |
|
||||||
|
Query OK, 3 rows in database (0.010568s)
|
||||||
|
|
||||||
taos> use telegraf;
|
taos> use telegraf;
|
||||||
Database changed.
|
Database changed.
|
||||||
|
|
||||||
taos> show stables;
|
taos> show stables;
|
||||||
name | created_time | columns | tags | tables |
|
name |
|
||||||
============================================================================================
|
=================================
|
||||||
swap | 2022-04-20 08:47:53.532 | 7 | 1 | 1 |
|
swap |
|
||||||
cpu | 2022-04-20 08:48:03.488 | 11 | 2 | 5 |
|
cpu |
|
||||||
system | 2022-04-20 08:47:53.512 | 8 | 1 | 1 |
|
system |
|
||||||
diskio | 2022-04-20 08:47:53.550 | 12 | 2 | 15 |
|
diskio |
|
||||||
kernel | 2022-04-20 08:47:53.503 | 6 | 1 | 1 |
|
kernel |
|
||||||
mem | 2022-04-20 08:47:53.521 | 35 | 1 | 1 |
|
mem |
|
||||||
processes | 2022-04-20 08:47:53.555 | 12 | 1 | 1 |
|
processes |
|
||||||
disk | 2022-04-20 08:47:53.541 | 8 | 5 | 2 |
|
disk |
|
||||||
Query OK, 8 row(s) in set (0.000521s)
|
Query OK, 8 row(s) in set (0.000521s)
|
||||||
|
|
||||||
taos> select * from telegraf.system limit 10;
|
taos> select * from telegraf.system limit 10;
|
||||||
|
@ -65,3 +67,11 @@ taos> select * from telegraf.system limit 10;
|
||||||
|
|
|
|
||||||
Query OK, 3 row(s) in set (0.013269s)
|
Query OK, 3 row(s) in set (0.013269s)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
- TDengine takes InfluxDB-format data and creates unique IDs for table names according to its naming rule.
|
||||||
|
You can configure the `smlChildTableName` parameter to generate specified table names if needed, and you must insert data in the corresponding format.
|
||||||
|
For example, add `smlChildTableName=tname` to the taos.cfg file, then insert the data `st,tname=cpu1,t1=4 c1=3 1626006833639000000`; the table name will be cpu1. If multiple lines have the same tname but different tag_set values, the first line's tag_set is used to automatically create the table and the other lines are ignored. Please refer to [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
|
||||||
|
:::
|
||||||
|
|
||||||
|
|
|
@ -32,28 +32,29 @@ Use the TDengine CLI to verify that collectd's data is written to TDengine and c
|
||||||
|
|
||||||
```
|
```
|
||||||
taos> show databases;
|
taos> show databases;
|
||||||
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
|
name |
|
||||||
====================================================================================================================================================================================================================================================================================
|
=================================
|
||||||
collectd | 2022-04-20 09:27:45.460 | 95 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
|
information_schema |
|
||||||
log | 2022-04-20 07:19:50.260 | 11 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
|
performance_schema |
|
||||||
Query OK, 2 row(s) in set (0.003266s)
|
collectd |
|
||||||
|
Query OK, 3 row(s) in set (0.003266s)
|
||||||
|
|
||||||
taos> use collectd;
|
taos> use collectd;
|
||||||
Database changed.
|
Database changed.
|
||||||
|
|
||||||
taos> show stables;
|
taos> show stables;
|
||||||
name | created_time | columns | tags | tables |
|
name |
|
||||||
============================================================================================
|
=================================
|
||||||
load_1 | 2022-04-20 09:27:45.492 | 2 | 2 | 1 |
|
load_1 |
|
||||||
memory_value | 2022-04-20 09:27:45.463 | 2 | 3 | 6 |
|
memory_value |
|
||||||
df_value | 2022-04-20 09:27:45.463 | 2 | 4 | 25 |
|
df_value |
|
||||||
load_2 | 2022-04-20 09:27:45.501 | 2 | 2 | 1 |
|
load_2 |
|
||||||
load_0 | 2022-04-20 09:27:45.485 | 2 | 2 | 1 |
|
load_0 |
|
||||||
interface_1 | 2022-04-20 09:27:45.488 | 2 | 3 | 12 |
|
interface_1 |
|
||||||
irq_value | 2022-04-20 09:27:45.476 | 2 | 3 | 31 |
|
irq_value |
|
||||||
interface_0 | 2022-04-20 09:27:45.480 | 2 | 3 | 12 |
|
interface_0 |
|
||||||
entropy_value | 2022-04-20 09:27:45.473 | 2 | 2 | 1 |
|
entropy_value |
|
||||||
swap_value | 2022-04-20 09:27:45.477 | 2 | 3 | 5 |
|
swap_value |
|
||||||
Query OK, 10 row(s) in set (0.002236s)
|
Query OK, 10 row(s) in set (0.002236s)
|
||||||
|
|
||||||
taos> select * from collectd.memory_value limit 10;
|
taos> select * from collectd.memory_value limit 10;
|
||||||
|
@ -72,3 +73,7 @@ taos> select * from collectd.memory_value limit 10;
|
||||||
Query OK, 10 row(s) in set (0.010348s)
|
Query OK, 10 row(s) in set (0.010348s)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
- TDengine automatically creates unique IDs for sub-table names according to its naming rule.
|
||||||
|
:::
|
||||||
|
|
|
@ -26,7 +26,7 @@ Start StatsD:
|
||||||
```
|
```
|
||||||
$ node stats.js config.js &
|
$ node stats.js config.js &
|
||||||
[1] 8546
|
[1] 8546
|
||||||
$ 20 Apr 09:54:41 - [8546] reading config file: exampleConfig.js
|
$ 20 Apr 09:54:41 - [8546] reading config file: config.js
|
||||||
20 Apr 09:54:41 - server is up INFO
|
20 Apr 09:54:41 - server is up INFO
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -40,19 +40,20 @@ Use the TDengine CLI to verify that StatsD data is written to TDengine and can r
|
||||||
|
|
||||||
```
|
```
|
||||||
taos> show databases;
|
taos> show databases;
|
||||||
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
|
name |
|
||||||
====================================================================================================================================================================================================================================================================================
|
=================================
|
||||||
log | 2022-04-20 07:19:50.260 | 11 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
|
information_schema |
|
||||||
statsd | 2022-04-20 09:54:51.220 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
|
performance_schema |
|
||||||
Query OK, 2 row(s) in set (0.003142s)
|
statsd |
|
||||||
|
Query OK, 3 row(s) in set (0.003142s)
|
||||||
|
|
||||||
taos> use statsd;
|
taos> use statsd;
|
||||||
Database changed.
|
Database changed.
|
||||||
|
|
||||||
taos> show stables;
|
taos> show stables;
|
||||||
name | created_time | columns | tags | tables |
|
name |
|
||||||
============================================================================================
|
=================================
|
||||||
foo | 2022-04-20 09:54:51.234 | 2 | 1 | 1 |
|
foo |
|
||||||
Query OK, 1 row(s) in set (0.002161s)
|
Query OK, 1 row(s) in set (0.002161s)
|
||||||
|
|
||||||
taos> select * from foo;
|
taos> select * from foo;
|
||||||
|
@ -63,3 +64,8 @@ Query OK, 1 row(s) in set (0.004179s)
|
||||||
|
|
||||||
taos>
|
taos>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
- TDengine will automatically create unique IDs for sub-table names according to its naming rules.
|
||||||
|
:::
|
||||||
|
|
|
@ -36,39 +36,45 @@ After waiting about 10 seconds, use the TDengine CLI to query TDengine to verify
|
||||||
|
|
||||||
```
|
```
|
||||||
taos> show databases;
|
taos> show databases;
|
||||||
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
|
name |
|
||||||
====================================================================================================================================================================================================================================================================================
|
=================================
|
||||||
log | 2022-04-20 07:19:50.260 | 11 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
|
information_schema |
|
||||||
icinga2 | 2022-04-20 12:11:39.697 | 13 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
|
performance_schema |
|
||||||
Query OK, 2 row(s) in set (0.001867s)
|
icinga2 |
|
||||||
|
Query OK, 3 row(s) in set (0.001867s)
|
||||||
|
|
||||||
taos> use icinga2;
|
taos> use icinga2;
|
||||||
Database changed.
|
Database changed.
|
||||||
|
|
||||||
taos> show stables;
|
taos> show stables;
|
||||||
name | created_time | columns | tags | tables |
|
name |
|
||||||
============================================================================================
|
=================================
|
||||||
icinga.service.users.state_... | 2022-04-20 12:11:39.726 | 2 | 1 | 1 |
|
icinga.service.users.state_... |
|
||||||
icinga.service.users.acknow... | 2022-04-20 12:11:39.756 | 2 | 1 | 1 |
|
icinga.service.users.acknow... |
|
||||||
icinga.service.procs.downti... | 2022-04-20 12:11:44.541 | 2 | 1 | 1 |
|
icinga.service.procs.downti... |
|
||||||
icinga.service.users.users | 2022-04-20 12:11:39.770 | 2 | 1 | 1 |
|
icinga.service.users.users |
|
||||||
icinga.service.procs.procs_min | 2022-04-20 12:11:44.599 | 2 | 1 | 1 |
|
icinga.service.procs.procs_min |
|
||||||
icinga.service.users.users_min | 2022-04-20 12:11:39.809 | 2 | 1 | 1 |
|
icinga.service.users.users_min |
|
||||||
icinga.check.max_check_atte... | 2022-04-20 12:11:39.847 | 2 | 3 | 2 |
|
icinga.check.max_check_atte... |
|
||||||
icinga.service.procs.state_... | 2022-04-20 12:11:44.522 | 2 | 1 | 1 |
|
icinga.service.procs.state_... |
|
||||||
icinga.service.procs.procs_... | 2022-04-20 12:11:44.576 | 2 | 1 | 1 |
|
icinga.service.procs.procs_... |
|
||||||
icinga.service.users.users_... | 2022-04-20 12:11:39.796 | 2 | 1 | 1 |
|
icinga.service.users.users_... |
|
||||||
icinga.check.latency | 2022-04-20 12:11:39.869 | 2 | 3 | 2 |
|
icinga.check.latency |
|
||||||
icinga.service.procs.procs_... | 2022-04-20 12:11:44.588 | 2 | 1 | 1 |
|
icinga.service.procs.procs_... |
|
||||||
icinga.service.users.downti... | 2022-04-20 12:11:39.746 | 2 | 1 | 1 |
|
icinga.service.users.downti... |
|
||||||
icinga.service.users.users_... | 2022-04-20 12:11:39.783 | 2 | 1 | 1 |
|
icinga.service.users.users_... |
|
||||||
icinga.service.users.reachable | 2022-04-20 12:11:39.736 | 2 | 1 | 1 |
|
icinga.service.users.reachable |
|
||||||
icinga.service.procs.procs | 2022-04-20 12:11:44.565 | 2 | 1 | 1 |
|
icinga.service.procs.procs |
|
||||||
icinga.service.procs.acknow... | 2022-04-20 12:11:44.554 | 2 | 1 | 1 |
|
icinga.service.procs.acknow... |
|
||||||
icinga.service.procs.state | 2022-04-20 12:11:44.509 | 2 | 1 | 1 |
|
icinga.service.procs.state |
|
||||||
icinga.service.procs.reachable | 2022-04-20 12:11:44.532 | 2 | 1 | 1 |
|
icinga.service.procs.reachable |
|
||||||
icinga.check.current_attempt | 2022-04-20 12:11:39.825 | 2 | 3 | 2 |
|
icinga.check.current_attempt |
|
||||||
icinga.check.execution_time | 2022-04-20 12:11:39.898 | 2 | 3 | 2 |
|
icinga.check.execution_time |
|
||||||
icinga.service.users.state | 2022-04-20 12:11:39.704 | 2 | 1 | 1 |
|
icinga.service.users.state |
|
||||||
Query OK, 22 row(s) in set (0.002317s)
|
Query OK, 22 row(s) in set (0.002317s)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
- TDengine will automatically create unique IDs for sub-table names according to its naming rules.
|
||||||
|
:::
|
||||||
|
|
|
@ -33,35 +33,41 @@ Wait for a few seconds and then use the TDengine CLI to query whether the corres
|
||||||
|
|
||||||
```
|
```
|
||||||
taos> show databases;
|
taos> show databases;
|
||||||
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
|
name |
|
||||||
====================================================================================================================================================================================================================================================================================
|
=================================
|
||||||
tcollector | 2022-04-20 12:44:49.604 | 88 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
|
information_schema |
|
||||||
log | 2022-04-20 07:19:50.260 | 11 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
|
performance_schema |
|
||||||
Query OK, 2 row(s) in set (0.002679s)
|
tcollector |
|
||||||
|
Query OK, 3 rows in database (0.001647s)
|
||||||
|
|
||||||
taos> use tcollector;
|
taos> use tcollector;
|
||||||
Database changed.
|
Database changed.
|
||||||
|
|
||||||
taos> show stables;
|
taos> show stables;
|
||||||
name | created_time | columns | tags | tables |
|
name |
|
||||||
============================================================================================
|
=================================
|
||||||
proc.meminfo.hugepages_rsvd | 2022-04-20 12:44:53.945 | 2 | 1 | 1 |
|
proc.meminfo.hugepages_rsvd |
|
||||||
proc.meminfo.directmap1g | 2022-04-20 12:44:54.110 | 2 | 1 | 1 |
|
proc.meminfo.directmap1g |
|
||||||
proc.meminfo.vmallocchunk | 2022-04-20 12:44:53.724 | 2 | 1 | 1 |
|
proc.meminfo.vmallocchunk |
|
||||||
proc.meminfo.hugepagesize | 2022-04-20 12:44:54.004 | 2 | 1 | 1 |
|
proc.meminfo.hugepagesize |
|
||||||
tcollector.reader.lines_dro... | 2022-04-20 12:44:49.675 | 2 | 1 | 1 |
|
tcollector.reader.lines_dro... |
|
||||||
proc.meminfo.sunreclaim | 2022-04-20 12:44:53.437 | 2 | 1 | 1 |
|
proc.meminfo.sunreclaim |
|
||||||
proc.stat.ctxt | 2022-04-20 12:44:55.363 | 2 | 1 | 1 |
|
proc.stat.ctxt |
|
||||||
proc.meminfo.swaptotal | 2022-04-20 12:44:53.158 | 2 | 1 | 1 |
|
proc.meminfo.swaptotal |
|
||||||
proc.uptime.total | 2022-04-20 12:44:52.813 | 2 | 1 | 1 |
|
proc.uptime.total |
|
||||||
tcollector.collector.lines_... | 2022-04-20 12:44:49.895 | 2 | 2 | 51 |
|
tcollector.collector.lines_... |
|
||||||
proc.meminfo.vmallocused | 2022-04-20 12:44:53.704 | 2 | 1 | 1 |
|
proc.meminfo.vmallocused |
|
||||||
proc.meminfo.memavailable | 2022-04-20 12:44:52.939 | 2 | 1 | 1 |
|
proc.meminfo.memavailable |
|
||||||
sys.numa.foreign_allocs | 2022-04-20 12:44:57.929 | 2 | 2 | 1 |
|
sys.numa.foreign_allocs |
|
||||||
proc.meminfo.committed_as | 2022-04-20 12:44:53.639 | 2 | 1 | 1 |
|
proc.meminfo.committed_as |
|
||||||
proc.vmstat.pswpin | 2022-04-20 12:44:54.177 | 2 | 1 | 1 |
|
proc.vmstat.pswpin |
|
||||||
proc.meminfo.cmafree | 2022-04-20 12:44:53.865 | 2 | 1 | 1 |
|
proc.meminfo.cmafree |
|
||||||
proc.meminfo.mapped | 2022-04-20 12:44:53.349 | 2 | 1 | 1 |
|
proc.meminfo.mapped |
|
||||||
proc.vmstat.pgmajfault | 2022-04-20 12:44:54.251 | 2 | 1 | 1 |
|
proc.vmstat.pgmajfault |
|
||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
- TDengine will automatically create unique IDs for sub-table names according to its naming rules.
|
||||||
|
:::
|
||||||
|
|
|
@ -5,7 +5,7 @@ title: Architecture
|
||||||
|
|
||||||
## Cluster and Primary Logic Unit
|
## Cluster and Primary Logic Unit
|
||||||
|
|
||||||
The design of TDengine is based on the assumption that any hardware or software system is not 100% reliable and that no single node can provide sufficient computing and storage resources to process massive data. Therefore, since day one, TDengine has been designed as a natively distributed system, with high-reliability architecture. Hardware failure or software failure of a single, or even multiple servers will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware resource needs, significantly.
|
The design of TDengine is based on the assumption that any hardware or software system is not 100% reliable and that no single node can provide sufficient computing and storage resources to process massive data. Therefore, since day one, TDengine has been designed as a natively distributed system, with high-reliability architecture, and can be scaled out easily. Hardware failure or software failure of a single, or even multiple servers will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware resource needs significantly.
|
||||||
|
|
||||||
### Primary Logic Unit
|
### Primary Logic Unit
|
||||||
|
|
||||||
|
@ -15,44 +15,50 @@ Logical structure diagram of TDengine's distributed architecture is as follows:
|
||||||
|
|
||||||
<center> Figure 1: TDengine architecture diagram </center>
|
<center> Figure 1: TDengine architecture diagram </center>
|
||||||
|
|
||||||
A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDengine client driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit.
|
A complete TDengine system runs on one or more physical nodes. Logically, a complete system includes data node (dnode), TDengine client driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TDengine client driver (TAOSC). The following is a brief introduction to each logical unit.
|
||||||
|
|
||||||
**Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, virtual machine, or Docker container installed with OS. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please check [wikipedia](https://en.wikipedia.org/wiki/Fully_qualified_domain_name).
|
**Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, virtual machine, or Docker container installed with OS. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please check [wikipedia](https://en.wikipedia.org/wiki/Fully_qualified_domain_name).
|
||||||
|
|
||||||
**Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node (pnode). A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (VNODE) and zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes.
|
**Data node (dnode):** A dnode is a running instance of the TDengine server `taosd` on a physical node (pnode). A working system must have at least one data node. A dnode contains zero to multiple virtual nodes (VNODE) and zero or at most one management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes.
|
||||||
|
|
||||||
**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the capacity of the hardware of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node.
|
**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a database is created, some vnodes are created for the database. The number of vnodes that can be created on a specific dnode depends on the available system resources. Each vnode must belong to a single DB, while each DB can have multiple vnodes. Each vnode stores time-series data plus the schema and tag values of the tables hosted by it. A vnode is identified by the EP of the dnode it belongs to and the unique ID of the vgroup it belongs to. Vgroups are created and managed by the mnode.
|
||||||
|
|
||||||
**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 3) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The leader/follower mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the leader. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction.
|
**Management node (mnode)**: A virtual logical unit (M in the figure) responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes. At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 3) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The mnode group adopts the RAFT protocol to guarantee high data availability and reliability; any data operation can only be performed through the leader in the RAFT group. The first mnode in the mnode RAFT group is created automatically when the first dnode of the cluster is deployed; the other two follower mnodes need to be created through SQL commands in the TDengine CLI. There can be at most one mnode in a single dnode, and the mnode is identified by the EP of the dnode where it's located. Dnodes communicate with each other to automatically obtain the EPs of all mnodes.
|
||||||
|
|
||||||
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a leader/follower mechanism. Write operations can only be performed on the leader vnode, and then replicated to follower vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.
|
**Computation node (qnode)**: A virtual logical unit (Q in the figure) responsible for executing query and computing tasks, including the `show` commands based on system built-in tables. Multiple qnodes can be configured in a TDengine cluster to share the query and computing load. A qnode is not coupled with a specific database, which means each qnode can execute query tasks for multiple databases in parallel. There can be at most one qnode in a single dnode, and the qnode is identified by the EP of the dnode. The TDengine client driver can get the list of qnodes through communication with the mnode. If there is no qnode available in the system, query and computing tasks are executed by vnodes. When a query task is executed, according to the execution plan, one or more qnodes may be scheduled by the scheduler to execute the task. A qnode can get data from a vnode, and send the execution result to other qnodes for further processing. By introducing qnodes, TDengine achieves separation between storage and computing.
|
||||||
|
|
||||||
**TAOSC**: TAOSC is the driver provided by TDengine to applications. It is responsible for dealing with the interaction between application and cluster, and provides the native interface for the C/C++ language. It is also embedded in the JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C#/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster.
|
**Stream Processing node (snode)**: A virtual logical unit (S in the figure) responsible for stream processing tasks. Multiple snodes can be configured in a TDengine cluster to share the burden of stream processing tasks. An snode is not coupled with a specific stream, which means a single snode can execute the tasks of multiple streams. There can be at most one snode in a single dnode, identified by the EP of the dnode. The mnode schedules available snodes to perform stream processing tasks. If there is no snode available in the system, stream processing tasks are executed in vnodes.
|
||||||
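For reference, qnodes and snodes are created on demand through SQL in the TDengine CLI, in the same way as additional mnodes. A minimal sketch, assuming a dnode with ID 1 already exists in the cluster:

```
-- create a qnode on dnode 1 to take over query and computing tasks
CREATE QNODE ON DNODE 1;

-- create an snode on dnode 1 to run stream processing tasks
CREATE SNODE ON DNODE 1;
```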
|
|
||||||
|
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed using the RAFT protocol. Write operations can only be performed on the leader vnode, and are then replicated to follower vnodes, thus ensuring that a single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `replica` when creating a DB, and the default is 1. Using the multi-replica feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system-unique ID, aka VGroup ID, to each vgroup. Virtual nodes with the same vnode group ID belong to the same vgroup. If `replica` is set to 1, it means no data replication; the number of replicas of a database can be dynamically changed to 3 for high data reliability. Even if a virtual node group is deleted, its ID will not be reused.
|
||||||
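As a sketch of how the `replica` parameter and vgroup IDs surface in practice (the database name `power` is illustrative):

```
-- create a database whose vgroups each consist of 3 replica vnodes
CREATE DATABASE power REPLICA 3;

-- list the vgroups of the database together with their system-unique vgroup IDs
USE power;
SHOW VGROUPS;
```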
|
|
||||||
|
**TDengine client driver**: TAOSC is the abbreviation for the TDengine client driver provided by TDengine to applications. It is responsible for dealing with the interaction between applications and the cluster, and provides the native interface for the C/C++ language. It is also embedded in the JDBC, C#, Python, Go and Node.js language connection libraries. Applications interact with the whole cluster through the TDengine client driver instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc., to the correct data node; and, when returning the results to the application, performing the final aggregation, sorting, filtering and other operations. For the JDBC, C/C++/C#/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. Another critical component of TDengine, `taosAdapter`, which provides the fully distributed RESTful interface, also invokes the TDengine client driver to communicate with the TDengine cluster.
|
||||||
|
|
||||||
### Node Communication
|
### Node Communication
|
||||||
|
|
||||||
**Communication mode**: The communication among each data node of TDengine system, and among the client driver and each data node is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digitally sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transportation.
|
**Communication mode**: The communication among the data nodes of a TDengine system, and between the client driver and each data node, is carried out over TCP. TDengine automatically compresses/decompresses data and signs/authenticates packets according to the configuration.
|
||||||
|
|
||||||
**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter “fqdn”. If it is not specified, the system will automatically use the hostname of the computer as its FQDN. If the node is not configured with FQDN, you can directly set the configuration parameter “fqdn” of the node to its IP address. However, IP is not recommended because IP address may be changed, and once it changes, the cluster will not work properly. The EP (End Point) of a data node consists of FQDN + Port. With FQDN, it is necessary to ensure the DNS service is running, or hosts files on nodes are configured properly.
|
**FQDN configuration:** A data node may have one or more FQDNs, which can be specified with the parameter `fqdn` in the system configuration file `taos.cfg`. If it is not specified, TDengine will automatically use the hostname of the computer as its FQDN. An IP address can also be used to configure `fqdn`, but this is not recommended because the IP address may change, and once it changes, the whole TDengine cluster will not work. The end point of a data node is composed of FQDN and port number. It is necessary to ensure the DNS service is running, or that hosts files on the nodes are configured properly, to make sure FQDNs resolve.
|
||||||
|
|
||||||
**Port configuration**: The external port of a data node is determined by the system configuration parameter “serverPort” in TDengine, and the port for internal communication of cluster is serverPort+5. The data replication operation among data nodes in the cluster also occupies a TCP port, which is serverPort+10. In order to support multithreading and efficient processing of UDP data, each internal and external UDP connection needs to occupy 5 consecutive ports. Therefore, the total port range of a data node will be serverPort to serverPort + 10, for a total of 11 TCP/UDP ports. To run the system, make sure that the firewall keeps these ports open. Each data node can be configured with a different serverPort.
|
**Port configuration**: The port of a data node is configured with the parameter `serverPort` in `taos.cfg`.
|
||||||
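A minimal `taos.cfg` sketch covering the parameters above (the host names are hypothetical):

```
# FQDN of this dnode; defaults to the hostname of the computer if omitted
fqdn        h1.taosdata.com
# port this dnode listens on
serverPort  6030
# end points used to locate the cluster when this dnode starts (see below)
firstEp     h1.taosdata.com:6030
secondEp    h2.taosdata.com:6030
```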
|
|
||||||
**Cluster external connection**: TDengine cluster can accommodate a single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter “serverPort” of TDengine will be adopted.
|
**Cluster external connection**: A TDengine cluster can accommodate a single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the TDengine CLI `taos`, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the configuration parameter `serverPort` of TDengine will be used.
|
||||||
|
|
||||||
**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode:
|
**Inter-cluster communication**: Data nodes connect with each other through TCP. When a data node starts, it will obtain the EP of the dnode where the mnode is located, and then establish a connection with the mnode to exchange information. There are three steps to obtain EP information of the mnode:
|
||||||
|
|
||||||
1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step;
|
1. Check whether the `dnode.json` file exists; if it does not exist or cannot be opened normally, skip to the second step;
|
||||||
2. Check the system configuration file taos.cfg to obtain node configuration parameters “firstEp” and “secondEp” (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist or do not exist in taos.cfg, or are invalid, skip to the third step;
|
2. Check the system configuration file `taos.cfg` to obtain the node configuration parameters `firstEp` and `secondEp` (the nodes specified by these two parameters can be normal nodes without an mnode; in this case, the node will try to redirect to the mnode when connected). If these two configuration parameters are missing from `taos.cfg` or are invalid, skip to the third step;
|
||||||
3. Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connection. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again.
|
3. Set your own EP as a mnode EP and run it independently.
|
||||||
|
|
||||||
**The choice of MNODE**: TDengine logically has a management node, but there is no separate execution code. The server-side only has one set of execution code, taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, totally transparently and without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage.
|
After obtaining the mnode EP list, the data node initiates the connection. It joins the working cluster once the connection is established successfully. If not successful, it tries the next item in the mnode EP list. If all attempts fail, the dnode sleeps for a few seconds and tries again.
|
||||||
|
|
||||||
**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster.
|
**Create MNODE**: The management node (mnode) in TDengine is a logical node without a dedicated process; it runs inside a dnode, which is a real process on the operating system. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when the first dnode in the cluster starts, it automatically becomes an mnode; the other mnodes need to be created using SQL in the TDengine CLI, as sketched below.
|
||||||
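A sketch of creating the follower mnodes from the TDengine CLI (the dnode IDs are illustrative):

```
-- find the IDs of the dnodes in the cluster
SHOW DNODES;

-- create follower mnodes on dnode 2 and dnode 3
CREATE MNODE ON DNODE 2;
CREATE MNODE ON DNODE 3;
```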
|
|
||||||
- Step1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"
|
**Add new data nodes:** After the first data node starts successfully, the system can begin to work. There are two steps to add a new data node into the cluster.
|
||||||
- Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
|
|
||||||
|
|
||||||
**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not an mnode itself, it will reply to the mnode with the EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
|
- Step 1: Connect to an existing working data node using the TDengine CLI, and then add the End Point of the new data node with the command `create dnode` (see the sketch after these steps).
|
||||||
|
- Step 2: In the system configuration parameter file `taos.cfg` of the new data node, set the `firstEp` and `secondEp` parameters to the EP of any two data nodes in the existing cluster. If there is only one existing data node in the system, skip parameter `secondEp`. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
|
||||||
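A sketch of Step 1, assuming the new data node runs at the hypothetical end point `h2.taosdata.com:6030`:

```
-- run in the TDengine CLI connected to an existing data node
CREATE DNODE "h2.taosdata.com:6030";
```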
|
|
||||||
|
**Redirection**: Regardless of dnode or TAOSC, the connection to the mnode is initiated first. The mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or TAOSC, if it’s not an mnode itself, it will reply to the connection initiator with the mnode EP List. After receiving this list, TAOSC or the newly started dnode will try to establish the connection again with mnode. When the mnode EP List changes, each data node quickly obtains the latest list and notifies TAOSC through messaging interaction among nodes.
|
||||||
|
|
||||||
### A Typical Data Writing Process
|
### A Typical Data Writing Process
|
||||||
|
|
||||||
|
@ -62,18 +68,20 @@ To explain the relationship between vnode, mnode, TAOSC and application and thei
|
||||||
|
|
||||||
<center> Figure 2: Typical process of TDengine </center>
|
<center> Figure 2: Typical process of TDengine </center>
|
||||||
|
|
||||||
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
|
1. Application initiates a request to insert data through JDBC or other APIs.
|
||||||
2. TAOSC checks the cache to see if meta data exists for the table. If it does, it goes straight to Step 4. If not, TAOSC sends a get meta-data request to mnode.
|
2. TAOSC checks the cache to see if the vgroups-info for the database being written to exists. If it does, it goes straight to Step 4. Otherwise, TAOSC sends a get meta-data request to mnode.
|
||||||
3. Mnode returns the meta-data of the table to TAOSC. Meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs. If the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode.
|
3. Mnode returns the vgroups-info of the database to TAOSC. The vgroups-info contains the distribution of the vgroups of the database, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs. If the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode.
|
||||||
4. TAOSC initiates an insert request to leader vnode.
|
4. TAOSC checks to see whether the metadata for the table to be inserted is in the cache. If yes, skip to Step 6; otherwise TAOSC sends a request to the corresponding vnode to get the metadata for the table.
|
||||||
5. After vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in vgroup.
|
5. The vnode returns the metadata for the table to TAOSC; the metadata includes the table's schema.
|
||||||
6. TAOSC notifies APP that writing is successful.
|
6. TAOSC initiates an insert request to leader vnode of the table.
|
||||||
|
7. After vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in vgroup.
|
||||||
|
8. TAOSC notifies APP that writing is successful.
|
||||||
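The eight steps above are triggered transparently by an ordinary write, for example a minimal insert like the following (the sub-table `d1001` and its column values are illustrative):

```
INSERT INTO d1001 VALUES (NOW, 10.3, 219, 0.31);
```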
|
|
||||||
For Step 2 and 3, when TAOSC starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have a mnode configured, it will reply with the mnode EP list, so that TAOSC will re-issue a request to obtain meta-data to the EP of another mnode.
|
For Step 2, when TAOSC starts, it does not know the End Point of the mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have an mnode configured, it will reply with the mnode EP list, so that TAOSC will re-issue a request to the EP of another mnode to obtain meta-data.
|
||||||
|
|
||||||
For Step 4 and 5, without caching, TAOSC can't recognize the leader in the virtual node group, so assumes that the first vnode is the leader and sends a request to it. If this vnode is not the leader, it will reply to the actual leader as a new target to which TAOSC shall send a request. Once a response of successful insertion is obtained, TAOSC will cache the information of leader node.
|
For Steps 4 and 6, without caching, TAOSC can't recognize the leader in the virtual node group, so it assumes that the first vnode is the leader and sends a request to it. If this vnode is not the leader, it will reply to TAOSC with the actual leader, and TAOSC will then send a request to the true leader. Once a response of successful insertion is obtained, TAOSC will cache the information of the leader node for further use.
|
||||||
|
|
||||||
The above describes the process of inserting data. The processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications.
|
The above flow describes the process of inserting data. The processes of querying and computing are similar. TAOSC encapsulates and hides all these complicated processes, so that they are transparent to applications.
|
||||||
|
|
||||||
Through TAOSC caching mechanism, mnode needs to be accessed only when a table is accessed for the first time, so mnode will not become a system bottleneck. However, because schema and vgroup may change (such as load balancing), TAOSC will interact with mnode regularly to automatically update the cache.
|
Through TAOSC caching mechanism, mnode needs to be accessed only when a table is accessed for the first time, so mnode will not become a system bottleneck. However, because schema and vgroup may change (such as load balancing), TAOSC will interact with mnode regularly to automatically update the cache.
|
||||||
|
|
||||||
|
@ -81,15 +89,15 @@ Through TAOSC caching mechanism, mnode needs to be accessed only when a table is
|
||||||
|
|
||||||
### Storage Model
|
### Storage Model
|
||||||
|
|
||||||
The data stored by TDengine includes collected time-series data, metadata related to database and tables, tag data, etc. All of the data is specifically divided into three parts:
|
The data stored by TDengine includes collected time-series data, plus metadata and tag data related to databases and tables, etc. All of the data is specifically divided into three parts:
|
||||||
|
|
||||||
- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when database “update” parameter is set to 1. By adopting the model with **one table for each data collection point**, the data of a given time period is continuously stored, and the writing against one single table is a simple appending operation. Multiple records can be read at one time, thus ensuring the best performance for both insert and query operations of a single data collection point.
|
- Time-series data: stored in vnode and composed of data, head and last files. Normally the amount of time-series data is huge, and the query load depends on the application scenario. Out-of-order writing is allowed. By adopting the model with **one table for each data collection point**, the data of a given time period is continuously stored, and writing against one single table is a simple appending operation. Multiple records can be read at one time, thus ensuring the best performance for both insert and query operations of a single data collection point.
|
||||||
- Tag data: meta files stored in vnode. Four standard operations of create, read, update and delete are supported. The amount of data is not large. If there are N tables, there are N records, so all can be stored in memory. To make tag filtering efficient, TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even with millions of tables, the tag filtering results will return in milliseconds.
|
- Table Metadata: includes tags and table schema and is stored in the meta file of each vnode. The four CRUD operations can be performed on table metadata. There is one record for each table, so the amount of table metadata depends on the number of tables. Table metadata is cached in an LRU model, and indexing of tag data is supported. TDengine can run multiple queries in parallel. As long as memory resources are sufficient, metadata is kept entirely in memory for quick access, and filtering on tens of millions of tags can be finished in a few milliseconds (see the sketch after this list). Even when memory resources are insufficient, TDengine can still perform high-speed queries on tens of millions of tables.
|
||||||
- Metadata: stored in mnode and includes system node, user, DB, table schema and other information. Four standard operations of create, delete, update and read are supported. The amount of this data is not large and can be stored in memory. Moreover, the number of queries is not large because of client cache. Even though TDengine uses centralized storage management, because of the architecture, there is no performance bottleneck.
|
- Database Metadata: stored in mnode and includes system node, user, DB, table schema and other information. Four standard operations of create, delete, update and read are supported. The amount of this data is not large and can be stored in memory. Moreover, the number of queries is not large because of client cache. Even though TDengine uses centralized storage management, because of the architecture, there is no performance bottleneck.
|
||||||
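As an illustration of the tag filtering described above, a query like the one below (assuming the common `meters` demo super table with a `location` tag) first scans only the table metadata to find qualifying sub-tables, and only then touches their time-series data:

```
-- tag filtering happens on metadata before any time-series data is read
SELECT COUNT(*) FROM meters WHERE location = 'California.SanFrancisco';
```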
|
|
||||||
Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately. This has two major advantages:
|
Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately. This has two major advantages:
|
||||||
|
|
||||||
- Reduces the redundancy of tag data storage significantly. General NoSQL database or time-series database adopts K-V (key-value) storage, in which the key includes a timestamp, a device ID and various tags. Each record carries these duplicated tags, so storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite them again, which is an extremely expensive operation.
|
- Reduces the redundancy of tag data storage significantly. General NoSQL database or time-series database adopts K-V (key-value) storage, in which the key includes a timestamp, a device ID and various tags. Each record carries these duplicated tags, so much storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite them again, which is an extremely expensive operation.
|
||||||
- Aggregate data efficiently between multiple tables: when aggregating data between multiple tables, it first finds the tables which satisfy the filtering conditions, and then finds the corresponding data blocks of these tables. This greatly reduces the data sets to be scanned which in turn improves the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds.
|
- Aggregate data efficiently between multiple tables: when aggregating data between multiple tables, it first finds the tables which satisfy the filtering conditions, and then finds the corresponding data blocks of these tables. This greatly reduces the data sets to be scanned which in turn improves the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds.
|
||||||
|
|
||||||
### Data Sharding
|
### Data Sharding
|
||||||
|
@ -106,36 +114,26 @@ The meta data of each table (including schema, tags, etc.) is also stored in vno
|
||||||
|
|
||||||
### Data Partitioning
|
### Data Partitioning
|
||||||
|
|
||||||
In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by the database configuration parameter `“days”`. This method of partitioning by time range is also convenient to efficiently implement data retention policies. As long as the data file exceeds the specified number of days (system configuration parameter `“keep”`), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate tiered-storage. Cold/hot data can be stored in different storage media to significantly reduce storage costs.
|
In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by the database configuration parameter `duration`. This method of partitioning by time range is also convenient to efficiently implement data retention policies. As long as the data file exceeds the specified number of days (system configuration parameter `keep`), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate tiered-storage. Cold/hot data can be stored in different storage media to significantly reduce storage costs.
|
||||||
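Both parameters are fixed when the database is created; a sketch with illustrative values:

```
-- each data file covers 10 days; data older than 3650 days is deleted automatically
CREATE DATABASE sensors DURATION 10 KEEP 3650;
```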
|
|
||||||
In general, **TDengine splits big data by vnode and time range in two dimensions** to manage the data efficiently with horizontal scalability.
|
In general, **TDengine splits big data by vnode and time range in two dimensions** to manage the data efficiently with horizontal scalability.
|
||||||
|
|
||||||
### Load Balancing
|
|
||||||
|
|
||||||
Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node) so that the mnode knows the status of the entire cluster. Based on the overall status, when the mnode finds a dnode is overloaded, it will migrate one or more vnodes to other dnodes. During the process, TDengine services keep running and the data insertion, query and computing operations are not affected.
|
|
||||||
|
|
||||||
If the mnode has not received the dnode status for a period of time, the dnode will be treated as offline. If the dnode stays offline beyond the time configured by parameter `“offlineThreshold”`, the dnode will be forcibly removed from the cluster by mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnodes replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure the replica number.
|
|
||||||
|
|
||||||
When new data nodes are added to the cluster, with new computing and storage resources, the system will automatically start the load balancing process.
|
|
||||||
|
|
||||||
The load balancing process does not require any manual intervention, and it is transparent to the application. **Note: load balancing is controlled by parameter “balance”, which determines to turn on/off automatic load balancing.**
|
|
||||||
|
|
||||||
## Data Writing and Replication Process
|
## Data Writing and Replication Process
|
||||||
|
|
||||||
If a database has N replicas, a virtual node group has N virtual nodes. But only one is the Leader and all others are slaves. When the application writes a new record to system, only the Leader vnode can accept the writing request. If a follower vnode receives a writing request, the system will notifies TAOSC to redirect.
|
TDengine utilizes the RAFT protocol to replicate data. If a database has N replicas, a virtual node group has N virtual nodes, where N can be either 1 or 3. In each vnode group, only one is the leader and all others are followers. When the application writes a new record to the system, only the leader vnode can accept the write request. If a follower vnode receives a write request, the system will notify TAOSC to redirect the request to the leader.
|
||||||
|
|
||||||
### Leader vnode Writing Process
|
### Leader vnode Writing Process
|
||||||
|
|
||||||
Leader Vnode uses a writing process as follows:
|
Leader Vnode uses a writing process as follows:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
<center> Figure 3: TDengine Leader writing process </center>
|
<center> Figure 3: TDengine Leader writing process </center>
|
||||||
|
|
||||||
1. Leader vnode receives the application data insertion request, verifies, and moves to next step;
|
1. Leader vnode receives the application data insertion request, verifies, and moves to next step;
|
||||||
2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file;
|
2. Leader vnode writes the original request packet into the database log file WAL. If the database configuration parameter `wal_level` is set to 1, the vnode doesn't invoke fsync; if `wal_level` is set to 2, fsync is invoked according to another database parameter, `wal_fsync_period` (see the sketch after this list).
|
||||||
3. If there are multiple replicas, vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
|
3. If there are multiple replicas, the leader vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
|
||||||
4. Write into memory and add the record to “skip list”;
|
4. Leader vnode writes the data into memory and adds the record to “skip list”;
|
||||||
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
|
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
|
||||||
6. If any of Step 2, 3 or 4 fails, the error will directly return to the application.
|
6. If any of Step 2, 3 or 4 fails, the error will directly return to the application.
|
||||||
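As referenced in Step 2, both WAL parameters are set per database. A sketch with illustrative values:

```
-- write the WAL and invoke fsync at most every 3000 ms
CREATE DATABASE db_wal WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000;
```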
|
|
||||||
|
@ -143,74 +141,53 @@ Leader Vnode uses a writing process as follows:
|
||||||
|
|
||||||
For a follower vnode, the write process as follows:
|
For a follower vnode, the write process as follows:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
<center> Figure 4: TDengine Follower Writing Process </center>
|
<center> Figure 4: TDengine Follower Writing Process </center>
|
||||||
|
|
||||||
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
|
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
|
||||||
2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file;
|
2. The behavior regarding `wal_level` and `wal_fsync_period` in a follower vnode is the same as in the leader vnode.
|
||||||
3. Write into memory and add the record to “skip list”.
|
3. Write into memory and add the record to “skip list”.
|
||||||
|
|
||||||
Compared with Leader vnode, follower vnode has no forwarding or reply confirmation step, means two steps less. But writing into memory and WAL is exactly the same.
|
Compared with the leader vnode, a follower vnode has no forwarding or reply confirmation step, but writing into memory and WAL is exactly the same.
|
||||||
|
|
||||||
### Remote Disaster Recovery and IDC (Internet Data Center) Migration
|
|
||||||
|
|
||||||
As discussed above, TDengine writes using Leader and Follower processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools.
|
|
||||||
|
|
||||||
On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, added virtual nodes can provide services. In the synchronization process, leader and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed.
|
|
||||||
|
|
||||||
However, the asynchronous replication has a very low probability scenario where data may be lost. The specific scenario is as follows:
|
|
||||||
|
|
||||||
1. Leader vnode has finished its 5-step operations, confirmed the success of writing to APP, and then goes down;
|
|
||||||
2. Follower vnode receives the write request, then processing fails before writing to the log in Step 2;
|
|
||||||
3. Follower vnode will become the new leader, thus losing one record.
|
|
||||||
|
|
||||||
In theory, with asynchronous replication, there is no guarantee against data loss. However, as described above, this is an extremely low-probability scenario.
|
|
||||||
|
|
||||||
Note: Remote disaster recovery and no-downtime IDC migration are supported only by the Enterprise Edition. **Hint: This function is not available yet**
|
|
||||||
|
|
||||||
### Leader/follower Selection
|
### Leader/follower Selection
|
||||||
|
|
||||||
Vnode maintains a version number. When memory data is persisted, the version number will also be persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one.
|
Vnode maintains a version number. When memory data is persisted, the version number is also persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one.
|
||||||
|
|
||||||
When a vnode starts, the roles (leader, follower) are uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other nodes in the virtual node group and exchange status, including version and its own roles. Through the exchange, the system implements a leader-selection process. The rules are as follows:
|
When a vnode starts, its role (leader, follower) is uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with the other vnodes in the virtual node group and exchange status, including version and its own role. Through this exchange, the system implements a leader-selection process according to the standard RAFT protocol.
|
||||||
|
|
||||||
1. If there’s only one replica, it’s always leader
|
|
||||||
2. When all replicas are online, the one with latest version is leader
|
|
||||||
3. Over half of online nodes are virtual nodes, and some virtual node is follower, it will automatically become leader
|
|
||||||
4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in virtual node group list will be selected as leader.
|
|
||||||
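Once the election has completed, the role each replica currently holds can be inspected from the client. A quick way to do this (assuming a database named `power` already exists) is:

```sql
USE power;
-- Lists the virtual node groups of the current database; the per-replica
-- status columns show which replica is the leader and which are followers.
SHOW VGROUPS;
```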
|
|
||||||
### Synchronous Replication
|
### Synchronous Replication
|
||||||
|
|
||||||
For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, user also needs to specify a new parameter “quorum”. If quorum is greater than one, it means that every time the Leader forwards a message to the replica, it needs to wait for “quorum-1” reply confirms before informing the application that data has been successfully written in follower. If “quorum-1” reply confirms are not received within a certain period of time, the leader vnode will return an error to the application.
|
For scenarios with strong data consistency requirements, asynchronous data replication is not enough, because there is a small probability of data loss. So, TDengine provides an optional synchronous replication mechanism. When creating a database, in addition to specifying the number of replicas with the parameter `replica`, the user also needs to specify a new parameter `strict`. If `strict` is set to 1, the leader vnode can return success to the client only after more than half of the follower vnodes have confirmed that the data has been replicated to them. If the leader vnode cannot get confirmation from more than half of the follower vnodes, for example because some of them are offline, it will return failure to the client.
|
||||||
|
|
||||||
With synchronous replication, performance of system will decrease and latency will increase. Because metadata needs strong consistency, the default for data synchronization between mnodes is synchronous replication.
|
With synchronous replication, system performance will decrease and latency will increase. Because metadata requires strong consistency, the default policy for data replication between mnodes is synchronous mode.
|
||||||
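A minimal sketch of the two parameters just described, assuming `STRICT 1` is accepted exactly as stated above (the database name is illustrative):

```sql
-- Three replicas with strict (synchronous) replication: the leader vnode
-- acknowledges a write only after more than half of the replicas confirm it.
CREATE DATABASE power REPLICA 3 STRICT 1;
```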
|
|
||||||
## Caching and Persistence
|
## Caching and Persistence
|
||||||
|
|
||||||
### Caching
|
### Caching
|
||||||
|
|
||||||
TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a Write-driven Cache Management Mechanism. This strategy is different from the read-driven data caching mode (Least-Recent-Used, LRU), which directly puts the most recently written data in the system buffer. When the buffer reaches a threshold, the earliest data are written to disk in batches. Generally speaking, for the use of IoT data, users are most concerned about the most recently generated data, that is, the current status. TDengine takes full advantage of this feature to put the most recently arrived (current state) data in the buffer.
|
TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a write-driven cache management mechanism. This strategy is different from the read-driven data caching mode (Least-Recently-Used, LRU): it directly puts the most recently written data in the system buffer. When the buffer reaches a threshold, the earliest data are written to disk in batches. Generally speaking, for IoT data applications, users are most concerned about the most recently generated data, that is, the current state. TDengine takes full advantage of this characteristic by putting the most recently arrived (current-state) data in the buffer.
|
||||||
|
|
||||||
TDengine provides millisecond-level data collecting capability to users through query functions. Putting the recently arrived data directly in the buffer can respond to users' analysis query for the latest piece or batch of data more quickly, and provide faster database query response capability as a whole. In this sense, **TDengine can be used as a data cache by setting appropriate configuration parameters without deploying Redis or other additional cache systems**. This can effectively simplify the system architecture and reduce operational costs. It should be noted that after TDengine is restarted, the buffer of the system will be emptied, the previously cached data will be written to disk in batches, and the previously cached data will not be reloaded into the buffer. In this sense, TDengine's cache differs from proprietary key-value cache systems.
|
TDengine provides millisecond-level data collecting capability to users through query functions. Putting the recently arrived data directly in the buffer allows the system to respond more quickly to users' analytical queries on the latest piece or batch of data, and provides faster database query response capability as a whole. In this sense, **TDengine can be used as a data cache by setting appropriate configuration parameters, without deploying Redis or other additional cache systems**. This can significantly simplify the system architecture and reduce operational costs. It should be noted that after TDengine is restarted, the buffer of the system will be emptied; the previously cached data will have been written to disk in batches and will not be reloaded into the buffer. In this sense, TDengine's cache differs from proprietary key-value cache systems.
|
||||||
|
|
||||||
Each vnode has its own independent memory, and it is composed of multiple memory blocks of fixed size, and different vnodes are completely isolated. When writing data, similar to the writing of logs, data is sequentially added to memory, but each vnode maintains its own skip list for quick search. When more than one third of the memory block are used, the disk writing operation will start, and the subsequent writing operation is carried out in a new memory block. By this design, one third of the memory blocks in a vnode keep the latest data, so as to achieve the purpose of caching and quick search. The number of memory blocks of a vnode is determined by the configuration parameter “blocks”, and the size of memory blocks is determined by the configuration parameter “cache”.
|
Each vnode has its own independent memory composed of multiple memory blocks of fixed size, and the memory of different vnodes is completely isolated. When writing data, similar to the writing of logs, data is sequentially appended to memory, but each vnode maintains its own skip list for quick search. When more than one third of the memory blocks are used, the data will be persisted to disk storage, and subsequent write operations will be carried out in a new memory block. In this way, one third of the memory blocks in a vnode keep the latest data, achieving the purpose of caching and quick search. The size of a vnode's write buffer is determined by the database configuration parameter `buffer`.
|
||||||
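As a sketch of the cache-related settings discussed above (parameter names as in TDengine 3.0; the sizes are illustrative, not recommendations):

```sql
-- 96 MB write buffer per vnode; additionally keep the latest row of each table
-- cached so that LAST_ROW() queries are answered from memory.
CREATE DATABASE power BUFFER 96 CACHEMODEL 'last_row';

-- Reading the "current state" directly from the cache:
SELECT LAST_ROW(*) FROM power.meters;
```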
|
|
||||||
### Persistent Storage
|
### Persistent Storage
|
||||||
|
|
||||||
TDengine uses a data-driven method to write the data from buffer into hard disk for persistent storage. When the cached data in vnode reaches a certain volume, TDengine will pull up the disk-writing thread to write the cached data into persistent storage so that subsequent data writing is not blocked. TDengine will open a new database log file when the data is written, and delete the old database log file after successfull persistence, to avoid unlimited log growth.
|
TDengine uses a data-driven method to write the data from the buffer into hard disk for persistent storage. When the cached data in a vnode reaches a certain amount, TDengine starts a disk-writing thread to write the cached data into persistent storage so that subsequent data writing is not blocked. TDengine opens a new database log file when the data is written to disk, and deletes the old database log file after successful persistence, to avoid unlimited log growth.
|
||||||
|
|
||||||
To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each file only saves data for a fixed number of days, which is determined by the system configuration parameter `“days”`. Thus for given start and end dates of a query, you can locate the data files to open immediately without any index. This greatly speeds up read operations.
|
To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each of which saves data for only a fixed number of days, as determined by the system configuration parameter `duration`. Thus, for the given start and end dates of a query, the data files to open can be located immediately without any index, which greatly speeds up read operations.
|
||||||
|
|
||||||
For time-series data, there is generally a retention policy, which is determined by the system configuration parameter `“keep”`. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space.
|
For time-series data, there is generally a retention policy, which is determined by the system configuration parameter `keep`. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space.
|
||||||
|
|
||||||
Given “days” and “keep” parameters, the total number of data files in a vnode is: keep/days. The total number of data files should not be too large or too small. 10 to 100 is appropriate. Based on this principle, reasonable days can be set. In the current version, parameter “keep” can be modified, but parameter “days” cannot be modified once it is set.
|
Given the `duration` and `keep` parameters, the total number of data files in a vnode is ceil(keep/duration) + 1. The total number of data files should not be too large or too small; 10 to 100 is appropriate. Based on this principle, a reasonable `duration` can be set. In the current version, the parameter `keep` can be modified, but the parameter `duration` cannot be modified once it is set.
|
||||||
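For example, with `DURATION 10` and `KEEP 365`, a vnode holds ceil(365/10) + 1 = 38 data files, comfortably within the suggested 10-to-100 range. A sketch (the database name is illustrative):

```sql
-- Each data file covers 10 days; data older than 365 days is dropped
-- automatically, giving ceil(365/10) + 1 = 38 data files per vnode.
CREATE DATABASE power DURATION 10 KEEP 365;
```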
|
|
||||||
In each data file, the data of a table is stored in blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of file block is determined by the system parameter `“maxRows”` (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, data location for queries will take a longer tim. If it is too small, the index of data block is too large, and the compression efficiency will be low with slower reading speed.
|
In each data file, the data of a table is stored in blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of a file block is determined by the system parameter `maxRows` (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, locating data for queries will take longer. If it is too small, the index of the data blocks becomes too large, and the compression efficiency will be lower, with slower reading speed.
|
||||||
|
|
||||||
Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, start and end time of data and other information which allows the system to locate the data to be found very quickly. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when written in disk. If the number of written records from a table does not reach the system configuration parameter `“minRows”` (minimum number of records per block), it will be stored in the last file first. At the next write operation to the disk, the newly written records will be merged with the records in last file and then written into data file.
|
Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, the start and end times of the data, and other information which allows the system to locate the data to be found very quickly. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when writing to disk. If the number of written records from a table does not reach the system configuration parameter `minRows` (minimum number of records per block), they will be stored in the last file first. At the next write operation to the disk, the newly written records will be merged with the records in the last file and then written into the data file.
|
||||||
|
|
||||||
When data is written to disk, the system decideswhether to compress the data based on the system configuration parameter `“comp”`. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression is based on one-stage compression and compressed by general compression algorithm, which has higher compression ratio.
|
When data is written to disk, the system decides whether to compress the data based on the database configuration parameter `comp`. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data, using algorithms such as delta-delta coding, simple 8B method, zig-zag coding and LZ4. Two-stage compression runs a general-purpose compression algorithm on top of one-stage compression, which yields a higher compression ratio.
|
||||||
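A sketch combining the file-block and compression parameters just described (4096 is the stated `maxRows` default; the other values are illustrative):

```sql
-- Blocks hold between 100 and 4096 rows; COMP 2 applies a general-purpose
-- compressor on top of the type-specific one-stage encodings.
CREATE DATABASE power MINROWS 100 MAXROWS 4096 COMP 2;
```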
|
|
||||||
### Tiered Storage
|
### Tiered Storage
|
||||||
|
|
||||||
|
@ -241,19 +218,20 @@ Note: Tiered Storage is only supported in Enterprise Edition
|
||||||
|
|
||||||
## Data Query
|
## Data Query
|
||||||
|
|
||||||
TDengine provides a variety of query processing functions for tables and STables. In addition to common aggregation queries, TDengine also provides window queries and statistical aggregation functions for time-series data. Query processing in TDengine needs the collaboration of client, vnode and mnode.
|
TDengine provides a variety of query processing functions for tables and STables. In addition to common aggregation queries, TDengine also provides window queries and statistical aggregation functions for time-series data. Query processing in TDengine needs the collaboration of client, vnode, qnode and mnode. A complex aggregate query on a super table may need multiple vnodes and multiple qnodes to share the query and computing tasks.
|
||||||
|
|
||||||
### Single Table Query
|
### Query Process
|
||||||
|
|
||||||
The parsing and verification of SQL statements are completed on the client side. SQL statements are parsed and generate an Abstract Syntax Tree (AST), which is then checksummed. Then metadata information (table metadata) for the table specified is requested in the query from management node (mnode).
|
1. The TDengine client driver `taosc` parses the SQL statement and generates an abstract syntax tree (AST), then checks and verifies the AST against metadata. During this stage, the metadata management module in `taosc` (Catalog) requests the metadata of the involved databases and tables from the mnode and vnodes.
|
||||||
|
2. After the verification passes, `taosc` generates a distributed query plan and optimizes it.
|
||||||
According to the End Point information in metadata information, the query request is serialized and sent to the data node (dnode) where the table is located. After receiving the query, the dnode identifies the virtual node (vnode) pointed to and forwards the message to the query execution queue of the vnode. The query execution thread of vnode establishes the basic query execution environment, immediately returns the query request and starts executing the query at the same time.
|
3. `taosc` schedules the tasks according to the configured query policy; a query sub-task may be scheduled to a vnode or qnode based on data locality and system load. Note that both vnode and qnode are logical execution units; the physical execution node is the dnode (data node).
|
||||||
|
4. When a dnode receives a query request, it identifies which vnode or qnode the request targets and forwards it to the query execution queue of that vnode or qnode.
|
||||||
When client obtains query result, the worker thread in query execution queue of dnode will wait for the execution of vnode execution thread to complete before returning the query result to the requesting client.
|
5. The query execution thread of the vnode or qnode establishes the basic query execution context, executes the query, and notifies the client once partial result data is available.
|
||||||
|
6. The TDengine client driver `taosc` then either initiates the next-level query tasks or simply fetches the result (see the example query below).
|
||||||
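For example, an aggregate query over a super table (the illustrative `meters` schema used elsewhere in these docs) exercises this whole pipeline: `taosc` plans the query, the scan sub-tasks run on the vnodes that hold the data, and the final merge may be scheduled onto a qnode:

```sql
-- Per-location aggregates over the last day; scans run on the data-holding
-- vnodes in parallel, the merge step may run on a qnode.
SELECT location, AVG(voltage), MAX(current)
  FROM power.meters
  WHERE ts > NOW - 1d
  GROUP BY location;
```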
|
|
||||||
### Aggregation by Time Axis, Downsampling, Interpolation
|
### Aggregation by Time Axis, Downsampling, Interpolation
|
||||||
|
|
||||||
Time-series data is different from ordinary data in that each record has a timestamp. So aggregating data by timestamps on the time axis is an important and distinct feature of time-series databases which is different from that of common databases. It is similar to the window query of stream computing engines.
|
Time-series data is different from ordinary data in that each record has a timestamp. So aggregating data by timestamps on the time axis is an important and distinct feature of time-series databases compared with common databases. It is similar to the window query of stream computing engines.
|
||||||
|
|
||||||
The keyword `interval` is introduced into TDengine to split fixed length time windows on the time axis. The data is aggregated based on time windows, and the data within time window ranges is aggregated as needed. For example:
|
The keyword `interval` is introduced into TDengine to split fixed length time windows on the time axis. The data is aggregated based on time windows, and the data within time window ranges is aggregated as needed. For example:
|
||||||
|
|
||||||
|
@ -269,24 +247,32 @@ In application scenarios where query results need to be obtained continuously, i
|
||||||
select count(*) from d1001 interval(1h) fill(prev);
|
select count(*) from d1001 interval(1h) fill(prev);
|
||||||
```
|
```
|
||||||
|
|
||||||
For the data collected by device D1001, the number of records per hour is counted. If there is no data in a certain hour, statistical data of the previous hour is returned. TDengine provides forward interpolation (prev), linear interpolation (linear), NULL value populating (NULL), and specific value populating (value).
|
When query results need to be obtained continuously, missing data in a given time range would leave gaps in the aggregation results for that range. TDengine provides interpolation of the aggregation results by time window through the `fill` keyword. For example:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT COUNT(*) FROM d1001 WHERE ts >= '2017-7-14 00:00:00' AND ts < '2017-7-14 23:59:59' INTERVAL(1h) FILL(PREV);
|
||||||
|
```
|
||||||
|
|
||||||
|
For the data collected by device D1001, the number of records per hour is counted. If there is no data in a certain hour, statistical data of the previous hour is returned. TDengine provides forward interpolation (prev), linear interpolation (linear), NULL value filling (NULL), and specific value filling (value).
|
||||||
|
|
||||||
### Multi-table Aggregation Query
|
### Multi-table Aggregation Query
|
||||||
|
|
||||||
TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable (super table). STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tag. There can be multiple tags which can be added, deleted and modified at any time. Applications can aggregate or statistically operate on all or a subset of tables under a STABLE by specifying tag filters. This greatly simplifies the development of applications. The process is shown in the following figure:
|
TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable (super table). An STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tags. There can be multiple tags which can be added, deleted and modified at any time. Applications can aggregate or statistically operate on all or a subset of tables under an STable by specifying tag filters. This greatly simplifies the development of applications. The process for aggregation across multiple tables is shown in the following figure:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
<center> Figure 5: Diagram of multi-table aggregation query </center>
|
<center> Figure 5: Diagram of multi-table aggregation query </center>
|
||||||
|
|
||||||
1. Application sends a query condition to system;
|
1. Client requests the metadata for the database and tables from mnode
|
||||||
2. TAOSC sends the STable name to Meta Node(management node);
|
2. mnode returns the requested metadata
|
||||||
3. Management node sends the vnode list owned by the STable back to TAOSC;
|
3. Client sends query requests to every vnode of the STable
|
||||||
4. TAOSC sends the computing request together with tag filters to multiple data nodes corresponding to these vnodes;
|
4. Each vnode performs the query locally and returns the query response to the client
|
||||||
5. Each vnode first finds the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns result to TAOSC;
|
5. Client sends query request to aggregation node, i.e. qnode
|
||||||
6. TAOSC finally aggregates the results returned by multiple data nodes and send them back to application.
|
6. qnode requests the query result data from the vnodes involved
|
||||||
|
7. Each vnode returns its local query result data
|
||||||
|
8. qnode aggregates the result and returns the final result to the client
|
||||||
|
|
||||||
Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TDengine SQL for details.
|
Since TDengine stores tag data and time-series data separately in the vnode, filtering tag data in memory to find the set of tables that need to participate in the aggregation reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed across multiple vnodes/dnodes, the aggregation is carried out concurrently in multiple vnodes, which further improves speed. Aggregation functions and most operations for ordinary tables are applicable to STables; the syntax is exactly the same. Please see TDengine SQL for details.
|
||||||
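As a concrete instance of the process above (again using the illustrative `meters` super table), a tag filter prunes the table set in memory before any time-series data is scanned:

```sql
-- Only child tables whose tag groupId equals 2 participate; each vnode
-- aggregates its local tables concurrently and the partial results are merged.
SELECT location, COUNT(*), MAX(current)
  FROM power.meters
  WHERE groupId = 2
  GROUP BY location;
```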
|
|
||||||
### Precomputation
|
### Precomputation
|
||||||
|
|
||||||
|
|
|
@ -60,7 +60,6 @@ For the configuration method, add the following text to `/etc/telegraf/telegraf.
|
||||||
username = "<TDengine's username>"
|
username = "<TDengine's username>"
|
||||||
password = "<TDengine's password>"
|
password = "<TDengine's password>"
|
||||||
data_format = "influx"
|
data_format = "influx"
|
||||||
influx_max_line_bytes = 250
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Then restart telegraf:
|
Then restart telegraf:
|
||||||
|
|
|
@ -55,14 +55,16 @@ This error indicates that the client could not connect to the server. Perform th
|
||||||
|
|
||||||
7. If you are using the Python, Java, Go, Rust, C#, or Node.js connector on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable.
|
7. If you are using the Python, Java, Go, Rust, C#, or Node.js connector on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable.
|
||||||
|
|
||||||
8. If you are using Windows, verify that `C:\TDengine\driver\taos.dll` is in the `PATH` environment variable. If possible, move `taos.dll` to the `C:\Windows\System32` directory.
|
8. If you are using macOS, verify that `libtaos.dylib` is in the `/usr/local/lib` directory and `/usr/local/lib` is in the `DYLD_LIBRARY_PATH` environment variable.
|
||||||
|
|
||||||
9. On Linux systems, you can use the `nc` tool to check whether a port is accessible:
|
9. If you are using Windows, verify that `C:\TDengine\driver\taos.dll` is in the `PATH` environment variable. If possible, move `taos.dll` to the `C:\Windows\System32` directory.
|
||||||
|
|
||||||
|
10. On Linux/macOS, you can use the `nc` tool to check whether a port is accessible:
|
||||||
- To check whether a UDP port is open, run `nc -vuz {hostIP} {port}`.
|
- To check whether a UDP port is open, run `nc -vuz {hostIP} {port}`.
|
||||||
- To check whether a TCP port on the server side is open, run `nc -l {port}`.
|
- To check whether a TCP port on the server side is open, run `nc -l {port}`.
|
||||||
- To check whether a TCP port on client side is open, run `nc {hostIP} {port}`.
|
- To check whether a TCP port on client side is open, run `nc {hostIP} {port}`.
|
||||||
|
|
||||||
10. On Windows systems, you can run `Test-NetConnection -ComputerName {fqdn} -Port {port}` in PowerShell to check whether a port on the server side is accessible.
|
On Windows systems, you can run `Test-NetConnection -ComputerName {fqdn} -Port {port}` in PowerShell to check whether a port on the server side is accessible.
|
||||||
|
|
||||||
11. You can also use the TDengine CLI to diagnose network issues. For more information, see [Problem Diagnostics](https://docs.tdengine.com/operation/diagnose/).
|
11. You can also use the TDengine CLI to diagnose network issues. For more information, see [Problem Diagnostics](https://docs.tdengine.com/operation/diagnose/).
|
||||||
|
|
||||||
|
@ -104,11 +106,11 @@ ALTER LOCAL local_option
|
||||||
|
|
||||||
local_option: {
|
local_option: {
|
||||||
'resetLog'
|
'resetLog'
|
||||||
| 'rpcDebugFlag' value
|
| 'rpcDebugFlag' 'value'
|
||||||
| 'tmrDebugFlag' value
|
| 'tmrDebugFlag' 'value'
|
||||||
| 'cDebugFlag' value
|
| 'cDebugFlag' 'value'
|
||||||
| 'uDebugFlag' value
|
| 'uDebugFlag' 'value'
|
||||||
| 'debugFlag' value
|
| 'debugFlag' 'value'
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
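A usage sketch of the syntax above; `135` is the conventional TDengine flag value that enables debug-level logging, but treat the exact value as an assumption to verify against your version:

```sql
-- Raise client-side log verbosity for all modules, then roll the log file.
ALTER LOCAL 'debugFlag' '135';
ALTER LOCAL 'resetLog';
```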
|
|
||||||
|
|
|
@ -1,9 +0,0 @@
|
||||||
---
|
|
||||||
sidebar_label: Releases
|
|
||||||
title: Released Versions
|
|
||||||
---
|
|
||||||
|
|
||||||
import Release from "/components/ReleaseV3";
|
|
||||||
|
|
||||||
|
|
||||||
<Release versionPrefix="3.0" />
|
|
|
@ -0,0 +1,32 @@
|
||||||
|
---
|
||||||
|
sidebar_label: TDengine
|
||||||
|
title: TDengine
|
||||||
|
description: TDengine release history, Release Notes and download links.
|
||||||
|
---
|
||||||
|
|
||||||
|
import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
|
## 3.0.1.5
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.0.1.5" />
|
||||||
|
|
||||||
|
## 3.0.1.4
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.0.1.4" />
|
||||||
|
|
||||||
|
## 3.0.1.3
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.0.1.3" />
|
||||||
|
|
||||||
|
## 3.0.1.2
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.0.1.2" />
|
||||||
|
|
||||||
|
## 3.0.1.1
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.0.1.1" />
|
||||||
|
|
||||||
|
## 3.0.1.0
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.0.1.0" />
|
||||||
|
|
|
@ -0,0 +1,31 @@
|
||||||
|
---
|
||||||
|
sidebar_label: taosTools
|
||||||
|
title: taosTools
|
||||||
|
description: taosTools release history, Release Notes, download links.
|
||||||
|
---
|
||||||
|
|
||||||
|
import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
|
## 2.2.6
|
||||||
|
|
||||||
|
<Release type="tools" version="2.2.6" />
|
||||||
|
|
||||||
|
## 2.2.4
|
||||||
|
|
||||||
|
<Release type="tools" version="2.2.4" />
|
||||||
|
|
||||||
|
## 2.2.3
|
||||||
|
|
||||||
|
<Release type="tools" version="2.2.3" />
|
||||||
|
|
||||||
|
## 2.2.2
|
||||||
|
|
||||||
|
<Release type="tools" version="2.2.2" />
|
||||||
|
|
||||||
|
## 2.2.0
|
||||||
|
|
||||||
|
<Release type="tools" version="2.2.0" />
|
||||||
|
|
||||||
|
## 2.1.3
|
||||||
|
|
||||||
|
<Release type="tools" version="2.1.3" />
|
|
@ -0,0 +1 @@
|
||||||
|
label: Releases
|
|
@ -184,22 +184,54 @@ void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
|
||||||
tmq_t* build_consumer() {
|
tmq_t* build_consumer() {
|
||||||
tmq_conf_res_t code;
|
tmq_conf_res_t code;
|
||||||
tmq_conf_t* conf = tmq_conf_new();
|
tmq_conf_t* conf = tmq_conf_new();
|
||||||
|
|
||||||
code = tmq_conf_set(conf, "enable.auto.commit", "true");
|
code = tmq_conf_set(conf, "enable.auto.commit", "true");
|
||||||
if (TMQ_CONF_OK != code) return NULL;
|
if (TMQ_CONF_OK != code) {
|
||||||
|
tmq_conf_destroy(conf);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
code = tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
|
code = tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
|
||||||
if (TMQ_CONF_OK != code) return NULL;
|
if (TMQ_CONF_OK != code) {
|
||||||
|
tmq_conf_destroy(conf);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
code = tmq_conf_set(conf, "group.id", "cgrpName");
|
code = tmq_conf_set(conf, "group.id", "cgrpName");
|
||||||
if (TMQ_CONF_OK != code) return NULL;
|
if (TMQ_CONF_OK != code) {
|
||||||
|
tmq_conf_destroy(conf);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
code = tmq_conf_set(conf, "client.id", "user defined name");
|
code = tmq_conf_set(conf, "client.id", "user defined name");
|
||||||
if (TMQ_CONF_OK != code) return NULL;
|
if (TMQ_CONF_OK != code) {
|
||||||
|
tmq_conf_destroy(conf);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
code = tmq_conf_set(conf, "td.connect.user", "root");
|
code = tmq_conf_set(conf, "td.connect.user", "root");
|
||||||
if (TMQ_CONF_OK != code) return NULL;
|
if (TMQ_CONF_OK != code) {
|
||||||
|
tmq_conf_destroy(conf);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
code = tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
code = tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
||||||
if (TMQ_CONF_OK != code) return NULL;
|
if (TMQ_CONF_OK != code) {
|
||||||
|
tmq_conf_destroy(conf);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
||||||
if (TMQ_CONF_OK != code) return NULL;
|
if (TMQ_CONF_OK != code) {
|
||||||
|
tmq_conf_destroy(conf);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
code = tmq_conf_set(conf, "experimental.snapshot.enable", "false");
|
code = tmq_conf_set(conf, "experimental.snapshot.enable", "false");
|
||||||
if (TMQ_CONF_OK != code) return NULL;
|
if (TMQ_CONF_OK != code) {
|
||||||
|
tmq_conf_destroy(conf);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,27 @@
|
||||||
bin
|
|
||||||
obj
|
|
||||||
.vs
|
.vs
|
||||||
*.sln
|
asyncQuery/bin
|
||||||
|
connect/bin
|
||||||
|
influxdbLine/bin
|
||||||
|
optsJSON/bin
|
||||||
|
optsTelnet/bin
|
||||||
|
query/bin
|
||||||
|
sqlInsert/bin
|
||||||
|
stmtInsert/bin
|
||||||
|
subscribe/bin
|
||||||
|
wsConnect/bin
|
||||||
|
wsInsert/bin
|
||||||
|
wsQuery/bin
|
||||||
|
wsStmt/bin
|
||||||
|
asyncQuery/obj
|
||||||
|
connect/obj
|
||||||
|
influxdbLine/obj
|
||||||
|
optsJSON/obj
|
||||||
|
optsTelnet/obj
|
||||||
|
query/obj
|
||||||
|
sqlInsert/obj
|
||||||
|
stmtInsert/obj
|
||||||
|
subscribe/obj
|
||||||
|
wsConnect/obj
|
||||||
|
wsInsert/obj
|
||||||
|
wsQuery/obj
|
||||||
|
wsStmt/obj
|
|
@ -1,82 +0,0 @@
|
||||||
using TDengineDriver;
|
|
||||||
using TDengineDriver.Impl;
|
|
||||||
using System.Runtime.InteropServices;
|
|
||||||
|
|
||||||
namespace TDengineExample
|
|
||||||
{
|
|
||||||
internal class QueryExample
|
|
||||||
{
|
|
||||||
static void Main()
|
|
||||||
{
|
|
||||||
IntPtr conn = GetConnection();
|
|
||||||
// run query
|
|
||||||
IntPtr res = TDengine.Query(conn, "SELECT * FROM meters LIMIT 2");
|
|
||||||
if (TDengine.ErrorNo(res) != 0)
|
|
||||||
{
|
|
||||||
Console.WriteLine("Failed to query since: " + TDengine.Error(res));
|
|
||||||
TDengine.Close(conn);
|
|
||||||
TDengine.Cleanup();
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// get field count
|
|
||||||
int fieldCount = TDengine.FieldCount(res);
|
|
||||||
Console.WriteLine("fieldCount=" + fieldCount);
|
|
||||||
|
|
||||||
// print column names
|
|
||||||
List<TDengineMeta> metas = LibTaos.GetMeta(res);
|
|
||||||
for (int i = 0; i < metas.Count; i++)
|
|
||||||
{
|
|
||||||
Console.Write(metas[i].name + "\t");
|
|
||||||
}
|
|
||||||
Console.WriteLine();
|
|
||||||
|
|
||||||
// print values
|
|
||||||
List<Object> resData = LibTaos.GetData(res);
|
|
||||||
for (int i = 0; i < resData.Count; i++)
|
|
||||||
{
|
|
||||||
Console.Write($"|{resData[i].ToString()} \t");
|
|
||||||
if (((i + 1) % metas.Count == 0))
|
|
||||||
{
|
|
||||||
Console.WriteLine("");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Console.WriteLine();
|
|
||||||
|
|
||||||
if (TDengine.ErrorNo(res) != 0)
|
|
||||||
{
|
|
||||||
Console.WriteLine($"Query is not complete, Error {TDengine.ErrorNo(res)} {TDengine.Error(res)}");
|
|
||||||
}
|
|
||||||
// exit
|
|
||||||
TDengine.FreeResult(res);
|
|
||||||
TDengine.Close(conn);
|
|
||||||
TDengine.Cleanup();
|
|
||||||
}
|
|
||||||
static IntPtr GetConnection()
|
|
||||||
{
|
|
||||||
string host = "localhost";
|
|
||||||
short port = 6030;
|
|
||||||
string username = "root";
|
|
||||||
string password = "taosdata";
|
|
||||||
string dbname = "power";
|
|
||||||
var conn = TDengine.Connect(host, username, password, dbname, port);
|
|
||||||
if (conn == IntPtr.Zero)
|
|
||||||
{
|
|
||||||
Console.WriteLine("Connect to TDengine failed");
|
|
||||||
System.Environment.Exit(0);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
Console.WriteLine("Connect to TDengine success");
|
|
||||||
}
|
|
||||||
return conn;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// output:
|
|
||||||
// Connect to TDengine success
|
|
||||||
// fieldCount=6
|
|
||||||
// ts current voltage phase location groupid
|
|
||||||
// 1648432611249 10.3 219 0.31 California.SanFrancisco 2
|
|
||||||
// 1648432611749 12.6 218 0.33 California.SanFrancisco 2
|
|
|
@ -1,70 +0,0 @@
|
||||||
using TDengineDriver;
|
|
||||||
|
|
||||||
|
|
||||||
namespace TDengineExample
|
|
||||||
{
|
|
||||||
internal class SQLInsertExample
|
|
||||||
{
|
|
||||||
|
|
||||||
static void Main()
|
|
||||||
{
|
|
||||||
IntPtr conn = GetConnection();
|
|
||||||
IntPtr res = TDengine.Query(conn, "CREATE DATABASE power");
|
|
||||||
CheckRes(conn, res, "failed to create database");
|
|
||||||
res = TDengine.Query(conn, "USE power");
|
|
||||||
CheckRes(conn, res, "failed to change database");
|
|
||||||
res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
|
|
||||||
CheckRes(conn, res, "failed to create stable");
|
|
||||||
var sql = "INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " +
|
|
||||||
"d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " +
|
|
||||||
"d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
|
|
||||||
"d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
|
|
||||||
res = TDengine.Query(conn, sql);
|
|
||||||
CheckRes(conn, res, "failed to insert data");
|
|
||||||
int affectedRows = TDengine.AffectRows(res);
|
|
||||||
Console.WriteLine("affectedRows " + affectedRows);
|
|
||||||
TDengine.FreeResult(res);
|
|
||||||
ExitProgram(conn, 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
static IntPtr GetConnection()
|
|
||||||
{
|
|
||||||
string host = "localhost";
|
|
||||||
short port = 6030;
|
|
||||||
string username = "root";
|
|
||||||
string password = "taosdata";
|
|
||||||
string dbname = "";
|
|
||||||
var conn = TDengine.Connect(host, username, password, dbname, port);
|
|
||||||
if (conn == IntPtr.Zero)
|
|
||||||
{
|
|
||||||
Console.WriteLine("Connect to TDengine failed");
|
|
||||||
Environment.Exit(0);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
Console.WriteLine("Connect to TDengine success");
|
|
||||||
}
|
|
||||||
return conn;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void CheckRes(IntPtr conn, IntPtr res, String errorMsg)
|
|
||||||
{
|
|
||||||
if (TDengine.ErrorNo(res) != 0)
|
|
||||||
{
|
|
||||||
Console.Write(errorMsg + " since: " + TDengine.Error(res));
|
|
||||||
ExitProgram(conn, 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static void ExitProgram(IntPtr conn, int exitCode)
|
|
||||||
{
|
|
||||||
TDengine.Close(conn);
|
|
||||||
TDengine.Cleanup();
|
|
||||||
Environment.Exit(exitCode);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// output:
|
|
||||||
// Connect to TDengine success
|
|
||||||
// affectedRows 8
|
|
|
@ -11,11 +11,17 @@ namespace TDengineExample
|
||||||
static void Main()
|
static void Main()
|
||||||
{
|
{
|
||||||
IntPtr conn = GetConnection();
|
IntPtr conn = GetConnection();
|
||||||
|
try
|
||||||
|
{
|
||||||
QueryAsyncCallback queryAsyncCallback = new QueryAsyncCallback(QueryCallback);
|
QueryAsyncCallback queryAsyncCallback = new QueryAsyncCallback(QueryCallback);
|
||||||
TDengine.QueryAsync(conn, "select * from meters", queryAsyncCallback, IntPtr.Zero);
|
TDengine.QueryAsync(conn, "select * from meters", queryAsyncCallback, IntPtr.Zero);
|
||||||
Thread.Sleep(2000);
|
Thread.Sleep(2000);
|
||||||
|
}
|
||||||
|
finally
|
||||||
|
{
|
||||||
TDengine.Close(conn);
|
TDengine.Close(conn);
|
||||||
TDengine.Cleanup();
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void QueryCallback(IntPtr param, IntPtr taosRes, int code)
|
static void QueryCallback(IntPtr param, IntPtr taosRes, int code)
|
||||||
|
@ -27,7 +33,7 @@ namespace TDengineExample
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
Console.WriteLine($"async query data failed, failed code {code}");
|
throw new Exception($"async query data failed,code:{code},reason:{TDengine.Error(taosRes)}");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -43,7 +49,7 @@ namespace TDengineExample
|
||||||
|
|
||||||
for (int i = 0; i < dataList.Count; i++)
|
for (int i = 0; i < dataList.Count; i++)
|
||||||
{
|
{
|
||||||
if (i != 0 && (i+1) % metaList.Count == 0)
|
if (i != 0 && (i + 1) % metaList.Count == 0)
|
||||||
{
|
{
|
||||||
Console.WriteLine("{0}\t|", dataList[i]);
|
Console.WriteLine("{0}\t|", dataList[i]);
|
||||||
}
|
}
|
||||||
|
@ -63,7 +69,7 @@ namespace TDengineExample
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
Console.WriteLine($"FetchRawBlockCallback callback error, error code {numOfRows}");
|
throw new Exception($"FetchRawBlockCallback callback error, error code {numOfRows}");
|
||||||
}
|
}
|
||||||
TDengine.FreeResult(taosRes);
|
TDengine.FreeResult(taosRes);
|
||||||
}
|
}
|
||||||
|
@ -79,8 +85,7 @@ namespace TDengineExample
|
||||||
var conn = TDengine.Connect(host, username, password, dbname, port);
|
var conn = TDengine.Connect(host, username, password, dbname, port);
|
||||||
if (conn == IntPtr.Zero)
|
if (conn == IntPtr.Zero)
|
||||||
{
|
{
|
||||||
Console.WriteLine("Connect to TDengine failed");
|
throw new Exception("Connect to TDengine failed");
|
||||||
Environment.Exit(0);
|
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
|
@ -9,7 +9,7 @@
|
||||||
</PropertyGroup>
|
</PropertyGroup>
|
||||||
|
|
||||||
<ItemGroup>
|
<ItemGroup>
|
||||||
<PackageReference Include="TDengine.Connector" Version="3.0.0" />
|
<PackageReference Include="TDengine.Connector" Version="3.0.1" />
|
||||||
</ItemGroup>
|
</ItemGroup>
|
||||||
|
|
||||||
</Project>
|
</Project>
|
|
@ -16,7 +16,7 @@ namespace TDengineExample
|
||||||
var conn = TDengine.Connect(host, username, password, dbname, port);
|
var conn = TDengine.Connect(host, username, password, dbname, port);
|
||||||
if (conn == IntPtr.Zero)
|
if (conn == IntPtr.Zero)
|
||||||
{
|
{
|
||||||
Console.WriteLine("Connect to TDengine failed");
|
throw new Exception("Connect to TDengine failed");
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|