Merge branch '3.0' into docs/wade-config

@@ -88,4 +88,3 @@ Standard: Auto
 TabWidth: 8
 UseTab: Never
 ...
-

@@ -0,0 +1 @@
+*.py linguist-detectable=false

@@ -1,5 +1,6 @@
 build/
 compile_commands.json
+CMakeSettings.json
 .cache
 .ycm_extra_conf.py
 .tasks

@@ -126,4 +127,5 @@ tools/THANKS
 tools/NEWS
 tools/COPYING
 tools/BUGS
 tools/taos-tools
+tools/taosws-rs

@@ -17,11 +17,12 @@ include(${TD_SUPPORT_DIR}/cmake.platform)
 include(${TD_SUPPORT_DIR}/cmake.define)
 include(${TD_SUPPORT_DIR}/cmake.options)
 include(${TD_SUPPORT_DIR}/cmake.version)
+include(${TD_SUPPORT_DIR}/cmake.install)
 
 # contrib
 add_subdirectory(contrib)
 
 set_property(GLOBAL PROPERTY GLOBAL_DEPENDS_NO_CYCLES OFF)
 
 # api
 add_library(api INTERFACE)
 target_include_directories(api INTERFACE "include/client")

@@ -34,10 +35,9 @@ endif(${BUILD_TEST})
 
 add_subdirectory(source)
 add_subdirectory(tools)
 add_subdirectory(tests)
 add_subdirectory(utils)
 add_subdirectory(examples/c)
-include(${TD_SUPPORT_DIR}/cmake.install)
 
 # docs
 add_subdirectory(docs/doxgen)
 
 # tests (TODO)

@@ -1,25 +0,0 @@
-{
-  "configurations": [
-    {
-      "name": "WSL-GCC-Debug",
-      "generator": "Unix Makefiles",
-      "configurationType": "Debug",
-      "buildRoot": "${projectDir}\\build\\",
-      "installRoot": "${projectDir}\\build\\",
-      "cmakeExecutable": "/usr/bin/cmake",
-      "cmakeCommandArgs": "",
-      "buildCommandArgs": "",
-      "ctestCommandArgs": "",
-      "inheritEnvironments": [ "linux_x64" ],
-      "wslPath": "${defaultWSLPath}",
-      "addressSanitizerRuntimeFlags": "detect_leaks=0",
-      "variables": [
-        {
-          "name": "CMAKE_INSTALL_PREFIX",
-          "value": "/mnt/d/TDengine/TDengine/build",
-          "type": "PATH"
-        }
-      ]
-    }
-  ]
-}

@@ -79,7 +79,7 @@ def pre_test(){
 rm -rf debug
 mkdir debug
 cd debug
-cmake .. > /dev/null
+cmake .. -DBUILD_TEST=true > /dev/null
 make -j4> /dev/null
 
 '''

Jenkinsfile2 (10 changes)

@@ -173,7 +173,7 @@ def pre_test_build_mac() {
 '''
 sh '''
 cd ${WK}/debug
-cmake ..
+cmake .. -DBUILD_TEST=true
 make -j8
 '''
 sh '''

@@ -218,12 +218,12 @@ def pre_test_win(){
 if (env.CHANGE_URL =~ /\/TDengine\//) {
 bat '''
 cd %WIN_INTERNAL_ROOT%
-git pull
+git pull origin ''' + env.CHANGE_TARGET + '''
 '''
 bat '''
 cd %WIN_COMMUNITY_ROOT%
 git remote prune origin
-git pull
+git pull origin ''' + env.CHANGE_TARGET + '''
 '''
 bat '''
 cd %WIN_COMMUNITY_ROOT%

@@ -236,7 +236,7 @@ def pre_test_win(){
 } else if (env.CHANGE_URL =~ /\/TDinternal\//) {
 bat '''
 cd %WIN_INTERNAL_ROOT%
-git pull
+git pull origin ''' + env.CHANGE_TARGET + '''
 '''
 bat '''
 cd %WIN_INTERNAL_ROOT%

@@ -302,7 +302,7 @@ def pre_test_build_win() {
 set CL=/MP8
 echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> cmake"
 time /t
-cmake .. -G "NMake Makefiles JOM" || exit 7
+cmake .. -G "NMake Makefiles JOM" -DBUILD_TEST=true || exit 7
 echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> jom -j 6"
 time /t
 jom -j 6 || exit 8

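Note: the common thread in the Jenkinsfile and Jenkinsfile2 hunks above is that every build step now passes `-DBUILD_TEST=true` to cmake, since `BUILD_TEST` defaults to OFF after this change (see cmake.options further down). A rough local equivalent of the Linux `pre_test` build, as a sketch that simply mirrors the pipeline commands:

```bash
# Clean rebuild with unit tests explicitly enabled, as pre_test() now does.
rm -rf debug
mkdir debug && cd debug
cmake .. -DBUILD_TEST=true > /dev/null
make -j4 > /dev/null
```
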
README-CN.md (34 changes)

@@ -39,9 +39,9 @@ TDengine is an open-source, high-performance, cloud-native time-series database (Time-Series
 
 # Building
 
-TDengine can currently be installed and run on Linux, Windows, and other platforms. Applications on any OS can also use the RESTful interface of taosAdapter to connect to the taosd server. X64/ARM64 CPUs are supported, and MIPS64, Alpha64, ARM32, RISC-V, and other CPU architectures will be supported later.
+TDengine can currently be installed and run on Linux, Windows, macOS, and other platforms. Applications on any OS can also use the RESTful interface of taosAdapter to connect to the taosd server. X64/ARM64 CPUs are supported, and MIPS64, Alpha64, ARM32, RISC-V, and other CPU architectures will be supported later.
 
-Users can choose to install via source code, [container](https://docs.taosdata.com/get-started/docker/), [installation package](https://docs.taosdata.com/get-started/package/), or [Kubenetes](https://docs.taosdata.com/deployment/k8s/) as needed. This quick guide applies only to installing from source.
+Users can choose to install via source code, [container](https://docs.taosdata.com/get-started/docker/), [installation package](https://docs.taosdata.com/get-started/package/), or [Kubernetes](https://docs.taosdata.com/deployment/k8s/) as needed. This quick guide applies only to installing from source.
 
 TDengine also provides a set of auxiliary tools, taosTools, which currently includes taosBenchmark (formerly named taosdemo) and taosdump. By default, TDengine is built without taosTools; you can pass `cmake .. -DBUILD_TOOLS=true` when building TDengine to build taosTools at the same time.

@@ -104,6 +104,12 @@ sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgco
 sudo yum config-manager --set-enabled Powertools
 ```
 
+### macOS
+
+```
+brew install argp-standalone pkgconfig
+```
+
 ### Set up the Go development environment
 
 TDengine includes several components developed in Go, such as taosAdapter. Please refer to the official documentation at golang.org to set up your Go development environment.

@@ -210,14 +216,14 @@ cmake .. -G "NMake Makefiles"
 nmake
 ```
 
-<!-- ### On macOS platform
+### On macOS platform
 
-Install the Xcode command line tools and cmake. On Catalina and Big Sur, XCode 11.4+ is required.
+Install the XCode command line tools and cmake. On Catalina and Big Sur, XCode 11.4+ is required.
 
 ```bash
 mkdir debug && cd debug
 cmake .. && cmake --build .
-``` -->
+```
 
 # Installing
 

@@ -263,6 +269,24 @@ nmake install
 sudo make install
 ```
 
 Users can learn more about the directories and files generated on the system in [Directory Structure](https://docs.taosdata.com/reference/directory/).
 
 Installing from source also configures service management for TDengine. Users can also choose to [install from packages](https://docs.taosdata.com/get-started/package/) instead.
+
+After a successful installation, you can double-click the TDengine icon in Applications to start the service, or start the TDengine service in a terminal:
+
+```bash
+launchctl start com.tdengine.taosd
+```
+
+Users can use the TDengine CLI to connect to the TDengine service. In a terminal, enter:
+
+```bash
+taos
+```
+
+If the TDengine CLI connects to the service successfully, a welcome message and version information are printed; otherwise, an error message is printed.
+
+## Quick Run
+
+If you do not want to run TDengine as a service, you can also run it directly in a terminal: after the build completes, run the following command (on Windows, the generated executable has a .exe suffix, for example taosd.exe):

README.md (56 changes)

@@ -15,31 +15,33 @@
 [](https://coveralls.io/github/taosdata/TDengine?branch=develop)
 [](https://bestpractices.coreinfrastructure.org/projects/4201)
 
-English | [简体中文](README-CN.md) | We are hiring, check [here](https://tdengine.com/careers)
+English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine.com) | [Learn more about TSDB](https://tdengine.com/tsdb/)
 
 # What is TDengine?
 
-TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/what-is-a-time-series-database/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-seires databases with the following advantages:
+TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-series databases with the following advantages:
 
-- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
+- **[High Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
 
 - **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
 
 - **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
 
-- **[Ease of Use](https://docs.tdengine.com/get-started/docker/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
+- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
 
 - **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
 
 - **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub. There is an active developer community, and over 139k running instances worldwide.
 
+For a full list of TDengine competitive advantages, please [check here](https://tdengine.com/tdengine/). The easiest way to experience TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
+
 # Documentation
 
 For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) ([TDengine 文档](https://docs.taosdata.com))
 
 # Building
 
-At the moment, TDengine server supports running on Linux and Windows systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future.
+At the moment, TDengine server supports running on Linux/Windows/macOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future.
 
 You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to installing from source.
 

@@ -103,6 +105,12 @@ If the PowerTools installation fails, you can try to use:
 sudo yum config-manager --set-enabled powertools
 ```
 
+### macOS
+
+```
+brew install argp-standalone pkgconfig
+```
+
 ### Setup golang environment
 
 TDengine includes a few components like taosAdapter developed by Go language. Please refer to golang.org official documentation for golang environment setup.

@@ -211,14 +219,14 @@ cmake .. -G "NMake Makefiles"
 nmake
 ```
 
-<!-- ### On macOS platform
+### On macOS platform
 
 Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur.
 
 ```shell
 mkdir debug && cd debug
 cmake .. && cmake --build .
-``` -->
+```
 
 # Installing
 

@@ -230,9 +238,9 @@ After building successfully, TDengine can be installed by
 sudo make install
 ```
 
-Users can find more information about directories installed on the system in the [directory and files](https://docs.taosdata.com/reference/directory/) section.
+Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section.
 
-Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.taosdata.com/get-started/package/) for it.
+Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it.
 
 To start the service after installation, in a terminal, use:
 

@@ -256,7 +264,7 @@ After building successfully, TDengine can be installed by:
 nmake install
 ```
 
+<!--
 
 ## On macOS platform
 
 After building successfully, TDengine can be installed by:

@@ -264,7 +272,24 @@ After building successfully, TDengine can be installed by:
 ```bash
 sudo make install
 ```
+-->
+
+Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section.
+
+Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it.
+
+To start the service after installation, double-click the /applications/TDengine to start the program, or in a terminal, use:
+
+```bash
+launchctl start com.tdengine.taosd
+```
+
+Then users can use the TDengine CLI to connect the TDengine server. In a terminal, use:
+
+```bash
+taos
+```
+
+If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown.
+
+## Quick Run
+

@@ -319,6 +344,11 @@ TDengine provides abundant developing tools for users to develop on TDengine. Fo
 
 Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to the project.
 
-# Join TDengine WeChat Group
+# Join the TDengine Community
 
-Add WeChat “tdengine” to join the group,you can communicate with other users.
+For more information about TDengine, you can follow us on social media and join our Discord server:
+
+- [Discord](https://discord.com/invite/VZdSuUg4pS)
+- [Twitter](https://twitter.com/TaosData)
+- [LinkedIn](https://www.linkedin.com/company/tdengine/)
+- [YouTube](https://www.youtube.com/channel/UCmp-1U6GS_3V3hjir6Uq5DQ)

TDenginelogo.png (BIN, deleted; was 19 KiB)

@@ -117,8 +117,8 @@ ELSE ()
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
 MESSAGE(STATUS "Will compile with Address Sanitizer!")
 ELSE ()
-SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=0")
-SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=0")
+SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
 ENDIF ()
 
 MESSAGE("System processor ID: ${CMAKE_SYSTEM_PROCESSOR}")

@@ -2,6 +2,12 @@
 # Deps options
 # =========================================================
 
+option(
+BUILD_TEST
+"If build unit tests using googletest"
+OFF
+)
+
 IF(${TD_WINDOWS})
 
 MESSAGE("build pthread Win32")

@@ -45,12 +51,6 @@ IF(${TD_WINDOWS})
 "If build wingetopt on Windows"
 ON
 )
 
-option(
-BUILD_TEST
-"If build unit tests using googletest"
-ON
-)
-
 option(
 TDENGINE_3

@@ -65,28 +65,8 @@ IF(${TD_WINDOWS})
 )
 
 ELSEIF (TD_DARWIN_64)
-add_definitions(-DCOMPILER_SUPPORTS_CXX13)
-option(
-BUILD_TEST
-"If build unit tests using googletest"
-ON
-)
-ELSE ()
-include(CheckCXXCompilerFlag)
-CHECK_CXX_COMPILER_FLAG("-std=c++13" COMPILER_SUPPORTS_CXX13)
-IF(${COMPILER_SUPPORTS_CXX13})
+IF(${BUILD_TEST})
 add_definitions(-DCOMPILER_SUPPORTS_CXX13)
-option(
-BUILD_TEST
-"If build unit tests using googletest"
-ON
-)
-ELSE ()
-option(
-BUILD_TEST
-"If build unit tests using googletest"
-OFF
-)
 ENDIF ()
 ENDIF ()
 

@@ -45,10 +45,19 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
 ADD_DEFINITIONS("-DDARWIN -Wno-tautological-pointer-compare")
 
 MESSAGE("Current system processor is ${CMAKE_SYSTEM_PROCESSOR}.")
-IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86_64")
+IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64")
 MESSAGE("Current system arch is arm64")
 SET(TD_DARWIN_64 TRUE)
+SET(TD_DARWIN_ARM64 TRUE)
 ADD_DEFINITIONS("-D_TD_DARWIN_64")
+ADD_DEFINITIONS("-D_TD_DARWIN_ARM64")
+ENDIF ()
+IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86_64")
+MESSAGE("Current system arch is x86_64")
+SET(TD_DARWIN_64 TRUE)
+SET(TD_DARWIN_X64 TRUE)
+ADD_DEFINITIONS("-D_TD_DARWIN_64")
+ADD_DEFINITIONS("-D_TD_DARWIN_X64")
 ENDIF ()
 
 ADD_DEFINITIONS("-DHAVE_UNISTD_H")

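Note: both new Darwin branches key off `CMAKE_SYSTEM_PROCESSOR`, which for native macOS builds matches the machine architecture. A quick way to check which branch a given Mac will take (illustrative only):

```bash
# Prints arm64 on Apple Silicon and x86_64 on Intel Macs; this is the value
# the new IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES ...) blocks branch on.
uname -m
```
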
@@ -87,7 +96,7 @@ IF ("${CPUTYPE}" STREQUAL "")
 SET(TD_ARM_32 TRUE)
 ADD_DEFINITIONS("-D_TD_ARM_")
 ADD_DEFINITIONS("-D_TD_ARM_32")
-ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
+ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(aarch64)|(arm64)")
 MESSAGE(STATUS "The current platform is aarch64")
 SET(PLATFORM_ARCH_STR "arm64")
 SET(TD_ARM_64 TRUE)

|
@ -2,7 +2,7 @@
|
|||
IF (DEFINED VERNUMBER)
|
||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||
ELSE ()
|
||||
SET(TD_VER_NUMBER "3.0.0.1")
|
||||
SET(TD_VER_NUMBER "3.0.1.5")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED VERCOMPATIBLE)
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taosadapter
|
||||
ExternalProject_Add(taosadapter
|
||||
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
||||
GIT_TAG abed566
|
||||
GIT_TAG cc43ef0
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taos-tools
|
||||
ExternalProject_Add(taos-tools
|
||||
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
|
||||
GIT_TAG aa45ad4
|
||||
GIT_TAG cc973e0
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taosws-rs
|
||||
ExternalProject_Add(taosws-rs
|
||||
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
|
||||
GIT_TAG 7a54d21
|
||||
GIT_TAG 0373a70
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -37,6 +37,11 @@ if(${BUILD_WITH_ICONV})
|
|||
cat("${TD_SUPPORT_DIR}/iconv_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif()
|
||||
|
||||
# jemalloc
|
||||
if(${JEMALLOC_ENABLED})
|
||||
cat("${TD_SUPPORT_DIR}/jemalloc_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif()
|
||||
|
||||
# msvc regex
|
||||
if(${BUILD_MSVCREGEX})
|
||||
cat("${TD_SUPPORT_DIR}/msvcregex_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
|
@@ -258,6 +263,19 @@ if(${BUILD_PTHREAD})
 target_link_libraries(pthread INTERFACE libpthreadVC3)
 endif()
 
+# jemalloc
+if(${JEMALLOC_ENABLED})
+include(ExternalProject)
+ExternalProject_Add(jemalloc
+PREFIX "jemalloc"
+SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
+BUILD_IN_SOURCE 1
+CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --disable-initial-exec-tls --with-malloc-conf='background_thread:true,metadata_thp:auto'
+BUILD_COMMAND ${MAKE}
+)
+INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)
+endif()
+
 # crashdump
 if(${BUILD_CRASHDUMP})
 add_executable(dumper "crashdump/dumper/dumper.c")

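Note: the two jemalloc hunks above make the allocator an optional contrib build guarded by `if(${JEMALLOC_ENABLED})`. Presumably it is enabled at configure time roughly as follows (a sketch; the exact cache-variable spelling is an assumption based on the guard):

```bash
# Hypothetical configure invocation that turns on the jemalloc contrib target.
mkdir debug && cd debug
cmake .. -DJEMALLOC_ENABLED=true
make -j4
```
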
@@ -4,25 +4,26 @@ sidebar_label: Documentation Home
 slug: /
 ---
 
-TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators.
+TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It’s written mainly for architects, developers, and system administrators.
 
 To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.
 
-TDengine greatly improves the efficiency of data ingestion, querying and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [“Concepts”](./concept) thoroughly.
+TDengine greatly improves the efficiency of data ingestion, querying, and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [Concepts](./concept) thoroughly.
 
-If you are a developer, please read the [“Developer Guide”](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work.
+If you are a developer, please read the [Developer Guide](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, and make a few changes to accommodate your application, and it will work.
 
-We live in the era of big data, and scale-up is unable to meet the growing needs of business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to ["cluster deployment"](../deployment).
+We live in the era of big data, and scale-up is unable to meet the growing needs of the business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to [Cluster Deployment](../deployment).
 
-TDengine uses ubiquitious SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll up, interpolation and time weighted average, among many others. The ["SQL Reference"](./taos-sql) chapter describes the SQL syntax in detail, and lists the various supported commands and functions.
+TDengine uses ubiquitous SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll-up, interpolation, and time-weighted average, among many others. The [SQL Reference](./taos-sql) chapter describes the SQL syntax in detail and lists the various supported commands and functions.
 
-If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the ["Administration"](./operation) section.
+If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the [Administration](./operation) section.
 
-If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the ["Reference"](./reference) chapter.
+If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the [Reference](./reference) chapter.
 
-If you are very interested in the internal design of TDengine, please read the chapter ["Inside TDengine”](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
+If you are very interested in the internal design of TDengine, please read the chapter [Inside TDengine](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
 
-TDengine is an open source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation, or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
+To get more general introduction about time series database, please read through [a series of articles](https://tdengine.com/tsdb/). To lean more competitive advantages about TDengine, please read through [a series of blogs](https://tdengine.com/tdengine/).
 
-Together, we make a difference.
+TDengine is an open-source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
+
+Together, we make a difference!

@@ -3,7 +3,7 @@ title: Introduction
 toc_max_heading_level: 2
 ---
 
-TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
+TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [high-performance](https://tdengine.com/tdengine/high-performance-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
 
 This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine.
 

@@ -12,60 +12,68 @@ This section introduces the major features, competitive advantages, typical use-
 The major features are listed below:
 
 1. Insert data
-   * supports [using SQL to insert](../develop/insert-data/sql-writing).
-   * supports [schemaless writing](../reference/schemaless/) just like NoSQL databases. It also supports standard protocols like [InfluxDB LINE](../develop/insert-data/influxdb-line),[OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](../develop/insert-data/opentsdb-json) among others.
-   * supports seamless integration with third-party tools like [Telegraf](../third-party/telegraf/), [Prometheus](../third-party/prometheus/), [collectd](../third-party/collectd/), [StatsD](../third-party/statsd/), [TCollector](../third-party/tcollector/) and [icinga2/](../third-party/icinga2/), they can write data into TDengine with simple configuration and without a single line of code.
+   - Supports [using SQL to insert](../develop/insert-data/sql-writing).
+   - Supports [schemaless writing](../reference/schemaless/) just like NoSQL databases. It also supports standard protocols like [InfluxDB Line](../develop/insert-data/influxdb-line), [OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](../develop/insert-data/opentsdb-json) among others.
+   - Supports seamless integration with third-party tools like [Telegraf](../third-party/telegraf/), [Prometheus](../third-party/prometheus/), [collectd](../third-party/collectd/), [StatsD](../third-party/statsd/), [TCollector](../third-party/tcollector/), [EMQX](../third-party/emq-broker), [HiveMQ](../third-party/hive-mq-broker), and [Icinga2](../third-party/icinga2/), they can write data into TDengine with simple configuration and without a single line of code.
 2. Query data
-   * supports standard [SQL](../taos-sql/), including nested query.
-   * supports [time series specific functions](../taos-sql/function/#time-series-extensions) and [time series specific queries](../taos-sql/distinguished), like downsampling, interpolation, cumulated sum, time weighted average, state window, session window and many others.
-   * supports [user defined functions](../taos-sql/udf).
+   - Supports standard [SQL](../taos-sql/), including nested query.
+   - Supports [time series specific functions](../taos-sql/function/#time-series-extensions) and [time series specific queries](../taos-sql/distinguished), like downsampling, interpolation, cumulated sum, time weighted average, state window, session window and many others.
+   - Supports [User Defined Functions (UDF)](../taos-sql/udf).
 3. [Caching](../develop/cache/): TDengine always saves the last data point in cache, so Redis is not needed for time-series data processing.
-4. [Stream Processing](../develop/stream/): not only is the continuous query is supported, but TDengine also supports even driven stream processing, so Flink or spark is not needed for time-series daata processing.
-5. [Data Dubscription](../develop/tmq/): application can subscribe a table or a set of tables. API is the same as Kafka, but you can specify filter conditions.
+4. [Stream Processing](../develop/stream/): Not only is the continuous query is supported, but TDengine also supports event driven stream processing, so Flink or Spark is not needed for time-series data processing.
+5. [Data Subscription](../develop/tmq/): Application can subscribe a table or a set of tables. API is the same as Kafka, but you can specify filter conditions.
 6. Visualization
-   * supports seamless integration with [Grafana](../third-party/grafana/) for visualization.
-   * supports seamless integration with Google Data Studio.
+   - Supports seamless integration with [Grafana](../third-party/grafana/).
+   - Supports seamless integration with [Google Data Studio](../third-party/google-data-studio/).
 7. Cluster
-   * supports [cluster](../deployment/) with the capability of increasing processing power by adding more nodes.
-   * supports [deployment on Kubernetes](../deployment/k8s/)
-   * supports high availability via data replication.
+   - Supports [cluster](../deployment/) with the capability of increasing processing power by adding more nodes.
+   - Supports [deployment on Kubernetes](../deployment/k8s/).
+   - Supports high availability via data replication.
 8. Administration
-   * provides [monitoring](../operation/monitor) on running instances of TDengine.
-   * provides many ways to [import](../operation/import) and [export](../operation/export) data.
+   - Provides [monitoring](../operation/monitor) on running instances of TDengine.
+   - Provides many ways to [import](../operation/import) and [export](../operation/export) data.
 9. Tools
-   * provides an interactive [command-line interface](../reference/taos-shell) for management, maintenance and ad-hoc queries.
-   * provides a tool [taosBenchmark](../reference/taosbenchmark/) for testing the performance of TDengine.
+   - Provides an interactive [Command Line Interface (CLI)](../reference/taos-shell) for management, maintenance and ad-hoc queries.
+   - Provides a tool [taosBenchmark](../reference/taosbenchmark/) for testing the performance of TDengine.
 10. Programming
-    * provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages.
-    * provides a [REST API](../reference/rest-api/).
+    - Provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages.
+    - Provides a [REST API](../reference/rest-api/).
 
 For more details on features, please read through the entire documentation.
 
 ## Competitive Advantages
 
-By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other time series databases, with the following advantages.
+By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb), with the following advantages.
 
 - **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
 
 - **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
 
-- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
+- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for Kubernetes deployment and full observability, TDengine is a cloud native Time-series Database and can be deployed on public, private or hybrid clouds.
 
 - **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to[
 ](https://tdengine.com/tdengine/easy-time-series-data-platform/) deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
 
 - **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
 
 - **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
 
-With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly;2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly;3: With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
+With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced.
+
+1. With its superior performance, the computing and storage resources are reduced significantly.
+2. With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly.
+3. With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
 
 ## Technical Ecosystem
 
 This is how TDengine would be situated, in a typical time-series data processing platform:
 
 <figure>
 
 
 
-<center>Figure 1. TDengine Technical Ecosystem</center>
+<center><figcaption>Figure 1. TDengine Technical Ecosystem</figcaption></center>
 </figure>
 
 On the left-hand side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right-hand side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides an interactive command-line interface and a web interface for management and maintenance.

@@ -75,42 +83,42 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
 
 ### Characteristics and Requirements of Data Sources
 
 | **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
 | ------------------------------------------------ | ------------------ | ----------------------- | ------------------- | :-------------- |
 | A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with matching high compression ratio to achieve the best storage efficiency in the industry. |
 | Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. |
 | A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. |
 
 ### System Architecture Requirements
 
 | **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
 | ------------------------------------ | ------------------ | ----------------------- | ------------------- | --------------- |
 | A simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions. There is no need to integrate any additional third-party products. |
 | Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. |
 | Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. |
 
 ### System Function Requirements
 
 | **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
 | -------------------------------- | ------------------ | ----------------------- | ------------------- | --------------- |
 | Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level. |
 | A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. |
 
 ### System Performance Requirements
 
 | **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
 | ----------------------------------- | ------------------ | ----------------------- | ------------------- | --------------- |
 | Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
 | Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
 | Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
 
 ### System Maintenance Requirements
 
 | **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
 | ----------------------------------- | ------------------ | ----------------------- | ------------------- | --------------- |
 | Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
 | Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the TDengine CLI for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs. |
 | Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine. |
 
 ## Comparison with other databases
 

@@ -119,3 +127,8 @@
 - [TDengine vs OpenTSDB](https://tdengine.com/2019/09/12/710.html)
 - [TDengine vs Cassandra](https://tdengine.com/2019/09/12/708.html)
 - [TDengine vs InfluxDB](https://tdengine.com/2019/09/12/706.html)
+
+## More readings
+
+- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
+- [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/)

@@ -6,101 +6,100 @@ In order to explain the basic concepts and provide some sample code, the TDengin
 <div className="center-table">
 <table>
-<thead><tr>
-<th>Device ID</th>
-<th>Time Stamp</th>
-<th colSpan="3">Collected Metrics</th>
-<th colSpan="2">Tags</th>
+<thead>
+<tr>
+<th rowSpan="2">Device ID</th>
+<th rowSpan="2">Timestamp</th>
+<th colSpan="3">Collected Metrics</th>
+<th colSpan="2">Tags</th>
 </tr>
 <tr>
-<th>Device ID</th>
-<th>Time Stamp</th>
 <th>current</th>
 <th>voltage</th>
 <th>phase</th>
 <th>location</th>
-<th>groupId</th>
+<th>groupid</th>
 </tr>
 </thead>
 <tbody>
 <tr><td>d1001</td><td>1538548685000</td><td>10.3</td><td>219</td><td>0.31</td><td>California.SanFrancisco</td><td>2</td></tr>
 <tr><td>d1002</td><td>1538548684000</td><td>10.2</td><td>220</td><td>0.23</td><td>California.SanFrancisco</td><td>3</td></tr>
 <tr><td>d1003</td><td>1538548686500</td><td>11.5</td><td>221</td><td>0.35</td><td>California.LosAngeles</td><td>3</td></tr>
 <tr><td>d1004</td><td>1538548685500</td><td>13.4</td><td>223</td><td>0.29</td><td>California.LosAngeles</td><td>2</td></tr>
 <tr><td>d1001</td><td>1538548695000</td><td>12.6</td><td>218</td><td>0.33</td><td>California.SanFrancisco</td><td>2</td></tr>
 <tr><td>d1004</td><td>1538548696600</td><td>11.8</td><td>221</td><td>0.28</td><td>California.LosAngeles</td><td>2</td></tr>
 <tr><td>d1002</td><td>1538548696650</td><td>10.3</td><td>218</td><td>0.25</td><td>California.SanFrancisco</td><td>3</td></tr>
 <tr><td>d1001</td><td>1538548696800</td><td>12.3</td><td>221</td><td>0.31</td><td>California.SanFrancisco</td><td>2</td></tr>
 </tbody>
 </table>
 <a href="#model_table1">Table 1: Smart meter example data</a>
 </div>
 
-Each row contains the device ID, time stamp, collected metrics (current, voltage, phase as above), and static tags (location and groupId in Table 1) associated with the devices. Each smart meter generates a row (measurement) in a pre-defined time interval or triggered by an external event. The device produces a sequence of measurements with associated time stamps.
+Each row contains the device ID, timestamp, collected metrics (`current`, `voltage`, `phase` as above), and static tags (`location` and `groupid` in Table 1) associated with the devices. Each smart meter generates a row (measurement) in a pre-defined time interval or triggered by an external event. The device produces a sequence of measurements with associated timestamps.
 
 ## Metric
 

@ -112,22 +111,22 @@ Label/Tag refers to the static properties of sensors, equipment or other types o
|
|||
|
||||
## Data Collection Point
|
||||
|
||||
Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. In the smart meters example, d1001, d1002, d1003, and d1004 are the data collection points.
|
||||
Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same timestamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. In the smart meters example, d1001, d1002, d1003, and d1004 are the data collection points.
|
||||
|
||||
## Table
|
||||
|
||||
Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process it with a short learning curve. You need to create a database, create tables, then insert data points and execute queries to explore the data.
|
||||
|
||||
To make full use of time-series data characteristics, TDengine adopts a strategy of "**One Table for One Data Collection Point**". TDengine requires the user to create a table for each data collection point (DCP) to store collected time-series data. For example, if there are over 10 million smart meters, it means 10 million tables should be created. For the table above, 4 tables should be created for devices D1001, D1002, D1003, and D1004 to store the data collected. This design has several benefits:
|
||||
To make full use of time-series data characteristics, TDengine adopts a strategy of "**One Table for One Data Collection Point**". TDengine requires the user to create a table for each data collection point (DCP) to store collected time-series data. For example, if there are over 10 million smart meters, it means 10 million tables should be created. For the table above, 4 tables should be created for devices d1001, d1002, d1003, and d1004 to store the data collected. This design has several benefits:
|
||||
|
||||
1. Since the metric data from different DCPs are fully independent, the data source of each DCP is unique, and a table has only one writer. In this way, data points can be written in a lock-free manner, and the writing speed can be greatly improved.
|
||||
2. For a DCP, the metric data generated by the DCP is ordered by timestamp, so the write operation can be implemented by simple appending, which further improves the data writing speed.
|
||||
3. The metric data from a DCP is continuously stored, block by block. If you read data for a period of time, it can greatly reduce random read operations and improve read and query performance by orders of magnitude.
|
||||
4. Inside a data block for a DCP, columnar storage is used, and different compression algorithms are used for different data types. The values of a single metric usually change only gradually over a time range, which allows for a higher compression rate.
|
||||
|
||||
If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. ** One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**
|
||||
If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**
|
||||
|
||||
TDengine suggests using DCP ID as the table name (like D1001 in the above table). Each DCP may collect one or multiple metrics (like the current, voltage, phase as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored. Column wise storage is used.
|
||||
TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won’t build the index on any metrics stored. Column wise storage is used.
|
||||
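As a rough sketch of this convention (assuming the smart meters example above; the column types are illustrative, not prescriptive), the per-DCP table for meter d1001 could be created as follows:

```sql
-- One table per DCP; the first column must be a timestamp.
-- Illustrative types based on the smart meters example.
CREATE TABLE d1001 (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT);
```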
|
||||
Complex devices, such as connected cars, may have multiple DCPs. In this case, multiple tables are created for a single device, one table per DCP.
|
||||
|
||||
|
@ -156,9 +155,16 @@ The relationship between a STable and the subtables created based on this STable
|
|||
|
||||
Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine will treat the data in all its subtables as a whole data set for processing. TDengine will first find the subtables that meet the tag filter conditions, then scan the time-series data of these subtables to perform aggregation operations. This reduces the number of data sets to be scanned, which in turn greatly improves the performance of data aggregation across multiple DCPs. In essence, querying a supertable is a very efficient aggregate query on multiple DCPs of the same type.
|
||||
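For example, assuming the supertable `meters` from the smart meters example, an aggregation across all DCPs in one city can be written as a single query against the STable (a sketch; the schema is assumed from Table 1):

```sql
-- Aggregate over every subtable of `meters` whose location tag matches.
SELECT AVG(voltage), MAX(current) FROM meters
WHERE location = "California.SanFrancisco";
```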
|
||||
In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. In the smart meters example, we can create subtables like d1001, d1002, d1003, and d1004 under super table meters.
|
||||
In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. In the smart meters example, we can create subtables like d1001, d1002, d1003, and d1004 under super table `meters`.
|
||||
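A minimal sketch of this layout, with column and tag types assumed from the smart meters example:

```sql
-- Super table: metrics as columns, static properties as tags.
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
  TAGS (location BINARY(64), groupid INT);

-- One subtable per DCP, each with its own tag values.
CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
```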
|
||||
To better understand the data model using metri, tags, super table and subtable, please refer to the diagram below which demonstrates the data model of the smart meters example. 
|
||||
To better understand the data model using metrics, tags, super table and subtable, please refer to the diagram below which demonstrates the data model of the smart meters example.
|
||||
|
||||
<figure>
|
||||
|
||||

|
||||
|
||||
<center><figcaption>Figure 1. Meters Data Model Diagram</figcaption></center>
|
||||
</figure>
|
||||
|
||||
## Database
|
||||
|
||||
|
@ -172,4 +178,4 @@ FQDN (Fully Qualified Domain Name) is the full domain name of a specific compute
|
|||
|
||||
Each node of a TDengine cluster is uniquely identified by an endpoint, which consists of an FQDN and a port, such as h1.tdengine.com:6030. In this way, when the IP changes, we can still use the FQDN to dynamically find the node without changing any configuration of the cluster. In addition, FQDN is used to facilitate unified access to the same cluster from the Intranet and the Internet.
|
||||
|
||||
TDengine does not recommend using an IP address to access the cluster. FQDN is recommended for cluster management.
|
||||
TDengine does not recommend using an IP address to access the cluster. FQDN is recommended for cluster management.
|
||||
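For example, the FQDN-related settings in `taos.cfg` might look like the following sketch (host names and values are placeholders):

```shell
# Illustrative taos.cfg entries; replace the host names with your own.
firstEp    h1.tdengine.com:6030   # endpoint of the first dnode to connect to
fqdn       h1.tdengine.com        # FQDN of this node
serverPort 6030                   # port that, with the FQDN, forms the endpoint
```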
|
|
|
@ -3,17 +3,33 @@ sidebar_label: Docker
|
|||
title: Quick Install on Docker
|
||||
---
|
||||
|
||||
This document describes how to install TDengine in a Docker container and perform queries and inserts. To get started with TDengine in a non-containerized environment, see [Quick Install](../../get-started/package). If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
|
||||
This document describes how to install TDengine in a Docker container and perform queries and inserts.
|
||||
|
||||
- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
|
||||
- To get started with TDengine in a non-containerized environment, see [Quick Install from Package](../../get-started/package).
|
||||
- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
|
||||
|
||||
## Run TDengine
|
||||
|
||||
If Docker is already installed on your computer, run the following command:
|
||||
If Docker is already installed on your computer, pull the latest TDengine Docker container image:
|
||||
|
||||
```shell
|
||||
docker pull tdengine/tdengine:latest
|
||||
```
|
||||
|
||||
Or pull the container image of a specific version:
|
||||
|
||||
```shell
|
||||
docker pull tdengine/tdengine:3.0.1.4
|
||||
```
|
||||
|
||||
And then run the following command:
|
||||
|
||||
```shell
|
||||
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
|
||||
```
|
||||
|
||||
Note that TDengine Server uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed.
|
||||
Note that TDengine Server 3.0 uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed.
|
||||
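For example, if you only use the native interface and the REST API, a smaller mapping such as the following sketch is sufficient:

```shell
docker run -d -p 6030:6030 -p 6041:6041 tdengine/tdengine
```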
|
||||
Run the following command to ensure that your container is running:
|
||||
|
||||
|
@ -21,7 +37,7 @@ Run the following command to ensure that your container is running:
|
|||
docker ps
|
||||
```
|
||||
|
||||
Enter the container and open the bash shell:
|
||||
Enter the container and open the `bash` shell:
|
||||
|
||||
```shell
|
||||
docker exec -it <container name> bash
|
||||
|
@ -31,68 +47,68 @@ You can now access TDengine or run other Linux commands.
|
|||
|
||||
Note: For information about installing Docker, see the [official documentation](https://docs.docker.com/get-docker/).
|
||||
|
||||
## Insert Data into TDengine
|
||||
|
||||
You can use the `taosBenchmark` tool included with TDengine to write test data into your deployment.
|
||||
|
||||
To do so, run the following command:
|
||||
|
||||
```bash
|
||||
$ taosBenchmark
|
||||
|
||||
```
|
||||
|
||||
This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`.
|
||||
|
||||
The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required depends on the hardware specifications of the local system.
|
||||
|
||||
You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](/reference/taosbenchmark).
|
||||
|
||||
## Open the TDengine CLI
|
||||
|
||||
On the container, run the following command to open the TDengine CLI:
|
||||
On the container, run the following command to open the TDengine CLI:
|
||||
|
||||
```
|
||||
$ taos
|
||||
|
||||
taos>
|
||||
taos>
|
||||
|
||||
```
|
||||
|
||||
## Query Data in TDengine
|
||||
## Test data insert performance
|
||||
|
||||
After using taosBenchmark to create your test deployment, you can run queries in the TDengine CLI to test its performance. For example:
|
||||
After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
|
||||
|
||||
From the TDengine CLI query the number of rows in the `meters` supertable:
|
||||
Start the TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) in a terminal.
|
||||
|
||||
```bash
|
||||
taosBenchmark
|
||||
```
|
||||
|
||||
This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `California.Campbell`, `California.Cupertino`, `California.LosAngeles`, `California.MountainView`, `California.PaloAlto`, `California.SanDiego`, `California.SanFrancisco`, `California.SanJose`, `California.SantaClara` or `California.Sunnyvale`.
|
||||
|
||||
The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in ten to twenty seconds.
|
||||
|
||||
You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark).
|
||||
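For example, a much smaller deployment can be created for a quick smoke test. The sketch below assumes `-t` sets the number of tables and `-n` the rows per table; confirm the exact options with `taosBenchmark --help` for your version:

```shell
# Sketch: 100 tables with 1,000 rows each instead of the defaults.
taosBenchmark -t 100 -n 1000
```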
|
||||
## Test data query performance
|
||||
|
||||
After using `taosBenchmark` to create your test deployment, you can run queries in the TDengine CLI to test its performance:
|
||||
|
||||
From the TDengine CLI (taos), query the number of rows in the `meters` supertable:
|
||||
|
||||
```sql
|
||||
select count(*) from test.meters;
|
||||
SELECT COUNT(*) FROM test.meters;
|
||||
```
|
||||
|
||||
Query the average, maximum, and minimum values of all 100 million rows of data:
|
||||
|
||||
```sql
|
||||
select avg(current), max(voltage), min(phase) from test.meters;
|
||||
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
|
||||
```
|
||||
|
||||
Query the number of rows whose `location` tag is `San Francisco`:
|
||||
Query the number of rows whose `location` tag is `California.SanFrancisco`:
|
||||
|
||||
```sql
|
||||
select count(*) from test.meters where location="San Francisco";
|
||||
SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";
|
||||
```
|
||||
|
||||
Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`:
|
||||
|
||||
```sql
|
||||
select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
|
||||
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
|
||||
```
|
||||
|
||||
Query the average, maximum, and minimum values for table `d10` in 1 second intervals:
|
||||
Query the average, maximum, and minimum values for table `d10` in 10 second intervals:
|
||||
|
||||
```sql
|
||||
select first(ts), avg(current), max(voltage), min(phase) from test.d10 interval(1s);
|
||||
SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
|
||||
```
|
||||
In the query above you are selecting the first timestamp (ts) in the interval, another way of selecting this would be _wstart which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
|
||||
|
||||
In the query above you are selecting the first timestamp (`ts`) in the interval; another way of selecting this would be `_wstart`, which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
|
||||
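For example, the same windowed query written with the window start pseudocolumn might look like this sketch:

```sql
SELECT _wstart, AVG(current), MAX(voltage), MIN(phase)
FROM test.d10 INTERVAL(10s);
```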
|
||||
## Additional Information
|
||||
|
||||
|
|
|
@ -7,25 +7,30 @@ import Tabs from "@theme/Tabs";
|
|||
import TabItem from "@theme/TabItem";
|
||||
import PkgListV3 from "/components/PkgListV3";
|
||||
|
||||
For information about installing TDengine on Docker, see [Quick Install on Docker](../../get-started/docker). If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
|
||||
This document describes how to install TDengine on Linux/Windows/macOS and perform queries and inserts.
|
||||
|
||||
The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. Note that taosAdapter supports Linux only. In addition to connectors for multiple languages, TDengine also provides a [REST API](../../reference/rest-api) through [taosAdapter](../../reference/taosadapter).
|
||||
- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
|
||||
- To get started with TDengine on Docker, see [Quick Install on Docker](../../get-started/docker).
|
||||
- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
|
||||
|
||||
The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download a lite package that includes only `taosd` and the C/C++ connector.
|
||||
The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface (CLI, taos), and some tools. Note that taosAdapter supports Linux only. In addition to connectors for multiple languages, TDengine also provides a [REST API](../../reference/rest-api) through [taosAdapter](../../reference/taosadapter).
|
||||
|
||||
The TDengine Community Edition is released as .deb and .rpm packages. The .deb package can be installed on Debian, Ubuntu, and derivative systems. The .rpm package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.tz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the .deb or .rpm package, download and install taosTools separately. TDengine can also be installed on 64-bit Windows servers.
|
||||
The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download the Lite package that includes only `taosd` and the C/C++ connector.
|
||||
|
||||
The TDengine Community Edition is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.gz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on x64 Windows and x64/M1 macOS.
|
||||
|
||||
## Installation
|
||||
|
||||
<Tabs>
|
||||
<TabItem label=".deb" value="debinst">
|
||||
|
||||
1. Download the .deb installation package.
|
||||
<PkgListV3 type={6}/>
|
||||
1. Download the Deb installation package.
|
||||
<PkgListV3 type={6}/>
|
||||
2. In the directory where the package is located, use `dpkg` to install the package:
|
||||
|
||||
> Please replace `<version>` with the corresponding version of the package downloaded
|
||||
|
||||
```bash
|
||||
# Enter the name of the package that you downloaded.
|
||||
sudo dpkg -i TDengine-server-<version>-Linux-x64.deb
|
||||
```
|
||||
|
||||
|
@ -34,11 +39,12 @@ sudo dpkg -i TDengine-server-<version>-Linux-x64.deb
|
|||
<TabItem label=".rpm" value="rpminst">
|
||||
|
||||
1. Download the .rpm installation package.
|
||||
<PkgListV3 type={5}/>
|
||||
<PkgListV3 type={5}/>
|
||||
2. In the directory where the package is located, use rpm to install the package:
|
||||
|
||||
> Please replace `<version>` with the corresponding version of the package downloaded
|
||||
|
||||
```bash
|
||||
# Enter the name of the package that you downloaded.
|
||||
sudo rpm -ivh TDengine-server-<version>-Linux-x64.rpm
|
||||
```
|
||||
|
||||
|
@ -47,11 +53,12 @@ sudo rpm -ivh TDengine-server-<version>-Linux-x64.rpm
|
|||
<TabItem label=".tar.gz" value="tarinst">
|
||||
|
||||
1. Download the .tar.gz installation package.
|
||||
<PkgListV3 type={0}/>
|
||||
<PkgListV3 type={0}/>
|
||||
2. In the directory where the package is located, use `tar` to decompress the package:
|
||||
|
||||
> Please replace `<version>` with the corresponding version of the package downloaded
|
||||
|
||||
```bash
|
||||
# Enter the name of the package that you downloaded.
|
||||
tar -zxvf TDengine-server-<version>-Linux-x64.tar.gz
|
||||
```
|
||||
|
||||
|
@ -96,23 +103,30 @@ sudo apt-get install tdengine
|
|||
This installation method is supported only for Debian and Ubuntu.
|
||||
::::
|
||||
</TabItem>
|
||||
<TabItem label="Windows" value="windows">
|
||||
<TabItem label="Windows" value="windows">
|
||||
|
||||
Note: TDengine only supports Windows Server 2016/2019 and windows 10/11 system versions on the windows platform.
|
||||
Note: TDengine only supports Windows Server 2016/2019 and Windows 10/11 on the Windows platform.
|
||||
|
||||
1. Download the Windows installation package.
|
||||
<PkgListV3 type={3}/>
|
||||
<PkgListV3 type={3}/>
|
||||
2. Run the downloaded package to install TDengine.
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="macOS" value="macos">
|
||||
|
||||
1. Download the macOS installation package.
|
||||
<PkgListV3 type={7}/>
|
||||
2. Run the downloaded package to install TDengine. If the installation is blocked, you can right-click or ctrl-click on the installation package and select `Open`.
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
:::info
|
||||
For information about TDengine releases, see [Release History](../../releases).
|
||||
For information about other TDengine releases, see [Release History](../../releases/tdengine).
|
||||
:::
|
||||
|
||||
:::note
|
||||
On the first node in your TDengine cluster, leave the `Enter FQDN:` prompt blank and press **Enter**. On subsequent nodes, you can enter the end point of the first dnode in the cluster. You can also configure this setting after you have finished installing TDengine.
|
||||
On the first node in your TDengine cluster, leave the `Enter FQDN:` prompt blank and press **Enter**. On subsequent nodes, you can enter the endpoint of the first dnode in the cluster. You can also configure this setting after you have finished installing TDengine.
|
||||
|
||||
:::
|
||||
|
||||
|
@ -147,7 +161,7 @@ Active: inactive (dead)
|
|||
|
||||
After confirming that TDengine is running, run the `taos` command to access the TDengine CLI.
|
||||
|
||||
The following `systemctl` commands can help you manage TDengine:
|
||||
The following `systemctl` commands can help you manage TDengine service:
|
||||
|
||||
- Start TDengine Server: `systemctl start taosd`
|
||||
|
||||
|
@ -159,39 +173,54 @@ The following `systemctl` commands can help you manage TDengine:
|
|||
|
||||
:::info
|
||||
|
||||
- The `systemctl` command requires _root_ privileges. If you are not logged in as the `root` user, use the `sudo` command.
|
||||
- The `systemctl` command requires _root_ privileges. If you are not logged in as the _root_ user, use the `sudo` command.
|
||||
- The `systemctl stop taosd` command does not instantly stop TDengine Server. The server is stopped only after all data in memory is flushed to disk. The time required depends on the cache size.
|
||||
- If your system does not include `systemd`, you can run `/usr/local/taos/bin/taosd` to start TDengine manually.
|
||||
|
||||
:::
|
||||
|
||||
## Command Line Interface (CLI)
|
||||
|
||||
You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, you can execute `taos` in a terminal.
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="Windows" value="windows">
|
||||
|
||||
After the installation is complete, run `C:\TDengine\taosd.exe` to start TDengine Server.
|
||||
|
||||
## Command Line Interface (CLI)
|
||||
|
||||
You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, run `taos.exe` from the `C:\TDengine` directory in a Windows terminal.
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="macOS" value="macos">
|
||||
|
||||
After the installation is complete, double-click /Applications/TDengine to start the program, or run `launchctl start com.tdengine.taosd` to start TDengine Server.
|
||||
|
||||
The following `launchctl` commands can help you manage TDengine service:
|
||||
|
||||
- Start TDengine Server: `launchctl start com.tdengine.taosd`
|
||||
|
||||
- Stop TDengine Server: `launchctl stop com.tdengine.taosd`
|
||||
|
||||
- Check TDengine Server status: `launchctl list | grep taosd`
|
||||
|
||||
:::info
|
||||
|
||||
- The `launchctl` command does not require _root_ privileges. You don't need to use the `sudo` command.
|
||||
- The first column returned by the `launchctl list | grep taosd` command is the PID of the program. If it shows `-`, the TDengine service is not running.
|
||||
|
||||
:::
|
||||
|
||||
## Command Line Interface (CLI)
|
||||
|
||||
You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, you can execute `taos` in a terminal.
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
## Test data insert performance
|
||||
|
||||
After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
|
||||
|
||||
```bash
|
||||
taosBenchmark
|
||||
```
|
||||
|
||||
This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`.
|
||||
|
||||
The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in less than a minute.
|
||||
|
||||
You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark).
|
||||
|
||||
## Command Line Interface
|
||||
|
||||
You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, run the following command:
|
||||
|
||||
```bash
|
||||
taos
|
||||
```
|
||||
|
@ -205,52 +234,71 @@ taos>
|
|||
You can create and delete databases and tables and run all types of queries. Each SQL command must end with a semicolon (;). For example:
|
||||
|
||||
```sql
|
||||
create database demo;
|
||||
use demo;
|
||||
create table t (ts timestamp, speed int);
|
||||
insert into t values ('2019-07-15 00:00:00', 10);
|
||||
insert into t values ('2019-07-15 01:00:00', 20);
|
||||
select * from t;
|
||||
CREATE DATABASE demo;
|
||||
USE demo;
|
||||
CREATE TABLE t (ts TIMESTAMP, speed INT);
|
||||
INSERT INTO t VALUES ('2019-07-15 00:00:00', 10);
|
||||
INSERT INTO t VALUES ('2019-07-15 01:00:00', 20);
|
||||
SELECT * FROM t;
|
||||
|
||||
ts | speed |
|
||||
========================================
|
||||
2019-07-15 00:00:00.000 | 10 |
|
||||
2019-07-15 01:00:00.000 | 20 |
|
||||
|
||||
Query OK, 2 row(s) in set (0.003128s)
|
||||
```
|
||||
|
||||
You can also can monitor the deployment status, add and remove user accounts, and manage running instances. You can run the TDengine CLI on either Linux or Windows machines. For more information, see [TDengine CLI](../../reference/taos-shell/).
|
||||
|
||||
You can also monitor the deployment status, add and remove user accounts, and manage running instances. You can run the TDengine CLI on Linux, Windows, and macOS machines. For more information, see [TDengine CLI](../../reference/taos-shell/).
|
||||
|
||||
## Test data insert performance
|
||||
|
||||
After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
|
||||
|
||||
Start the TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) in a terminal.
|
||||
|
||||
```bash
|
||||
taosBenchmark
|
||||
```
|
||||
|
||||
This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `California.Campbell`, `California.Cupertino`, `California.LosAngeles`, `California.MountainView`, `California.PaloAlto`, `California.SanDiego`, `California.SanFrancisco`, `California.SanJose`, `California.SantaClara` or `California.Sunnyvale`.
|
||||
|
||||
The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in ten to twenty seconds.
|
||||
|
||||
You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark).
|
||||
|
||||
## Test data query performance
|
||||
|
||||
After using taosBenchmark to create your test deployment, you can run queries in the TDengine CLI to test its performance:
|
||||
After using `taosBenchmark` to create your test deployment, you can run queries in the TDengine CLI to test its performance:
|
||||
|
||||
From the TDengine CLI query the number of rows in the `meters` supertable:
|
||||
From the TDengine CLI (taos), query the number of rows in the `meters` supertable:
|
||||
|
||||
```sql
|
||||
select count(*) from test.meters;
|
||||
SELECT COUNT(*) FROM test.meters;
|
||||
```
|
||||
|
||||
Query the average, maximum, and minimum values of all 100 million rows of data:
|
||||
|
||||
```sql
|
||||
select avg(current), max(voltage), min(phase) from test.meters;
|
||||
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
|
||||
```
|
||||
|
||||
Query the number of rows whose `location` tag is `San Francisco`:
|
||||
Query the number of rows whose `location` tag is `California.SanFrancisco`:
|
||||
|
||||
```sql
|
||||
select count(*) from test.meters where location="San Francisco";
|
||||
SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";
|
||||
```
|
||||
|
||||
Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`:
|
||||
|
||||
```sql
|
||||
select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
|
||||
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
|
||||
```
|
||||
|
||||
Query the average, maximum, and minimum values for table `d10` in 1 second intervals:
|
||||
Query the average, maximum, and minimum values for table `d10` in 10 second intervals:
|
||||
|
||||
```sql
|
||||
select first(ts), avg(current), max(voltage), min(phase) from test.d10 interval(1s);
|
||||
SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
|
||||
```
|
||||
In the query above you are selecting the first timestamp (ts) in the interval, another way of selecting this would be _wstart which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
|
||||
|
||||
In the query above you are selecting the first timestamp (`ts`) in the interval; another way of selecting this would be `_wstart`, which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
|
||||
|
|
|
@ -3,9 +3,9 @@ title: Get Started
|
|||
description: This article describes how to install TDengine and test its performance.
|
||||
---
|
||||
|
||||
The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter).
|
||||
You can install and run TDengine on Linux/Windows/macOS machines as well as Docker containers. You can also deploy TDengine as a managed service with TDengine Cloud.
|
||||
|
||||
You can install and run TDengine on Linux and Windows machines as well as Docker containers.
|
||||
The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter).
|
||||
|
||||
```mdx-code-block
|
||||
import DocCardList from '@theme/DocCardList';
|
||||
|
|
|
@ -1,8 +1,7 @@
|
|||
```csharp title="Native Connection"
|
||||
{{#include docs/examples/csharp/ConnectExample.cs}}
|
||||
{{#include docs/examples/csharp/connect/Program.cs}}
|
||||
```
|
||||
|
||||
:::info
|
||||
C# connector supports only native connection for now.
|
||||
|
||||
:::
|
||||
```csharp title="WebSocket Connection"
|
||||
{{#include docs/examples/csharp/wsConnect/Program.cs}}
|
||||
```
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
title: Connect
|
||||
description: "This document explains how to establish connections to TDengine and how to install and use TDengine connectors."
|
||||
sidebar_label: Connect
|
||||
title: Connect to TDengine
|
||||
description: "How to establish connections to TDengine and how to install and use TDengine connectors."
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
|
@ -14,10 +15,12 @@ import ConnCSNative from "./_connect_cs.mdx";
|
|||
import ConnC from "./_connect_c.mdx";
|
||||
import ConnR from "./_connect_r.mdx";
|
||||
import ConnPHP from "./_connect_php.mdx";
|
||||
import InstallOnWindows from "../../14-reference/03-connector/_linux_install.mdx";
|
||||
import InstallOnLinux from "../../14-reference/03-connector/_windows_install.mdx";
|
||||
import InstallOnLinux from "../../14-reference/03-connector/_linux_install.mdx";
|
||||
import InstallOnWindows from "../../14-reference/03-connector/_windows_install.mdx";
|
||||
import InstallOnMacOS from "../../14-reference/03-connector/_macos_install.mdx";
|
||||
import VerifyLinux from "../../14-reference/03-connector/_verify_linux.mdx";
|
||||
import VerifyWindows from "../../14-reference/03-connector/_verify_windows.mdx";
|
||||
import VerifyMacOS from "../../14-reference/03-connector/_verify_macos.mdx";
|
||||
|
||||
Any application running on any platform can access TDengine through the REST API provided by TDengine. For information, see [REST API](/reference/rest-api/). Applications can also use the connectors for various programming languages, including C/C++, Java, Python, Go, Node.js, C#, and Rust, to access TDengine. These connectors support connecting to TDengine clusters using the native interface (taosc). Some connectors also support connecting over a REST interface. Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.
|
||||
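For example, the REST API can be exercised with any HTTP client. The sketch below assumes taosAdapter is listening on the default port 6041 with the default `root:taosdata` credentials:

```shell
# Send one SQL statement to the REST endpoint (defaults assumed).
curl -u root:taosdata -d "SELECT SERVER_VERSION();" http://localhost:6041/rest/sql
```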
|
||||
|
@ -43,10 +46,13 @@ If you are choosing to use the native connection and the the application is not
|
|||
|
||||
<Tabs defaultValue="linux" groupId="os">
|
||||
<TabItem value="linux" label="Linux">
|
||||
<InstallOnWindows />
|
||||
<InstallOnLinux />
|
||||
</TabItem>
|
||||
<TabItem value="windows" label="Windows">
|
||||
<InstallOnLinux />
|
||||
<InstallOnWindows />
|
||||
</TabItem>
|
||||
<TabItem value="macos" label="MacOS">
|
||||
<InstallOnMacOS />
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
@ -61,13 +67,16 @@ After the above installation and configuration are done and making sure TDengine
|
|||
<TabItem value="windows" label="Windows">
|
||||
<VerifyWindows />
|
||||
</TabItem>
|
||||
<TabItem value="macos" label="MacOS">
|
||||
<VerifyMacOS />
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
## Install Connectors
|
||||
|
||||
<Tabs groupId="lang">
|
||||
<TabItem label="Java" value="java">
|
||||
|
||||
|
||||
If Maven is used to manage the project, you only need to add the following dependency to `pom.xml`.
|
||||
|
||||
```xml
|
||||
|
@ -137,19 +146,19 @@ Node.js connector provides different ways of establishing connections by providi
|
|||
|
||||
1. Install Node.js Native Connector
|
||||
|
||||
```
|
||||
npm install @tdengine/client
|
||||
```
|
||||
```
|
||||
npm install @tdengine/client
|
||||
```
|
||||
|
||||
:::note
|
||||
It's recommended to use a Node.js version between `node-v12.8.0` and `node-v13.0.0`.
|
||||
:::
|
||||
|
||||
:::
|
||||
|
||||
2. Install Node.js REST Connector
|
||||
|
||||
```
|
||||
npm install @tdengine/rest
|
||||
```
|
||||
```
|
||||
npm install @tdengine/rest
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="C#" value="csharp">
|
||||
|
|
|
@ -2,12 +2,10 @@
|
|||
title: Data Model
|
||||
---
|
||||
|
||||
The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the [STable](/concept/#super-table-stable) (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details.
|
||||
The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the [STable](/concept/#super-table-stable) (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details.
|
||||
|
||||
Note: before you read this chapter, please make sure you have already read through [Key Concepts](/concept/), since TDengine introduces new concepts like "one table for one [data collection point](/concept/#data-collection-point)" and "[super table](/concept/#super-table-stable)".
|
||||
|
||||
|
||||
|
||||
## Create Database
|
||||
|
||||
The characteristics of time-series data from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For example, the days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. For TDengine to operate with the best performance, we strongly recommend that you create and configure different databases for data with different characteristics. This allows you, for example, to set up different storage and retention policies. When creating a database, there are a lot of parameters that can be configured, such as the days to keep data, the number of replicas, the size of the cache, time precision, the minimum and maximum number of rows in each data block, whether compression is enabled, the time range of the data in a single data file and so on. An example is shown as follows:
|
||||
|
@ -17,10 +15,11 @@ CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 WAL_LEVEL 1;
|
|||
```
|
||||
|
||||
In the above SQL statement:
|
||||
|
||||
- a database named "power" is created
|
||||
- the data in it is retained for 365 days, which means that data older than 365 days will be deleted automatically
|
||||
- a new data file will be created every 10 days
|
||||
- the size of the write cache pool on each vnode is 16 MB
|
||||
- the size of the write cache pool on each VNode is 16 MB
|
||||
- the number of vgroups is 100
|
||||
- WAL is enabled but fsync is disabled. For more details please refer to [Database](/taos-sql/database).
|
||||
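Once the database exists, switch to it before creating tables, for example:

```sql
USE power;
```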
|
||||
|
|
|
@ -24,7 +24,7 @@ measurement,tag_set field_set timestamp
|
|||
- `measurement` will be used as the name of the STable. Enter a comma (,) between `measurement` and `tag_set`.
|
||||
- `tag_set` will be used as tags, with format like `<tag_key>=<tag_value>,<tag_key>=<tag_value>`. Enter a space between `tag_set` and `field_set`.
|
||||
- `field_set` will be used as data columns, with format like `<field_key>=<field_value>,<field_key>=<field_value>`. Enter a space between `field_set` and `timestamp`.
|
||||
- `timestamp` is the primary key timestamp corresponding to this row of data
|
||||
- `timestamp` is the primary key timestamp corresponding to this row of data
|
||||
|
||||
For example:
|
||||
|
||||
|
@ -34,11 +34,12 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
|
|||
|
||||
:::note
|
||||
|
||||
- All the data in `tag_set` will be converted to nchar type automatically .
|
||||
- All the data in `tag_set` will be converted to NCHAR type automatically .
|
||||
- Each value in `field_set` must be self-descriptive for its data type. For example, 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double.
|
||||
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h).
|
||||
|
||||
:::
|
||||
- You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
|
||||
- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.(smlDataFormat in taos.cfg default to false after version of 3.0.1.3)
|
||||
:::
|
||||
|
||||
For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
|
||||
|
||||
|
@ -64,3 +65,11 @@ For more details please refer to [InfluxDB Line Protocol](https://docs.influxdat
|
|||
<CLine />
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
## Query Examples
|
||||
|
||||
If you want to query the data of `location=California.LosAngeles,groupid=2`, here is the query SQL:
|
||||
|
||||
```sql
|
||||
SELECT * FROM meters WHERE location = "California.LosAngeles" AND groupid = 2;
|
||||
```
|
||||
|
|
|
@ -24,7 +24,7 @@ A single line of text is used in OpenTSDB line protocol to represent one row of
|
|||
- `metric` will be used as the STable name.
|
||||
- `timestamp` is the timestamp of the current row of data. The time precision will be determined automatically based on the length of the timestamp. Second and millisecond time precision are supported.
|
||||
- `value` is a metric which must be a numeric value. The corresponding column name is "value".
|
||||
- The last part is the tag set separated by spaces, all tags will be converted to nchar type automatically.
|
||||
- The last part is the tag set separated by spaces, all tags will be converted to NCHAR type automatically.
|
||||
|
||||
For example:
|
||||
|
||||
|
@ -32,7 +32,8 @@ For example:
|
|||
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
|
||||
```
|
||||
|
||||
Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.
|
||||
- The default child table name is generated by rules. You can configure smlChildTableName in taos.cfg to specify child table names, for example, `smlChildTableName=tname`. You can insert `meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
|
||||
Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.
|
||||
|
||||
## Examples
|
||||
|
||||
|
@ -64,10 +65,10 @@ taos> use test;
|
|||
Database changed.
|
||||
|
||||
taos> show stables;
|
||||
name | created_time | columns | tags | tables |
|
||||
============================================================================================
|
||||
meters.current | 2022-03-30 17:04:10.877 | 2 | 2 | 2 |
|
||||
meters.voltage | 2022-03-30 17:04:10.882 | 2 | 2 | 2 |
|
||||
name |
|
||||
=================================
|
||||
meters.current |
|
||||
meters.voltage |
|
||||
Query OK, 2 row(s) in set (0.002544s)
|
||||
|
||||
taos> select tbname, * from `meters.current`;
|
||||
|
@ -79,3 +80,11 @@ taos> select tbname, * from `meters.current`;
|
|||
t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco |
|
||||
Query OK, 4 row(s) in set (0.005399s)
|
||||
```
|
||||
|
||||
## Query Examples
|
||||
|
||||
If you want to query the data of `location=California.LosAngeles groupid=3`, here is the query SQL:
|
||||
|
||||
```sql
|
||||
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
|
||||
```
|
||||
|
|
|
@ -46,10 +46,10 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
|
|||
|
||||
:::note
|
||||
|
||||
- In JSON protocol, strings will be converted to nchar type and numeric values will be converted to double type.
|
||||
- In JSON protocol, strings will be converted to NCHAR type and numeric values will be converted to double type.
|
||||
- Only data in array format is accepted and so an array must be used even if there is only one row.
|
||||
|
||||
:::
|
||||
- The default child table name is generated by rules. You can configure smlChildTableName in taos.cfg to specify child table names, for example, `smlChildTableName=tname`. You can insert `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
|
||||
:::
|
||||
|
||||
## Examples
|
||||
|
||||
|
@ -81,10 +81,10 @@ taos> use test;
|
|||
Database changed.
|
||||
|
||||
taos> show stables;
|
||||
name | created_time | columns | tags | tables |
|
||||
============================================================================================
|
||||
meters.current | 2022-03-29 16:05:25.193 | 2 | 2 | 1 |
|
||||
meters.voltage | 2022-03-29 16:05:25.200 | 2 | 2 | 1 |
|
||||
name |
|
||||
=================================
|
||||
meters.current |
|
||||
meters.voltage |
|
||||
Query OK, 2 row(s) in set (0.001954s)
|
||||
|
||||
taos> select * from `meters.current`;
|
||||
|
@ -94,3 +94,11 @@ taos> select * from `meters.current`;
|
|||
2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
|
||||
Query OK, 2 row(s) in set (0.004076s)
|
||||
```
|
||||
|
||||
## Query Examples
|
||||
|
||||
If you want to query the data of `"tags": {"location": "California.LosAngeles", "groupid": 1}`, here is the query SQL:
|
||||
|
||||
```sql
|
||||
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 1;
|
||||
```
|
||||
|
|
|
@ -16,16 +16,16 @@ To achieve high performance writing, there are a few aspects to consider. In the
|
|||
|
||||
From the perspective of the application program, you need to consider:
|
||||
|
||||
1. The data size of each single write, also known as batch size. Generally speaking, higher batch size generates better writing performance. However, once the batch size is over a specific value, you will not get any additional benefit anymore. When using SQL to write into TDengine, it's better to put as much as possible data in single SQL. The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB. It can be configured by parameter `maxSQLLength` on client side, and the default value is 65,480.
|
||||
1. The data size of each single write, also known as batch size. Generally speaking, a higher batch size generates better writing performance. However, once the batch size is over a specific value, you will not get any additional benefit anymore. When using SQL to write into TDengine, it's better to put as much data as possible in a single SQL statement. The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB.
|
||||
|
||||
2. The number of concurrent connections. Normally more connections get better results. However, once the number of connections exceeds the processing ability of the server side, the performance may degrade.
|
||||
|
||||
3. The distribution of data to be written across tables or sub-tables. Writing to a single table in one batch is more efficient than writing to multiple tables in one batch.
|
||||
|
||||
4. Data Writing Protocol.
|
||||
- Prameter binding mode is more efficient than SQL because it doesn't have the cost of parsing SQL.
|
||||
- Writing to known existing tables is more efficient than wirting to uncertain tables in automatic creating mode because the later needs to check whether the table exists or not before actually writing data into it
|
||||
- Writing in SQL is more efficient than writing in schemaless mode because schemaless writing creats table automatically and may alter table schema
|
||||
- Parameter binding mode is more efficient than SQL because it doesn't have the cost of parsing SQL.
|
||||
- Writing to known existing tables is more efficient than writing to uncertain tables in automatic creating mode because the later needs to check whether the table exists or not before actually writing data into it.
|
||||
   - Writing in SQL is more efficient than writing in schemaless mode because schemaless writing creates tables automatically and may alter the table schema.
|
||||
|
||||
Application programs need to take care of the above factors and try to take advantage of them. The application program should write to a single table in each write batch. The batch size needs to be tuned to a proper value on a specific system. The number of concurrent connections needs to be tuned to a proper value as well to achieve the best writing throughput.
|
||||
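As a sketch of the batching advice above, several rows for one table can be packed into a single INSERT statement (table name and values are taken from the smart meters example and are illustrative):

```sql
-- One batch, one table: multiple rows in a single SQL statement.
INSERT INTO d1001 VALUES
  ('2018-10-03 14:38:05.000', 10.3, 219, 0.31)
  ('2018-10-03 14:38:15.000', 12.6, 218, 0.33)
  ('2018-10-03 14:38:25.000', 12.3, 221, 0.31);
```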
|
||||
|
@ -37,7 +37,7 @@ Application programs need to read data from data source then write into TDengine
|
|||
2. The speed of data generation from a single data source is much higher than the speed of a single writing thread. The purpose of the message queue in this case is to provide a buffer so that data is not lost and multiple writing threads can get data from the buffer.
|
||||
3. The data for a single table comes from multiple data sources. In this case the purpose of message queues is to combine the data for a single table together to improve the write efficiency.
|
||||
|
||||
If the data source is Kafka, then the appication program is a consumer of Kafka, you can benefit from some kafka features to achieve high performance writing:
|
||||
If the data source is Kafka, then the application program is a consumer of Kafka, and you can benefit from some Kafka features to achieve high performance writing:
|
||||
|
||||
1. Put the data for a table in a single partition of a single topic so that it's easier to put the data for each table together and write in batches.
|
||||
2. Subscribe to multiple topics to accumulate data together.
|
||||
|
@ -46,12 +46,9 @@ If the data source is Kafka, then the appication program is a consumer of Kafka,
|
|||
|
||||
### Tune TDengine
|
||||
|
||||
TDengine is a distributed and high performance time series database; there are also some ways to tune TDengine itself for better writing performance.
|
||||
On the server side, database configuration parameter `vgroups` needs to be set carefully to maximize the system performance. If it's set too low, the system capability can't be utilized fully; if it's set too big, unnecessary resource competition may be produced. A normal recommendation for `vgroups` parameter is 2 times of the number of CPU cores. However, depending on the actual system resources, it may still need to tuned.
|
||||
|
||||
1. Set a proper number of `vgroups` according to available CPU cores. Normally, we recommend 2 \* number_of_cores as a starting point. If the verification result shows this is not enough to utilize CPU resources, you can use a higher value; see the sketch following this list.
|
||||
2. Set proper `minTablesPerVnode`, `tableIncStepPerVnode`, and `maxVgroupsPerDb` according to the number of tables so that tables are distributed even across vgroups. The purpose is to balance the workload among all vnodes so that system resources can be utilized better to get higher performance.
|
||||
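For example (a sketch only; the database name and core count are hypothetical), on a host with 4 CPU cores the starting point above corresponds to:

```sql
CREATE DATABASE db1 VGROUPS 8;
```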
|
||||
For more performance tuning parameters, please refer to [Configuration Parameters](../../../reference/config).
|
||||
For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config).
|
||||
|
||||
## Sample Programs
|
||||
|
||||
|
@ -59,7 +56,7 @@ This section will introduce the sample programs to demonstrate how to write into
|
|||
|
||||
### Scenario
|
||||
|
||||
Below are the scenario for the sample programs of high performance wrting.
|
||||
Below is the scenario for the sample programs of high performance writing.
|
||||
|
||||
- The application program reads data from a data source; the sample program simulates a data source by generating data
|
||||
- The speed of a single writing thread is much slower than the speed of generating data, so the program starts multiple writing threads; each thread establishes a connection to TDengine and has a message queue of fixed size.
|
||||
|
@ -83,7 +80,7 @@ The sample programs assume the source data is for all the different sub tables i
|
|||
| ---------------- | ----------------------------------------------------------------------------------------------------- |
|
||||
| FastWriteExample | Main Program |
|
||||
| ReadTask | Read data from simulated data source and put into a queue according to the hash value of table name |
|
||||
| WriteTask | Read data from Queue, compose a wirte batch and write into TDengine |
|
||||
| WriteTask | Read data from Queue, compose a write batch and write into TDengine |
|
||||
| MockDataSource | Generate data for some sub tables of super table meters |
|
||||
| SQLWriter | WriteTask uses this class to compose SQL, create table automatically, check SQL length and write data |
|
||||
| StmtWriter | Write in Parameter binding mode (Not finished yet) |
|
||||
|
@ -98,16 +95,16 @@ The main Program is responsible for:
|
|||
1. Create message queues
|
||||
2. Start writing threads
|
||||
3. Start reading threads
|
||||
4. Otuput writing speed every 10 seconds
|
||||
4. Output writing speed every 10 seconds
|
||||
|
||||
The main program provides 4 parameters for tuning:
|
||||
|
||||
1. The number of reading threads, default value is 1
|
||||
2. The number of writing threads, default alue is 2
|
||||
2. The number of writing threads, default value is 2
|
||||
3. The total number of tables in the generated data, default value is 1000. These tables are distributed evenly across all writing threads. If the number of tables is very big, it will take a long time to create these tables at the start.
|
||||
4. The batch size of single write, default value is 3,000
|
||||
|
||||
The capacity of message queue also impacts performance and can be tuned by modifying program. Normally it's always better to have a larger message queue. A larger message queue means lower possibility of being blocked when enqueueing and higher throughput. But a larger message queue consumes more memory space. The default value used in the sample programs is already big enoug.
|
||||
The capacity of the message queue also impacts performance and can be tuned by modifying the program. Normally a larger message queue is better: it means a lower possibility of being blocked when enqueueing and higher throughput. But a larger message queue consumes more memory space. The default value used in the sample programs is already big enough.
|
||||
|
||||
```java
|
||||
{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java}}
|
||||
|
@ -182,7 +179,7 @@ TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
|
|||
|
||||
**Launch in IDE**
|
||||
|
||||
1. Clone TDengine repolitory
|
||||
1. Clone TDengine repository
|
||||
```
|
||||
git clone git@github.com:taosdata/TDengine.git --depth 1
|
||||
```
|
||||
|
@ -285,7 +282,7 @@ Sample programs in Python uses multi-process and cross-process message queues.
|
|||
| run_read_task Function | Read data and distribute to message queues |
|
||||
| MockDataSource Class | Simulate data source, return next 1,000 rows of each table |
|
||||
| run_write_task Function | Read as much data as possible from the message queue and write in batches |
|
||||
| SQLWriter Class | Write in SQL and create table utomatically |
|
||||
| SQLWriter Class | Write in SQL and create table automatically |
|
||||
| StmtWriter Class | Write in parameter binding mode (not finished yet) |
|
||||
|
||||
<details>
|
||||
|
@ -295,7 +292,7 @@ Sample programs in Python uses multi-process and cross-process message queues.
|
|||
|
||||
1. Monitoring process, which initializes the database and calculates writing speed
|
||||
2. Reading process (n), reads data from data source
|
||||
3. Writing process (m), wirtes data into TDengine
|
||||
3. Writing process (m), writes data into TDengine
|
||||
|
||||
`main` function provides 5 parameters:
|
||||
|
||||
|
@ -314,7 +311,7 @@ Sample programs in Python uses multi-process and cross-process message queues.
|
|||
<details>
|
||||
<summary>run_monitor_process</summary>
|
||||
|
||||
Monitoring process initilizes database and monitoring writing speed.
|
||||
The monitoring process initializes the database and monitors writing speed.
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/fast_write_example.py:monitor}}
|
||||
|
@ -359,7 +356,7 @@ Writing process tries to read as much as possible data from message queue and wr
|
|||
|
||||
<details>
|
||||
|
||||
SQLWriter class encapsulates the logic of composing SQL and writing data. Please be noted that the tables have not been created before writing, but are created automatically when catching the exception of table doesn't exist. For other exceptions caught, the SQL which caused the exception are logged for you to debug. This class also checks the SQL length, if the SQL length is closed to `maxSQLLength` the SQL will be executed immediately. To improve writing efficiency, it's better to increase `maxSQLLength` properly.
|
||||
The SQLWriter class encapsulates the logic of composing SQL and writing data. Please note that the tables are not created before writing; they are created automatically when the exception indicating that a table doesn't exist is caught. For other exceptions caught, the SQL which caused the exception is logged for you to debug. This class also checks the SQL length; the maximum SQL length is passed in through the parameter maxSQLLength according to the actual TDengine limit.
|
||||
|
||||
<summary>SQLWriter</summary>
|
||||
|
||||
|
@ -375,7 +372,7 @@ SQLWriter class encapsulates the logic of composing SQL and writing data. Please
|
|||
|
||||
<summary>Launch Sample Program in Python</summary>
|
||||
|
||||
1. Prerequisities
|
||||
1. Prerequisites
|
||||
|
||||
- TDengine client driver has been installed
|
||||
- Python3 has been installed, with version >= 3.8
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
```csharp
|
||||
{{#include docs/examples/csharp/InfluxDBLineExample.cs}}
|
||||
{{#include docs/examples/csharp/influxdbLine/Program.cs}}
|
||||
```
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
```csharp
|
||||
{{#include docs/examples/csharp/OptsJsonExample.cs}}
|
||||
{{#include docs/examples/csharp/optsJSON/Program.cs}}
|
||||
```
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
```csharp
|
||||
{{#include docs/examples/csharp/OptsTelnetExample.cs}}
|
||||
{{#include docs/examples/csharp/optsTelnet/Program.cs}}
|
||||
```
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
```csharp
|
||||
{{#include docs/examples/csharp/SQLInsertExample.cs}}
|
||||
{{#include docs/examples/csharp/sqlInsert/Program.cs}}
|
||||
```
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
```csharp
|
||||
{{#include docs/examples/csharp/StmtInsertExample.cs}}
|
||||
{{#include docs/examples/csharp/stmtInsert/Program.cs}}
|
||||
```
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
```csharp
|
||||
{{#include docs/examples/csharp/QueryExample.cs}}
|
||||
{{#include docs/examples/csharp/query/Program.cs}}
|
||||
```
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
```csharp
|
||||
{{#include docs/examples/csharp/AsyncQueryExample.cs}}
|
||||
{{#include docs/examples/csharp/asyncQuery/Program.cs}}
|
||||
```
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
```csharp
|
||||
{{#include docs/examples/csharp/SubscribeDemo.cs}}
|
||||
{{#include docs/examples/csharp/subscribe/Program.cs}}
|
||||
```
|
|
@ -3,6 +3,7 @@ title: Developer Guide
|
|||
---
|
||||
|
||||
Before creating an application to process time-series data with TDengine, consider the following:
|
||||
|
||||
1. Choose the method to connect to TDengine. TDengine offers a REST API that can be used with any programming language. It also has connectors for a variety of languages.
|
||||
2. Design the data model based on your own use cases. Consider the main [concepts](/concept/) of TDengine, including "one table per data collection point" and the supertable. Learn about static labels, collected metrics, and subtables. Depending on the characteristics of your data and your requirements, you decide to create one or more databases and design a supertable schema that fits your data.
|
||||
3. Decide how you will insert data. TDengine supports writing using standard SQL, but also supports schemaless writing, so that data can be written directly without creating tables manually.
|
||||
|
|
|
@ -1,70 +1,70 @@
|
|||
---
|
||||
sidebar_label: Data Types
|
||||
title: Data Types
|
||||
description: "TDengine supports a variety of data types including timestamp, float, JSON and many others."
|
||||
description: 'TDengine supports a variety of data types including timestamp, float, JSON and many others.'
|
||||
---
|
||||
|
||||
## Timestamp
|
||||
|
||||
When using TDengine to store and query data, the most important part of the data is the timestamp. Timestamps must be specified when creating and inserting data rows, and must follow the rules below:
|
||||
|
||||
- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`
|
||||
- Internal function `now` can be used to get the current timestamp on the client side
|
||||
- The current timestamp of the client side is applied when `now` is used to insert data
|
||||
- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`.
|
||||
- Internal function `NOW` can be used to get the current timestamp on the client side.
|
||||
- The current timestamp of the client side is applied when `NOW` is used to insert data.
|
||||
- Epoch Time: timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00.
|
||||
- Add/subtract operations can be carried out on timestamps. For example `now-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
|
||||
- Add/subtract operations can be carried out on timestamps. For example `NOW-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `SELECT * FROM t1 WHERE ts > NOW-2w AND ts <= NOW-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
|
||||
|
||||
Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanoseconds.
|
||||
|
||||
```sql
|
||||
CREATE DATABASE db_name PRECISION 'ns';
|
||||
```
|
||||
|
||||
## Data Types
|
||||
|
||||
In TDengine, the data types below can be used when specifying a column or tag.
|
||||
|
||||
| # | **type** | **Bytes** | **Description** |
|
||||
| --- | :-------: | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported |
|
||||
| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1] |
|
||||
| 3 | INT UNSIGNED| 4| unsigned integer, the value range is [0, 2^32-1]
|
||||
| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1] |
|
||||
| 5 | BIGINT UNSIGNED | 8 | unsigned long integer, the value range is [0, 2^64-1] |
|
||||
| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] |
|
||||
| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] |
|
||||
| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
|
||||
| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767] |
|
||||
| 10 | INT UNSIGNED| 2| unsigned integer, the value range is [0, 65535]|
|
||||
| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127] |
|
||||
| 12 | TINYINT UNSIGNED | 1 | unsigned single-byte integer, the value range is [0, 255] |
|
||||
| 13 | BOOL | 1 | Bool, the value range is {true, false} |
|
||||
| 14 | NCHAR | User Defined| Multi-Byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
|
||||
| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type |
|
||||
| 16 | VARCHAR | User-defined | Alias of BINARY |
|
||||
|
||||
| # | **type** | **Bytes** | **Description** |
|
||||
| --- | :--------------: | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported. |
|
||||
| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1]. |
|
||||
| 3 | INT UNSIGNED | 4 | Unsigned integer, the value range is [0, 2^32-1]. |
|
||||
| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1]. |
|
||||
| 5 | BIGINT UNSIGNED | 8 | Unsigned long integer, the value range is [0, 2^64-1]. |
|
||||
| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38]. |
|
||||
| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308]. |
|
||||
| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
|
||||
| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767]. |
|
||||
| 10 | SMALLINT UNSIGNED | 2 | Unsigned short integer, the value range is [0, 65535]. |
|
||||
| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127]. |
|
||||
| 12 | TINYINT UNSIGNED | 1 | Unsigned single-byte integer, the value range is [0, 255]. |
|
||||
| 13 | BOOL | 1 | Bool, the value range is {true, false}. |
|
||||
| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
|
||||
| 15 | JSON | | JSON type can only be used on tags. A tag of JSON type must be the only tag of its table and cannot coexist with tags of any other type. |
|
||||
| 16 | VARCHAR | User-defined | Alias of BINARY |
|
||||
|
||||
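As a minimal sketch of how these types appear in a table definition (the table and column names are hypothetical):

```sql
CREATE TABLE sensor_data (
    ts     TIMESTAMP,         -- primary key, millisecond precision by default
    temp   FLOAT,             -- 4-byte floating point
    status INT,               -- 4-byte signed integer
    serial BIGINT UNSIGNED,   -- 8-byte unsigned integer
    note   BINARY(20),        -- up to 20 single-byte characters
    city   NCHAR(10),         -- up to 10 multi-byte characters, 40 bytes of storage
    online BOOL
);
```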
:::note
|
||||
- TDengine is case insensitive and treats any characters in the sql command as lower case by default, case sensitive strings must be quoted with single quotes.
|
||||
- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
|
||||
- The length of BINARY can be up to 16374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'`
|
||||
|
||||
- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
|
||||
- The length of BINARY can be up to 16,374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'`
|
||||
- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
|
||||
|
||||
:::
|
||||
|
||||
|
||||
## Constants
|
||||
|
||||
TDengine supports a variety of constants:
|
||||
|
||||
| # | **Syntax** | **Type** | **Description** |
|
||||
| --- | :-------: | --------- | -------------------------------------- |
|
||||
| 1 | [{+ \| -}]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. |
|
||||
| 2 | 123.45 | DOUBLE | Floating-point literals are of type DOUBLE. Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used. |
|
||||
| 3 | 1.2E3 | DOUBLE | Literals in scientific notation are of type DOUBLE. |
|
||||
| 4 | 'abc' | BINARY | Content enclosed in single quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal single quote inside the string must be escaped with a backslash (\'). |
|
||||
| 5 | 'abc' | BINARY | Content enclosed in double quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal double quote inside the string must be escaped with a backslash (\"). |
|
||||
| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. |
|
||||
| 7 | {TRUE \| FALSE} | BOOL | Boolean literals are of type BOOL. |
|
||||
| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | The preceding characters indicate null literals. These can be used with any data type. |
|
||||
| # | **Syntax** | **Type** | **Description** |
|
||||
| --- | :-----------------------------------------------: | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 1 | [{+ \| -}]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. |
|
||||
| 2 | 123.45 | DOUBLE | Floating-point literals are of type DOUBLE. Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used. |
|
||||
| 3 | 1.2E3 | DOUBLE | Literals in scientific notation are of type DOUBLE. |
|
||||
| 4 | 'abc' | BINARY | Content enclosed in single quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal single quote inside the string must be escaped with a backslash `\'`. |
|
||||
| 5 | "abc" | BINARY | Content enclosed in double quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal double quote inside the string must be escaped with a backslash `\"`. |
|
||||
| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. |
|
||||
| 7 | {TRUE \| FALSE} | BOOL | Boolean literals are of type BOOL. |
|
||||
| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | The preceding characters indicate null literals. These can be used with any data type. |
|
||||
|
||||
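A minimal sketch showing several of these literal forms in one statement (the table `t1` and its schema are hypothetical):

```sql
-- assumes t1 has columns of type (TIMESTAMP, BIGINT, DOUBLE, BINARY(20), BOOL)
INSERT INTO t1 VALUES (TIMESTAMP '2017-08-12 18:25:58.128', -123, 1.2E3, 'it\'s ok', TRUE);
```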
:::note
|
||||
Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
|
||||
|
|
|
@ -71,9 +71,9 @@ database_option: {
|
|||
- SINGLE_STABLE: specifies whether the database can contain more than one supertable.
|
||||
- 0: The database can contain multiple supertables.
|
||||
- 1: The database can contain only one supertable.
|
||||
- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted.
|
||||
- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted.
|
||||
- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk.
|
||||
- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value for single-replica databases is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. For multi-replica databases, the default value is 4 days.
|
||||
- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value for single-replica databases is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. For multi-replica databases, the default value is -1.
|
||||
- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value for single-replica databases is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. For multi-replica databases, the default value is 1 day.
|
||||
- WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. After the current WAL file reaches this size, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk.
|
||||
|
||||
### Example Statement
|
||||
|
|
|
@ -49,6 +49,55 @@ The preceding SQL statement can be used in migration scenarios. It returns the C
|
|||
DESCRIBE [db_name.]stb_name;
|
||||
```
|
||||
|
||||
### View tag information for all child tables in the supertable
|
||||
|
||||
```
|
||||
taos> SHOW TABLE TAGS FROM st1;
|
||||
tbname | id | loc |
|
||||
======================================================================
|
||||
st1s1 | 1 | beijing |
|
||||
st1s2 | 2 | shanghai |
|
||||
st1s3 | 3 | guangzhou |
|
||||
Query OK, 3 rows in database (0.004455s)
|
||||
```
|
||||
|
||||
The first column of the returned result set is the subtable name, and the subsequent columns are the tag columns.
|
||||
|
||||
If you already know the name of the tag column, you can use the following statement to get the value of the specified tag column.
|
||||
|
||||
```
|
||||
taos> SELECT DISTINCT TBNAME, id FROM st1;
|
||||
tbname | id |
|
||||
===============================================
|
||||
st1s1 | 1 |
|
||||
st1s2 | 2 |
|
||||
st1s3 | 3 |
|
||||
Query OK, 3 rows in database (0.002891s)
|
||||
```
|
||||
|
||||
Note that DISTINCT and TBNAME in the SELECT statement are essential; TDengine optimizes the statement according to them so that tag values are returned correctly and quickly whether the table contains no data or a large amount of data.
|
||||
|
||||
### View the tag information of a subtable
|
||||
|
||||
```
|
||||
taos> SHOW TAGS FROM st1s1;
|
||||
table_name | db_name | stable_name | tag_name | tag_type | tag_value |
|
||||
============================================================================================================
|
||||
st1s1 | test | st1 | id | INT | 1 |
|
||||
st1s1 | test | st1 | loc | VARCHAR(20) | beijing |
|
||||
Query OK, 2 rows in database (0.003684s)
|
||||
```
|
||||
|
||||
Similarly, you can also use the SELECT statement to query the value of the specified tag column.
|
||||
|
||||
```
|
||||
taos> SELECT DISTINCT TBNAME, id, loc FROM st1s1;
|
||||
tbname | id | loc |
|
||||
==================================================
|
||||
st1s1 | 1 | beijing |
|
||||
Query OK, 1 rows in database (0.001884s)
|
||||
```
|
||||
|
||||
## Drop STable
|
||||
|
||||
```
|
||||
|
|
|
@ -16,6 +16,8 @@ INSERT INTO
|
|||
[(field1_name, ...)]
|
||||
VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
|
||||
...];
|
||||
|
||||
INSERT INTO tb_name [(field1_name, ...)] subquery
|
||||
```
|
||||
|
||||
**Timestamps**
|
||||
|
@ -37,7 +39,7 @@ INSERT INTO
|
|||
|
||||
4. The FILE clause inserts tags or data from a comma-separated values (CSV) file. Do not include headers in your CSV files.
|
||||
|
||||
5. A single INSERT statement can write data to multiple tables.
|
||||
5. A single `INSERT ... VALUES` statement and `INSERT ... FILE` statement can write data to multiple tables.
|
||||
|
||||
6. The INSERT statement is fully parsed before being executed, so that if any element of the statement fails, the entire statement will fail. For example, the following statement will not create a table because the latter part of the statement is invalid:
|
||||
|
||||
|
@ -47,6 +49,8 @@ INSERT INTO
|
|||
|
||||
7. However, an INSERT statement that writes data to multiple subtables can succeed for some tables and fail for others. This occurs because vnodes perform write operations independently of each other. One vnode failing to write data does not affect the ability of other vnodes to write successfully.
|
||||
|
||||
8. Data from TDengine can be inserted into a specified table using the `INSERT ... subquery` statement. Arbitrary query statements are supported. This syntax can only be used for subtables and normal tables, and does not support automatic table creation; see the sketch below.
|
||||
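A minimal sketch of point 8 above (the table names are hypothetical; both tables must already exist with compatible schemas):

```sql
INSERT INTO t_backup (ts, current) SELECT ts, current FROM d1001 WHERE ts < NOW - 1d;
```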
|
||||
## Insert a Record
|
||||
|
||||
Single row or multiple rows specified with VALUES can be inserted into a specific table. A single row is inserted using the below statement.
|
||||
|
@ -104,11 +108,11 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('202
|
|||
|
||||
## Insert Rows From A File
|
||||
|
||||
Besides using `VALUES` to insert one or multiple rows, the data to be inserted can also be prepared in a CSV file with comma as separator and each field value quoted by single quotes. Table definition is not required in the CSV file. For example, if file "/tmp/csvfile.csv" contains the below data:
|
||||
Besides using `VALUES` to insert one or multiple rows, the data to be inserted can also be prepared in a CSV file with commas as separators, in which timestamp and string field values are quoted with single quotes. Table definition is not required in the CSV file. For example, if file "/tmp/csvfile.csv" contains the below data:
|
||||
|
||||
```
|
||||
'2021-07-13 14:07:34.630', '10.2', '219', '0.32'
|
||||
'2021-07-13 14:07:35.779', '10.15', '217', '0.33'
|
||||
'2021-07-13 14:07:34.630', 10.2, 219, 0.32
|
||||
'2021-07-13 14:07:35.779', 10.15, 217, 0.33
|
||||
```
|
||||
|
||||
Then data in this file can be inserted by the SQL statement below:
|
||||
|
|
|
@ -11,7 +11,7 @@ SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW
|
|||
SELECT [DISTINCT] select_list
|
||||
from_clause
|
||||
[WHERE condition]
|
||||
[PARTITION BY tag_list]
|
||||
[partition_by_clause]
|
||||
[window_clause]
|
||||
[group_by_clause]
|
||||
[order_by_clasue]
|
||||
|
@ -52,10 +52,8 @@ window_clause: {
|
|||
| STATE_WINDOW(col)
|
||||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
||||
|
||||
changes_option: {
|
||||
DURATION duration_val
|
||||
| ROWS rows_val
|
||||
}
|
||||
partition_by_clause:
|
||||
PARTITION BY expr [, expr] ...
|
||||
|
||||
group_by_clause:
|
||||
GROUP BY expr [, expr] ... HAVING condition
|
||||
|
@ -71,9 +69,9 @@ order_expr:
|
|||
|
||||
A query can be performed on some or all columns. Data and tag columns can all be included in the SELECT list.
|
||||
|
||||
## Wildcards
|
||||
### Wildcards
|
||||
|
||||
You can use an asterisk (\*) as a wildcard character to indicate all columns. For standard tables, the asterisk indicates only data columns. For supertables and subtables, tag columns are also included.
|
||||
You can use an asterisk (\*) as a wildcard character to indicate all columns. For normal tables or sub-tables, the asterisk indicates only data columns. For supertables, tag columns are also included when using asterisk (\*).
|
||||
|
||||
```sql
|
||||
SELECT * FROM d1001;
|
||||
|
@ -126,7 +124,6 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
|
|||
|
||||
1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1,000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output.
|
||||
2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision errors in floating point numbers.
|
||||
3. `DISTINCT` can't be used in the sub-query of a nested query statement, and can't be used together with aggregate functions, `GROUP BY` or `JOIN` in the same SQL statement.
|
||||
|
||||
:::
|
||||
|
||||
|
@ -142,6 +139,8 @@ taos> SELECT ts, ts AS primary_key_ts FROM d1001;
|
|||
|
||||
### Pseudocolumns
|
||||
|
||||
**Pseudocolumn:** A pseudo-column behaves like a table column but is not actually stored in the table. You can select from pseudo-columns, but you cannot insert, update, or delete their values. A pseudo-column is also similar to a function without arguments. This section describes these pseudo-columns:
|
||||
|
||||
**TBNAME**
|
||||
The TBNAME pseudocolumn in a supertable contains the names of subtables within the supertable.
|
||||
|
||||
|
@ -185,6 +184,14 @@ In TDengine, the first column of all tables must be a timestamp. This column is
|
|||
select _rowts, max(current) from meters;
|
||||
```
|
||||
|
||||
**\_IROWTS**
|
||||
|
||||
The \_IROWTS pseudocolumn can only be used with INTERP function. This pseudocolumn can be used to retrieve the corresponding timestamp column associated with the interpolation results.
|
||||
|
||||
```sql
|
||||
select _irowts, interp(current) from meters range('2020-01-01 10:00:00', '2020-01-01 10:30:00') every(1s) fill(linear);
|
||||
```
|
||||
|
||||
## Query Objects
|
||||
|
||||
`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query.
|
||||
|
@ -354,19 +361,15 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
|
|||
|
||||
:::info
|
||||
|
||||
- Only one layer of nesting is allowed, that means no sub query is allowed within a sub query
|
||||
- The result set returned by the inner query will be used as a "virtual table" by the outer query. The "virtual table" can be renamed using `AS` keyword for easy reference in the outer query.
|
||||
- Sub query is not allowed in continuous query.
|
||||
- The result of a nested query is returned as a virtual table used by the outer query. It's recommended to give an alias to this table for the convenience of using it in the outer query.
|
||||
- JOIN operation is allowed between tables/STables inside both inner and outer queries. Join operation can be performed on the result set of the inner query.
|
||||
- UNION operation is not allowed in either inner query or outer query.
|
||||
- The functions that can be used in the inner query are the same as those that can be used in a non-nested query.
|
||||
- The features that can be used in the inner query are the same as those that can be used in a non-nested query.
|
||||
- `ORDER BY` inside the inner query is unnecessary and will slow down the query performance significantly. It is best to avoid the use of `ORDER BY` inside the inner query.
|
||||
- Compared to the non-nested query, the functionality that can be used in the outer query has the following restrictions:
|
||||
- Functions
|
||||
- If the result set returned by the inner query doesn't contain timestamp column, then functions relying on timestamp can't be used in the outer query, like `TOP`, `BOTTOM`, `FIRST`, `LAST`, `DIFF`.
|
||||
- Functions that need to scan the data twice can't be used in the outer query, like `STDDEV`, `PERCENTILE`.
|
||||
- `IN` operator is not allowed in the outer query but can be used in the inner query.
|
||||
- `GROUP BY` is not supported in the outer query.
|
||||
- If the result set returned by the inner query doesn't contain a timestamp column, then functions relying on timestamp can't be used in the outer query, like INTERP, DERIVATIVE, IRATE, LAST_ROW, FIRST, LAST, TWA, STATEDURATION, TAIL, UNIQUE.
|
||||
- If the result set returned by the inner query is not sorted in order by timestamp, then functions relying on data ordered by timestamp can't be used in the outer query, like LEASTSQUARES, ELAPSED, INTERP, DERIVATIVE, IRATE, TWA, DIFF, STATECOUNT, STATEDURATION, CSUM, MAVG, TAIL, UNIQUE.
|
||||
- Functions that need to scan the data twice can't be used in the outer query, like PERCENTILE.
|
||||
|
||||
:::
|
||||
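A minimal sketch of a one-level nested query that follows these rules: the inner result set keeps the timestamp column and is given an alias with `AS` (the `meters` schema is assumed from the examples in this document).

```sql
SELECT AVG(c) FROM (SELECT ts, current AS c FROM meters WHERE voltage > 215) AS inner_t;
```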
|
||||
|
|
|
@ -13,7 +13,7 @@ Single row functions return a result for each row.
|
|||
#### ABS
|
||||
|
||||
```sql
|
||||
SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||
ABS(expr)
|
||||
```
|
||||
|
||||
**Description**: The absolute value of a specific field.
|
||||
|
@ -31,7 +31,7 @@ SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
#### ACOS
|
||||
|
||||
```sql
|
||||
SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||
ACOS(expr)
|
||||
```
|
||||
|
||||
**Description**: The arc cosine of a specific field.
|
||||
|
@ -49,7 +49,7 @@ SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
#### ASIN
|
||||
|
||||
```sql
|
||||
SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||
ASIN(expr)
|
||||
```
|
||||
|
||||
**Description**: The arc sine of a specific field.
|
||||
|
@ -68,7 +68,7 @@ SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
#### ATAN
|
||||
|
||||
```sql
|
||||
SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||
ATAN(expr)
|
||||
```
|
||||
|
||||
**Description**: The arc tangent of a specific field.
|
||||
|
@ -87,7 +87,7 @@ SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
#### CEIL
|
||||
|
||||
```sql
|
||||
SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||
CEIL(expr)
|
||||
```
|
||||
|
||||
**Description**: The rounded up value of a specific field
|
||||
|
@ -105,7 +105,7 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
|||
#### COS
|
||||
|
||||
```sql
|
||||
SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||
COS(expr)
|
||||
```
|
||||
|
||||
**Description**: The cosine of a specific field.
|
||||
|
@ -123,16 +123,16 @@ SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
#### FLOOR
|
||||
|
||||
```sql
|
||||
SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||
FLOOR(expr)
|
||||
```
|
||||
|
||||
**Description**: The rounded down value of a specific field
|
||||
**Description**: The rounded down value of a specific field
|
||||
**More explanations**: The restrictions are the same as those of the `CEIL` function.
|
||||
|
||||
#### LOG
|
||||
|
||||
```sql
|
||||
SELECT LOG(field_name[, base]) FROM { tb_name | stb_name } [WHERE clause]
|
||||
LOG(expr [, base])
|
||||
```
|
||||
|
||||
**Description**: The logarithm of a specific field with `base` as the radix. If you do not enter a base, the natural logarithm of the field is returned.
|
||||
|
@ -151,7 +151,7 @@ SELECT LOG(field_name[, base]) FROM { tb_name | stb_name } [WHERE clause]
|
|||
#### POW
|
||||
|
||||
```sql
|
||||
SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
|
||||
POW(expr, power)
|
||||
```
|
||||
|
||||
**Description**: The power of a specific field with `power` as the exponent.
|
||||
|
@ -170,17 +170,17 @@ SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
|
|||
#### ROUND
|
||||
|
||||
```sql
|
||||
SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||
ROUND(expr)
|
||||
```
|
||||
|
||||
**Description**: The rounded value of a specific field.
|
||||
**Description**: The rounded value of a specific field.
|
||||
**More explanations**: The restrictions are the same as those of the `CEIL` function.
|
||||
|
||||
|
||||
#### SIN
|
||||
|
||||
```sql
|
||||
SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||
SIN(expr)
|
||||
```
|
||||
|
||||
**Description**: The sine of a specific field.
|
||||
|
@ -198,7 +198,7 @@ SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
#### SQRT
|
||||
|
||||
```sql
|
||||
SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||
SQRT(expr)
|
||||
```
|
||||
|
||||
**Description**: The square root of a specific field.
|
||||
|
@ -216,7 +216,7 @@ SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
#### TAN
|
||||
|
||||
```sql
|
||||
SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||
TAN(expr)
|
||||
```
|
||||
|
||||
**Description**: The tangent of a specific field.
|
||||
|
@ -238,7 +238,7 @@ Concatenation functions take strings as input and produce string or numeric valu
|
|||
#### CHAR_LENGTH
|
||||
|
||||
```sql
|
||||
SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
||||
CHAR_LENGTH(expr)
|
||||
```
|
||||
|
||||
**Description**: The length in number of characters of a string
|
||||
|
@ -254,7 +254,7 @@ SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
|||
#### CONCAT
|
||||
|
||||
```sql
|
||||
SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
|
||||
CONCAT(expr1, expr2 [, expr] ...)
|
||||
```
|
||||
|
||||
**Description**: The concatenation result of two or more strings
|
||||
|
@ -271,7 +271,7 @@ SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHER
|
|||
#### CONCAT_WS
|
||||
|
||||
```sql
|
||||
SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
|
||||
CONCAT_WS(separator_expr, expr1, expr2 [, expr] ...)
|
||||
```
|
||||
|
||||
**Description**: The concatenation result of two or more strings with separator
|
||||
|
@ -288,7 +288,7 @@ SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | st
|
|||
#### LENGTH
|
||||
|
||||
```sql
|
||||
SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
||||
LENGTH(expr)
|
||||
```
|
||||
|
||||
**Description**: The length in bytes of a string
|
||||
|
@ -305,7 +305,7 @@ SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
|||
#### LOWER
|
||||
|
||||
```sql
|
||||
SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
||||
LOWER(expr)
|
||||
```
|
||||
|
||||
**Description**: Convert the input string to lower case
|
||||
|
@ -322,7 +322,7 @@ SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
|||
#### LTRIM
|
||||
|
||||
```sql
|
||||
SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
||||
LTRIM(expr)
|
||||
```
|
||||
|
||||
**Description**: Remove the left leading blanks of a string
|
||||
|
@ -339,7 +339,7 @@ SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
|||
#### RTRIM
|
||||
|
||||
```sql
|
||||
SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
||||
RTRIM(expr)
|
||||
```
|
||||
|
||||
**Description**: Remove the trailing blanks of a string
|
||||
|
@ -356,7 +356,7 @@ SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
|||
#### SUBSTR
|
||||
|
||||
```sql
|
||||
SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
|
||||
SUBSTR(expr, pos [, len])
|
||||
```
|
||||
|
||||
**Description**: The sub-string starting from `pos` with length `len` from the original string `str`. If `len` is not specified, the sub-string extends from `pos` to the end.
|
||||
|
@ -373,7 +373,7 @@ SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
|
|||
#### UPPER
|
||||
|
||||
```sql
|
||||
SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
||||
UPPER(expr)
|
||||
```
|
||||
|
||||
**Description**: Convert the input string to upper case
|
||||
|
@ -394,10 +394,10 @@ Conversion functions change the data type of a value.
|
|||
#### CAST
|
||||
|
||||
```sql
|
||||
SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||
CAST(expr AS type_name)
|
||||
```
|
||||
|
||||
**Description**: Convert the input data `expression` into the type specified by `type_name`. This function can be used only in SELECT statements.
|
||||
**Description**: Convert the input data `expr` into the type specified by `type_name`. This function can be used only in SELECT statements.
|
||||
|
||||
**Return value type**: The type specified by parameter `type_name`
|
||||
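For instance (a sketch; `meters` and its `voltage` column follow the examples used elsewhere in this document):

```sql
SELECT CAST(voltage AS BIGINT) FROM meters;
```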
|
||||
|
@ -418,7 +418,7 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
#### TO_ISO8601
|
||||
|
||||
```sql
|
||||
SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause];
|
||||
TO_ISO8601(expr [, timezone])
|
||||
```
|
||||
|
||||
**Description**: The ISO8601 date/time format converted from a UNIX timestamp, plus the timezone. You can specify any time zone with the timezone parameter. If you do not enter this parameter, the time zone on the client is used.
|
||||
|
@ -434,14 +434,14 @@ SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause];
|
|||
**More explanations**:
|
||||
|
||||
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]. For example, TO_ISO8601(1, "+00:00").
|
||||
- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp
|
||||
- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp
|
||||
- If the input is a column of TIMESTAMP type, the precision of the returned value is the same as the precision set for the current database in use
|
||||
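A minimal sketch of this, using the `meters` table from earlier examples and the UTC time zone:

```sql
SELECT TO_ISO8601(ts, "+00:00") FROM meters;
```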
|
||||
|
||||
#### TO_JSON
|
||||
|
||||
```sql
|
||||
SELECT TO_JSON(str_literal) FROM { tb_name | stb_name } [WHERE clause];
|
||||
TO_JSON(str_literal)
|
||||
```
|
||||
|
||||
**Description**: Converts a string into JSON.
|
||||
|
@ -458,7 +458,7 @@ SELECT TO_JSON(str_literal) FROM { tb_name | stb_name } [WHERE clause];
|
|||
#### TO_UNIXTIMESTAMP
|
||||
|
||||
```sql
|
||||
SELECT TO_UNIXTIMESTAMP(datetime_string) FROM { tb_name | stb_name } [WHERE clause];
|
||||
TO_UNIXTIMESTAMP(expr)
|
||||
```
|
||||
|
||||
**Description**: UNIX timestamp converted from a string of date/time format
|
||||
|
@ -486,9 +486,7 @@ All functions that return the current time, such as `NOW`, `TODAY`, and `TIMEZON
|
|||
#### NOW
|
||||
|
||||
```sql
|
||||
SELECT NOW() FROM { tb_name | stb_name } [WHERE clause];
|
||||
SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior NOW();
|
||||
INSERT INTO tb_name VALUES (NOW(), ...);
|
||||
NOW()
|
||||
```
|
||||
|
||||
**Description**: The current time of the client side system
|
||||
|
@ -511,7 +509,7 @@ INSERT INTO tb_name VALUES (NOW(), ...);
|
|||
#### TIMEDIFF
|
||||
|
||||
```sql
|
||||
SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause];
|
||||
TIMEDIFF(expr1, expr2 [, time_unit])
|
||||
```
|
||||
|
||||
**Description**: The difference between two timestamps, rounded to the time unit specified by `time_unit`
|
||||
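A minimal sketch (whether a table-less SELECT is accepted may depend on the client); with `1s` as the unit this should return the difference as a number of seconds, 10 in this case:

```sql
SELECT TIMEDIFF('2022-01-01 08:00:10', '2022-01-01 08:00:00', 1s);
```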
|
@ -534,7 +532,7 @@ SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM
|
|||
#### TIMETRUNCATE
|
||||
|
||||
```sql
|
||||
SELECT TIMETRUNCATE(ts | datetime_string , time_unit) FROM { tb_name | stb_name } [WHERE clause];
|
||||
TIMETRUNCATE(expr, time_unit)
|
||||
```
|
||||
|
||||
**Description**: Truncate the input timestamp with unit specified by `time_unit`
|
||||
|
@ -555,7 +553,7 @@ SELECT TIMETRUNCATE(ts | datetime_string , time_unit) FROM { tb_name | stb_name
|
|||
#### TIMEZONE
|
||||
|
||||
```sql
|
||||
SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause];
|
||||
TIMEZONE()
|
||||
```
|
||||
|
||||
**Description**: The timezone of the client side system
|
||||
|
@ -570,9 +568,7 @@ SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause];
|
|||
#### TODAY
|
||||
|
||||
```sql
|
||||
SELECT TODAY() FROM { tb_name | stb_name } [WHERE clause];
|
||||
SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior TODAY()];
|
||||
INSERT INTO tb_name VALUES (TODAY(), ...);
|
||||
TODAY()
|
||||
```
|
||||
|
||||
**Description**: The timestamp of 00:00:00 of the client side system
|
||||
|
@ -599,7 +595,12 @@ TDengine supports the following aggregate functions:
|
|||
### APERCENTILE
|
||||
|
||||
```sql
|
||||
SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHERE clause]
|
||||
APERCENTILE(expr, p [, algo_type])
|
||||
|
||||
algo_type: {
|
||||
"default"
|
||||
| "t-digest"
|
||||
}
|
||||
```
|
||||
|
||||
**Description**: Similar to `PERCENTILE`, but an approximate result is returned
|
||||
|
@ -611,13 +612,14 @@ SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHER
|
|||
**Applicable table types**: standard tables and supertables
|
||||
|
||||
**Explanations**:
|
||||
- _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX.
|
||||
- _p_ is in range [0,100]; when _p_ is 0, the result is the same as using function MIN; when _p_ is 100, the result is the same as function MAX.
|
||||
- `algo_type` can only be input as `default` or `t-digest`. Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
|
||||
- The approximation result of `t-digest` algorithm is sensitive to input data order. For example, when querying STable with different input data order there might be minor differences in calculated results.
|
||||
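A minimal sketch using the `meters` table from earlier examples: the approximate 90th percentile of `current`, computed with the t-digest algorithm.

```sql
SELECT APERCENTILE(current, 90, "t-digest") FROM meters;
```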
|
||||
### AVG
|
||||
|
||||
```sql
|
||||
SELECT AVG(field_name) FROM tb_name [WHERE clause];
|
||||
AVG(expr)
|
||||
```
|
||||
|
||||
**Description**: The average value of the specified fields.
|
||||
|
@ -632,7 +634,7 @@ SELECT AVG(field_name) FROM tb_name [WHERE clause];
|
|||
### COUNT
|
||||
|
||||
```sql
|
||||
SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
|
||||
COUNT({* | expr})
|
||||
```
|
||||
|
||||
**Description**: The number of records in the specified fields.
|
||||
|
@ -652,7 +654,7 @@ If you input a specific column, the number of non-null values in the column is r
|
|||
### ELAPSED
|
||||
|
||||
```sql
|
||||
SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]];
|
||||
ELAPSED(ts_primary_key [, time_unit])
|
||||
```
|
||||
|
||||
**Description**: The `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with an `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without an `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Please note that the return value of `elapsed` is the number of `time_unit` units in the calculated time length.
|
||||
|
@ -664,7 +666,7 @@ SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE
|
|||
**Applicable tables**: table, STable, outer query in nested queries
|
||||
|
||||
**Explanations**:
|
||||
- `field_name` parameter can only be the first column of a table, i.e. timestamp primary key.
|
||||
- `ts_primary_key` parameter can only be the first column of a table, i.e. timestamp primary key.
|
||||
- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit. Time unit specified by `time_unit` can be:
|
||||
1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
|
||||
- It can be used with `INTERVAL` to get the valid time length within each time window. Please note that the return value is the same as the time window length for all time windows except for the first and the last one.
|
||||
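A minimal sketch (the `meters` table and the time range are assumptions): the valid data length, in seconds, within each one-hour window of a day.

```sql
SELECT ELAPSED(ts, 1s) FROM meters
    WHERE ts >= '2021-07-13 00:00:00' AND ts < '2021-07-14 00:00:00'
    INTERVAL(1h);
```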
|
@ -678,7 +680,7 @@ SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE
|
|||
### LEASTSQUARES
|
||||
|
||||
```sql
|
||||
SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause];
|
||||
LEASTSQUARES(expr, start_val, step_val)
|
||||
```
|
||||
|
||||
**Description**: The linear regression function of the specified column and the timestamp column (primary key), `start_val` is the initial value and `step_val` is the step value.
|
||||
|
@ -693,7 +695,7 @@ SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]
|
|||
### SPREAD
|
||||
|
||||
```sql
|
||||
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||
SPREAD(expr)
|
||||
```
|
||||
|
||||
**Description**: The difference between the max and the min of a specific column
|
||||
|
@ -708,7 +710,7 @@ SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
|||
### STDDEV
|
||||
|
||||
```sql
|
||||
SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
|
||||
STDDEV(expr)
|
||||
```
|
||||
|
||||
**Description**: Standard deviation of a specific column in a table or STable
|
||||
|
@ -723,7 +725,7 @@ SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
|
|||
### SUM
|
||||
|
||||
```sql
|
||||
SELECT SUM(field_name) FROM tb_name [WHERE clause];
|
||||
SUM(expr)
|
||||
```
|
||||
|
||||
**Description**: The sum of a specific column in a table or STable
|
||||
|
@ -738,7 +740,7 @@ SELECT SUM(field_name) FROM tb_name [WHERE clause];
|
|||
### HYPERLOGLOG
|
||||
|
||||
```sql
|
||||
SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||
HYPERLOGLOG(expr)
|
||||
```
|
||||
|
||||
**Description**:
|
||||
|
@ -755,7 +757,7 @@ SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
|||
### HISTOGRAM
|
||||
|
||||
```sql
|
||||
SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_name [WHERE clause];
|
||||
HISTOGRAM(expr,bin_type, bin_description, normalized)
|
||||
```
|
||||
|
||||
**Description**: Returns the count of data points in user-specified ranges.
|
||||
|
@ -768,14 +770,14 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam
|
|||
|
||||
**Explanations**:
|
||||
- bin_type: parameter to indicate the bucket type; valid inputs are: "user_input", "linear_bin", "log_bin".
|
||||
- bin_description: parameter to describe how to generate buckets,can be in the following JSON formats for each bin_type respectively:
|
||||
- "user_input": "[1, 3, 5, 7]":
|
||||
- bin_description: parameter to describe how to generate buckets; it can be in the following JSON formats for each bin_type respectively:
|
||||
- "user_input": "[1, 3, 5, 7]":
|
||||
User specified bin values.
|
||||
|
||||
|
||||
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
|
||||
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated set of bins.
|
||||
The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
|
||||
|
||||
|
||||
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
|
||||
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins.
|
||||
The above "linear_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].

@@ -785,7 +787,7 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam

### PERCENTILE

```sql
PERCENTILE(expr, p)
```

**Description**: The value whose rank in a specific column matches the specified percentage. If no value matching the specified percentage exists in the column, an interpolated value is returned.

@@ -796,7 +798,7 @@ SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];

**Applicable table types**: table only

**More explanations**: _p_ is in the range [0,100]; when _p_ is 0, the result is the same as the MIN function, and when _p_ is 100, the result is the same as MAX.
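A small sketch, assuming a subtable `d1001` with a numeric `current` column (illustrative names); PERCENTILE is computed over a single table:

```sql
-- 90th percentile of current; interpolated if no exact rank match exists
SELECT PERCENTILE(current, 90) FROM d1001;
```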

## Selection Functions

@@ -806,7 +808,7 @@ Selection functions return one or more results depending. You can specify the ti

### BOTTOM

```sql
BOTTOM(expr, k)
```

**Description**: The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them would exceed the upper limit _k_, then a part of them is returned randomly.

@@ -826,7 +828,7 @@ SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];

### FIRST

```sql
FIRST(expr)
```

**Description**: The first non-null value of a specific column in a table or STable

@@ -846,7 +848,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];

### INTERP

```sql
INTERP(expr)
```

**Description**: The value that matches the specified timestamp range is returned, if it exists; otherwise an interpolated value is returned.

@@ -861,15 +863,17 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] RA

- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists, an interpolated value is returned based on the `FILL` parameter.
- The input data of `INTERP` is the value of the specified column, and a `where` clause can be used to filter the original data. If no `where` condition is specified, all original data is the input.
- `INTERP` must be used along with the `RANGE`, `EVERY`, and `FILL` keywords, as shown in the sketch after this list.
- The output time range of `INTERP` is specified by the `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `EVERY` parameter. The parameter `EVERY` must be an integer, with no quotes, with a time unit of b (nanosecond), u (microsecond), a (millisecond), s (second), m (minute), h (hour), d (day), or w (week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
- Interpolation is performed based on the `FILL` parameter.
- `INTERP` can only be used to interpolate in a single timeline, so it must be used with `partition by tbname` when it's used on a STable.
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported in version 3.0.1.4 and later).
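A sketch combining the required keywords, assuming the usual `meters` supertable with a `current` column (names and timestamps are illustrative):

```sql
-- One linearly interpolated value every 5 seconds per subtable,
-- with _irowts returning the timestamp of each interpolation point
SELECT _irowts, INTERP(current) FROM meters
PARTITION BY tbname
RANGE('2022-08-29 10:00:00', '2022-08-29 10:01:00') EVERY(5s) FILL(LINEAR);
```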

### LAST

```sql
LAST(expr)
```

**Description**: The last non-NULL value of a specific column in a table or STable

@@ -890,7 +894,7 @@ SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];

### LAST_ROW

```sql
LAST_ROW(expr)
```

**Description**: The last row of a table or STable

@@ -909,7 +913,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };

### MAX

```sql
MAX(expr)
```

**Description**: The maximum value of a specific column of a table or STable

@@ -924,7 +928,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];

### MIN

```sql
MIN(expr)
```

**Description**: The minimum value of a specific column in a table or STable

@@ -939,10 +943,10 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];

### MODE

```sql
MODE(expr)
```

**Description**: The value which has the highest frequency of occurrence. One random value is returned if there are multiple values with the highest frequency of occurrence.

**Return value type**: Same as the input data

@@ -954,7 +958,7 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause];

### SAMPLE

```sql
SAMPLE(expr, k)
```

**Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,1000].

@@ -967,7 +971,7 @@ SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]

**Applicable table types**: standard tables and supertables

**More explanations**:

- This function cannot be used in expression calculation.
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline

@@ -976,7 +980,7 @@ This function cannot be used in expression calculation.

### TAIL

```sql
TAIL(expr, k, offset_val)
```

**Description**: The next _k_ rows are returned after skipping the last `offset_val` rows; NULL values are not ignored. `offset_val` is an optional parameter. When it's not specified, the last _k_ rows are returned. When `offset_val` is used, the effect is the same as `order by ts desc LIMIT k OFFSET offset_val`.
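A sketch of the equivalence just described, assuming a subtable `d1001` with a `current` column (illustrative):

```sql
-- Skip the 2 most recent rows, then return the next 5 most recent ones
SELECT TAIL(current, 5, 2) FROM d1001;
-- Roughly equivalent to:
SELECT current FROM d1001 ORDER BY ts DESC LIMIT 5 OFFSET 2;
```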

@@ -993,7 +997,7 @@ SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];

### TOP

```sql
TOP(expr, k)
```

**Description**: The greatest _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them would exceed the upper limit _k_, then a part of them is returned randomly.

@@ -1013,7 +1017,7 @@ SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];

### UNIQUE

```sql
UNIQUE(expr)
```

**Description**: The values that occur for the first time in the specified column. The effect is similar to the `distinct` keyword, but it can also be used to match tags or timestamps. The first occurrence of a timestamp or tag is used.

@@ -1032,7 +1036,7 @@ TDengine includes extensions to standard SQL that are intended specifically for

### CSUM

```sql
CSUM(expr)
```

**Description**: The cumulative sum of each row for a specific column. The number of output rows is the same as that of the input rows.
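A minimal sketch, assuming a subtable `d1001` and the `meters` supertable with a `current` column (illustrative names):

```sql
-- Running total of current within one subtable
SELECT CSUM(current) FROM d1001;
-- On a supertable, force one timeline per subtable
SELECT CSUM(current) FROM meters PARTITION BY tbname;
```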

@@ -1045,17 +1049,22 @@ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]

**Applicable table types**: standard tables and supertables

**More explanations**:

- Arithmetic operations can't be performed on the result of the `csum` function
- Can only be used with aggregate functions. This function can be used with supertables and standard tables.
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline

### DERIVATIVE

```sql
DERIVATIVE(expr, time_interval, ignore_negative)

ignore_negative: {
    0
  | 1
}
```

**Description**: The derivative of a specific column. The time range can be specified by the parameter `time_interval`; the minimum allowed time range is 1 second (1s). The value of `ignore_negative` can be 0 or 1; 1 means negative values are ignored.
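A sketch, assuming a subtable `d1001` with a `current` column (illustrative); the derivative is normalized to a per-second rate and negative deltas are dropped:

```sql
SELECT _rowts, DERIVATIVE(current, 1s, 1) FROM d1001;
```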

@@ -1066,15 +1075,20 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER

**Applicable table types**: standard tables and supertables

**More explanation**:

- It can be used together with `PARTITION BY tbname` against a STable.
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from.

### DIFF

```sql
DIFF(expr [, ignore_negative])

ignore_negative: {
    0
  | 1
}
```

**Description**: The difference of each row from its previous row for a specific column. `ignore_negative` can be specified as 0 or 1; the default value is 1 if it's not specified. `1` means negative values are ignored.
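A sketch, assuming a subtable `d1001` with a `current` column (illustrative); `ignore_negative` is passed explicitly so negative differences are kept:

```sql
SELECT _rowts, DIFF(current, 0) FROM d1001;
```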

@@ -1085,7 +1099,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER

**Applicable table types**: standard tables and supertables

**More explanation**:

- The number of result rows is the number of input rows minus one; there is no output for the first row
- It can be used together with a selected column. For example: select \_rowts, DIFF() from.

@@ -1094,7 +1108,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER

### IRATE

```sql
IRATE(expr)
```

**Description**: Instantaneous rate on a specific column. The last two samples in the specified time range are used to calculate the instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values.

@@ -1109,7 +1123,7 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause;

### MAVG

```sql
MAVG(expr, k)
```

**Description**: The moving average of continuous _k_ values of a specific column. If the number of input rows is less than _k_, nothing is returned. The applicable range of _k_ is [1,1000].

@@ -1122,9 +1136,9 @@ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]

**Applicable table types**: standard tables and supertables

**More explanations**:

- Arithmetic operations can't be performed on the result of `MAVG`.
- Can only be used with data columns, can't be used with tags.
- Can't be used with aggregate functions.
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline

@@ -1132,7 +1146,7 @@ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]

### STATECOUNT

```sql
STATECOUNT(expr, oper, val)
```

**Description**: The number of continuous rows satisfying the specified conditions for a specific column. The result is shown as an extra column for each row. If the specified condition is evaluated as true, the number is increased by 1; otherwise the number is reset to -1. If the input value is NULL, the corresponding row is skipped.
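A sketch, assuming a subtable `d1001` with a `voltage` column (illustrative); the quoted form of the `oper` comparison operator is an assumption based on common usage of this function:

```sql
-- Extra output column: consecutive rows so far with voltage >= 220,
-- resetting to -1 when the condition fails
SELECT ts, voltage, STATECOUNT(voltage, 'GE', 220) FROM d1001;
```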

@@ -1159,7 +1173,7 @@ SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clau

### STATEDURATION

```sql
STATEDURATION(expr, oper, val, unit)
```

**Description**: The length of time range in which all rows satisfy the specified condition for a specific column. The result is shown as an extra column for each row. The length for the first row that satisfies the condition is 0. Next, if the condition is evaluated as true for a row, the time interval between the current row and its previous row is added to the time range; otherwise the time range length is reset to -1. If the value of the column is NULL, the corresponding row is skipped.
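A sketch in the same spirit, assuming a subtable `d1001` with a `voltage` column (illustrative); the `unit` argument of `1s` reports the duration in seconds:

```sql
-- Extra output column: how long voltage has stayed >= 220, in seconds
SELECT ts, voltage, STATEDURATION(voltage, 'GE', 220, 1s) FROM d1001;
```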

@@ -1187,7 +1201,7 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W

### TWA

```sql
TWA(expr)
```

**Description**: Time-weighted average on a specific column within a time range

@@ -5,11 +5,11 @@ title: Time-Series Extensions

As a purpose-built database for storing and processing time-series data, TDengine provides time-series-specific extensions to standard SQL.

These extensions include partitioned queries and windowed queries.

## Partitioned Queries

When you query a supertable, you may need to partition the supertable by some dimensions and perform additional operations on a specific partition. In this case, you can use the following SQL clause:

```sql
PARTITION BY part_list
```

@@ -17,22 +17,24 @@ PARTITION BY part_list

part_list can be any scalar expression, such as a column, constant, scalar function, or a combination of the preceding items.

A PARTITION BY clause is processed as follows:

- The PARTITION BY clause must occur after the WHERE clause
- The PARTITION BY clause partitions the data according to the specified dimensions, then performs computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause.
- The PARTITION BY clause can be used together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value:

```sql
select max(current) from meters partition by location interval(10m)
```

The most common usage of PARTITION BY is partitioning the data in subtables by tags and then performing computation when querying data in a supertable. More specifically, `PARTITION BY TBNAME` partitions the data of each subtable into a single timeline, which facilitates statistical analysis in many time-series processing use cases.
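A sketch of this pattern on the `meters` supertable used above (column names are illustrative):

```sql
-- One 10-minute downsampled series per subtable, i.e. per device
select tbname, _wstart, avg(current) from meters partition by tbname interval(10m);
```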

## Windowed Queries

Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every second, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. Window-related clauses are used to divide the data set to be queried into subsets, and then aggregation is performed across the subsets. There are three kinds of windows: time window, state window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window. The query syntax is as follows:

```sql
SELECT select_list FROM tb_name
[WHERE where_condition]
[SESSION(ts_col, tol_val)]
[STATE_WINDOW(col)]
```

@@ -42,15 +44,9 @@ SELECT function_list FROM tb_name

The following restrictions apply:

### Restricted Functions

- Aggregate functions and select functions can be used in `function_list`, with each function having only one output. For example, COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, and LAST. Functions having multiple outputs, such as DIFF or arithmetic operations, can't be used.
- `LAST_ROW` can't be used together with window aggregate.
- Scalar functions, like CEIL/FLOOR, can't be used with window aggregate.

### Other Rules

- The window clause must occur after the PARTITION BY clause. It cannot be used with a GROUP BY clause.
- SELECT clauses on windows can contain only the following expressions:
  - Constants
  - Aggregate functions

@@ -82,7 +78,7 @@ These pseudocolumns occur after the aggregation clause.

1. A huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000.
2. The result set is in ascending order of timestamp when you aggregate by time window.
3. If aggregate by window is used on a STable, the aggregate function is performed on all the rows matching the filter conditions. If `PARTITION BY` is not used in the query, the result set will be returned in strict ascending order of timestamp; otherwise the result set will be returned in the order of ascending timestamp in each group.

:::

@@ -112,9 +108,9 @@ When using time windows, note the following:

Please note that the `timezone` parameter should be configured to the same value in the `taos.cfg` configuration file on both the client side and the server side.

- The result set is in ascending order of timestamp when you aggregate by time window.

### State Window

In case of using an integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a state window. Once the status changes, the state window closes. As shown in the following figure, there are two state windows according to status, [2019-04-28 14:22:07, 2019-04-28 14:22:10] and [2019-04-28 14:22:11, 2019-04-28 14:22:12].

![TDengine Database Time Window](./timewindow-3.webp)

@@ -124,13 +120,19 @@ In case of using integer, bool, or string to represent the status of a device at

```sql
SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
```

To return only the information of state windows whose status is 2, for example:

```sql
SELECT * FROM (SELECT COUNT(*) AS cnt, FIRST(ts) AS fst, status FROM temp_tb_1 STATE_WINDOW(status)) t WHERE status = 2;
```

### Session Window

The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitute 2 time windows, [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.

![TDengine Database Session Window](./timewindow-2.webp)

If the time interval between two continuous rows is within the time interval specified by `tol_value`, they belong to the same session window; otherwise a new session window is started automatically.
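A sketch of the session syntax, assuming the same `temp_tb_1` table and the 12-second tolerance from the figure:

```sql
SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, 12s);
```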

@@ -44,13 +44,13 @@ For example, the following SQL statement creates a stream and automatically crea

```sql
CREATE STREAM avg_vol_s INTO avg_vol AS
SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
```

## Delete a Stream

```sql
DROP STREAM [IF EXISTS] stream_name
```

This statement deletes the stream processing service only. The data generated by the stream is retained.

@@ -5,7 +5,9 @@ title: Reserved Keywords

## Keyword List

There are more than 200 keywords reserved by TDengine. They can't be used as the name of a database, table, STable, subtable, column, or tag in upper case, lower case, or mixed case. If you need to use these keywords, enclose them in backticks (`` ` ``), e.g. \`ADD\`.
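An illustrative sketch of the escaping rule (the table and column names are hypothetical); `range` and `comment` are both reserved words:

```sql
CREATE TABLE sensor_meta (ts TIMESTAMP, `range` FLOAT, `comment` NCHAR(64));
```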

The following list shows all reserved keywords:

### A

@@ -14,15 +16,20 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam

- ACCOUNTS
- ADD
- AFTER
- AGGREGATE
- ALL
- ALTER
- ANALYZE
- AND
- APPS
- AS
- ASC
- AT_ONCE
- ATTACH

### B

- BALANCE
- BEFORE
- BEGIN
- BETWEEN

@@ -32,19 +39,27 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam

- BITNOT
- BITOR
- BLOCKS
- BNODE
- BNODES
- BOOL
- BUFFER
- BUFSIZE
- BY

### C

- CACHE
- CACHELAST
- CACHEMODEL
- CACHESIZE
- CASCADE
- CAST
- CHANGE
- CLIENT_VERSION
- CLUSTER
- COLON
- COLUMN
- COMMA
- COMMENT
- COMP
- COMPACT
- CONCAT

@@ -52,15 +67,18 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam

- CONNECTION
- CONNECTIONS
- CONNS
- CONSUMER
- CONSUMERS
- CONTAINS
- COPY
- COUNT
- CREATE
- CTIME
- CURRENT_USER

### D

- DATABASE
- DATABASES
- DAYS
- DBS
- DEFERRED
- DELETE

@@ -69,18 +87,23 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam

- DESCRIBE
- DETACH
- DISTINCT
- DISTRIBUTED
- DIVIDE
- DNODE
- DNODES
- DOT
- DOUBLE
- DROP
- DURATION

### E

- EACH
- ENABLE
- END
- EQ
- EVERY
- EXISTS
- EXPIRED
- EXPLAIN

### F

@@ -88,18 +111,20 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam

- FAIL
- FILE
- FILL
- FIRST
- FLOAT
- FLUSH
- FOR
- FROM
- FSYNC
- FUNCTION
- FUNCTIONS

### G

- GE
- GLOB
- GRANT
- GRANTS
- GROUP
- GT

### H

@@ -110,15 +135,18 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam

- ID
- IF
- IGNORE
- IMMEDIATE
- IMPORT
- IN
- INDEX
- INDEXES
- INITIALLY
- INNER
- INSERT
- INSTEAD
- INT
- INTEGER
- INTERVAL
- INTO
- IS
- ISNULL

@@ -126,6 +154,7 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam

### J

- JOIN
- JSON

### K

@@ -135,46 +164,57 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam

### L

- LE
- LAST
- LAST_ROW
- LICENCES
- LIKE
- LIMIT
- LINEAR
- LOCAL
- LP
- LSHIFT
- LT

### M

- MATCH
- MAX_DELAY
- MAXROWS
- MERGE
- META
- MINROWS
- MINUS
- MNODE
- MNODES
- MODIFY
- MODULES

### N

- NE
- NCHAR
- NEXT
- NMATCH
- NONE
- NOT
- NOTNULL
- NOW
- NULL
- NULLS

### O

- OF
- OFFSET
- ON
- OR
- ORDER
- OUTPUTTYPE

### P

- PARTITION
- PAGES
- PAGESIZE
- PARTITIONS
- PASS
- PLUS
- PORT
- PPS
- PRECISION
- PREV

@@ -182,47 +222,63 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam

### Q

- QNODE
- QNODES
- QTIME
- QUERIES
- QUERY
- QUORUM

### R

- RAISE
- REM
- RANGE
- RATIO
- READ
- REDISTRIBUTE
- RENAME
- REPLACE
- REPLICA
- RESET
- RESTRICT
- RETENTIONS
- REVOKE
- ROLLUP
- ROW
- RP
- RSHIFT

### S

- SCHEMALESS
- SCORES
- SELECT
- SEMI
- SERVER_STATUS
- SERVER_VERSION
- SESSION
- SET
- SHOW
- SLASH
- SINGLE_STABLE
- SLIDING
- SLIMIT
- SMA
- SMALLINT
- SNODE
- SNODES
- SOFFSET
- SPLIT
- STABLE
- STABLES
- STAR
- STATE
- STATE_WINDOW
- STATEMENT
- STORAGE
- STREAM
- STREAMS
- STRICT
- STRING
- SUBSCRIPTIONS
- SYNCDB
- SYSINFO

### T

@@ -233,19 +289,24 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam

- TBNAME
- TIMES
- TIMESTAMP
- TIMEZONE
- TINYINT
- TO
- TODAY
- TOPIC
- TOPICS
- TRANSACTION
- TRANSACTIONS
- TRIGGER
- TRIM
- TSERIES
- TTL

### U

- UMINUS
- UNION
- UNSIGNED
- UPDATE
- UPLUS
- USE
- USER
- USERS

@@ -253,9 +314,13 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam

### V

- VALUE
- VALUES
- VARCHAR
- VARIABLE
- VARIABLES
- VERBOSE
- VGROUP
- VGROUPS
- VIEW
- VNODES

@@ -263,14 +328,26 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam

### W

- WAL
- WAL_FSYNC_PERIOD
- WAL_LEVEL
- WAL_RETENTION_PERIOD
- WAL_RETENTION_SIZE
- WAL_ROLL_PERIOD
- WAL_SEGMENT_SIZE
- WATERMARK
- WHERE
- WINDOW_CLOSE
- WITH
- WRITE

### \_

- \_C0
- \_IROWTS
- \_QDURATION
- \_QEND
- \_QSTART
- \_ROWTS
- \_WDURATION
- \_WEND
- \_WSTART

@@ -64,6 +64,12 @@ dnode_option: {

The parameters that you can modify through this statement are the same as those located in the dnode configuration file. Modifications that you make through this statement take effect immediately, while modifications to the configuration file take effect when the dnode restarts.

`value` is the value of the parameter, which needs to be in character format. For example, to modify the log output level of dnode 1 to debug:

```sql
ALTER DNODE 1 'debugFlag' '143';
```

## Add an Mnode

```sql

@@ -136,19 +142,3 @@ The parameters that you can modify through this statement are the same as those

```sql
SHOW LOCAL VARIABLES;
```

## Combine Vgroups

```sql
MERGE VGROUP vgroup_no1 vgroup_no2;
```

If load and data are not properly balanced among vgroups because the data in different timelines has different characteristics, you can combine or separate vgroups.

## Separate Vgroups

```sql
SPLIT VGROUP vgroup_no;
```

This statement creates a new vgroup and migrates part of the data from the original vgroup to the new vgroup with consistent hashing. During this process, the original vgroup can continue to provide services normally.

@@ -11,7 +11,15 @@ TDengine includes a built-in database named `INFORMATION_SCHEMA` to provide acce

4. Future versions of TDengine can add new columns to INFORMATION_SCHEMA tables without affecting existing business systems.
5. It is easier for users coming from other database management systems. For example, Oracle users can query data dictionary tables.

:::info

- SHOW statements are still supported for the convenience of existing users.
- Some columns in the system tables may be keywords, so you need to use the escape character '\`' when querying them. For example, to query the vgroups of the database `test`:

  ```sql
  select `vgroups` from ins_databases where name = 'test';
  ```

:::

This document introduces the tables of INFORMATION_SCHEMA and their structure.

@@ -21,8 +29,8 @@ Provides information about dnodes. Similar to SHOW DNODES.

| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------ | ------------------------- |
| 1 | vnodes | SMALLINT | Current number of vnodes on the dnode. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 2 | support_vnodes | SMALLINT | Maximum number of vnodes on the dnode |

| 3 | status | BINARY(10) | Current status |
| 4 | note | BINARY(256) | Reason for going offline or other information |
| 5 | id | SMALLINT | Dnode ID |

@@ -41,16 +49,6 @@ Provides information about mnodes. Similar to SHOW MNODES.

| 4 | role_time | TIMESTAMP | Time at which the current role was assumed |
| 5 | create_time | TIMESTAMP | Creation time |

## INS_MODULES

Provides information about modules. Similar to SHOW MODULES.

| # | **Column** | **Data Type** | **Description** |
| --- | :------: | ------------ | ---------- |
| 1 | id | SMALLINT | Module ID |
| 2 | endpoint | BINARY(134) | Module endpoint |
| 3 | module | BINARY(10) | Module status |

## INS_QNODES

Provides information about qnodes. Similar to SHOW QNODES.

@@ -80,29 +78,33 @@ Provides information about user-created databases. Similar to SHOW DATABASES.

| 1 | name | BINARY(32) | Database name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | ntables | INT | Number of standard tables and subtables (not including supertables) |
| 4 | vgroups | INT | Number of vgroups. It should be noted that `vgroups` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | replica | INT | Number of replicas. It should be noted that `replica` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | strict | BINARY(3) | Strong consistency. It should be noted that `strict` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 8 | duration | INT | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | keep | INT | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | buffer | INT | Write cache size per vnode, in MB. It should be noted that `buffer` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 11 | pagesize | INT | Page size for vnode metadata storage engine, in KB. It should be noted that `pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 12 | pages | INT | Number of pages per vnode metadata storage engine. It should be noted that `pages` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 13 | minrows | INT | Minimum number of records per file block. It should be noted that `minrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 14 | maxrows | INT | Maximum number of records per file block. It should be noted that `maxrows` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 15 | comp | INT | Compression method. It should be noted that `comp` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 16 | precision | BINARY(2) | Time precision. It should be noted that `precision` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 17 | status | BINARY(10) | Current database status |
| 18 | retentions | BINARY(60) | Aggregation interval and retention period. It should be noted that `retentions` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 19 | single_stable | BOOL | Whether the database can contain multiple supertables. It should be noted that `single_stable` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 20 | cachemodel | BINARY(60) | Caching method for the newest data. It should be noted that `cachemodel` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 21 | cachesize | INT | Memory per vnode used for caching the newest data. It should be noted that `cachesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 22 | wal_level | INT | WAL level. It should be noted that `wal_level` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk. It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 24 | wal_retention_period | INT | WAL retention period. It should be noted that `wal_retention_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 25 | wal_retention_size | INT | Maximum WAL size. It should be noted that `wal_retention_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 26 | wal_roll_period | INT | WAL rotation period. It should be noted that `wal_roll_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 27 | wal_segment_size | BIGINT | WAL file size. It should be noted that `wal_segment_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 28 | stt_trigger | SMALLINT | The threshold for the number of files to trigger file merging. It should be noted that `stt_trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 29 | table_prefix | SMALLINT | The prefix length in the table name that is ignored when distributing a table to a vnode based on the table name. It should be noted that `table_prefix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 30 | table_suffix | SMALLINT | The suffix length in the table name that is ignored when distributing a table to a vnode based on the table name. It should be noted that `table_suffix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 31 | tsdb_pagesize | INT | The page size of the internal storage engine, in KB. It should be noted that `tsdb_pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_FUNCTIONS

@@ -111,8 +113,8 @@ Provides information about user-defined functions.

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | -------------- |
| 1 | name | BINARY(64) | Function name |
| 2 | comment | BINARY(255) | Function description. It should be noted that `comment` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 3 | aggregate | INT | Whether the UDF is an aggregate function. It should be noted that `aggregate` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | output_type | BINARY(31) | Output data type |
| 5 | create_time | TIMESTAMP | Creation time |
| 6 | code_len | INT | Length of the source code |

@@ -141,12 +143,12 @@ Provides information about supertables.

| 2 | db_name | BINARY(64) | Database containing the supertable |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | columns | INT | Number of columns |
| 5 | tags | INT | Number of tags. It should be noted that `tags` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | last_update | TIMESTAMP | Last updated time |
| 7 | table_comment | BINARY(1024) | Table description |
| 8 | watermark | BINARY(64) | Window closing time. It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | max_delay | BINARY(64) | Maximum delay for pushing stream processing results. It should be noted that `max_delay` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | rollup | BINARY(128) | Rollup aggregate function. It should be noted that `rollup` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_TABLES

@@ -161,7 +163,7 @@ Provides information about standard tables and subtables.

| 5 | stable_name | BINARY(192) | Supertable name |
| 6 | uid | BIGINT | Table ID |
| 7 | vgroup_id | INT | Vgroup ID |
| 8 | ttl | INT | Table time-to-live. It should be noted that `ttl` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | table_comment | BINARY(1024) | Table description |
| 10 | type | BINARY(20) | Table type |

@@ -194,13 +196,13 @@ Provides information about TDengine Enterprise Edition permissions.

| --- | :---------: | ------------ | -------------------------------------------------- |
| 1 | version | BINARY(9) | Whether the deployment is a licensed or trial version |
| 2 | cpu_cores | BINARY(9) | CPU cores included in license |
| 3 | dnodes | BINARY(10) | Dnodes included in license. It should be noted that `dnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | streams | BINARY(10) | Streams included in license. It should be noted that `streams` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 5 | users | BINARY(10) | Users included in license. It should be noted that `users` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 6 | accounts | BINARY(10) | Accounts included in license. It should be noted that `accounts` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 7 | storage | BINARY(21) | Storage space included in license. It should be noted that `storage` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 8 | connections | BINARY(21) | Client connections included in license. It should be noted that `connections` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | databases | BINARY(11) | Databases included in license. It should be noted that `databases` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 10 | speed | BINARY(9) | Write speed specified in license (data points per second) |
| 11 | querytime | BINARY(9) | Total query time specified in license |
| 12 | timeseries | BINARY(21) | Number of metrics included in license |

@@ -215,7 +217,7 @@ Provides information about vgroups.

| --- | :-------: | ------------ | ------------------------------------------------------ |
| 1 | vgroup_id | INT | Vgroup ID |
| 2 | db_name | BINARY(32) | Database name |
| 3 | tables | INT | Tables in vgroup. It should be noted that `tables` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 4 | status | BINARY(10) | Vgroup status |
| 5 | v1_dnode | INT | Dnode ID of first vgroup member |
| 6 | v1_status | BINARY(10) | Status of first vgroup member |

@@ -234,7 +236,7 @@ Provides system configuration information.

| # | **Column** | **Data Type** | **Description** |
| --- | :------: | ------------ | ------------ |
| 1 | name | BINARY(32) | Parameter |
| 2 | value | BINARY(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_DNODE_VARIABLES

@@ -244,4 +246,36 @@ Provides dnode configuration information.

| --- | :------: | ------------ | ------------ |
| 1 | dnode_id | INT | Dnode ID |
| 2 | name | BINARY(32) | Parameter |
| 3 | value | BINARY(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_TOPICS

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | ------------------------------ |
| 1 | topic_name | BINARY(192) | Topic name |
| 2 | db_name | BINARY(64) | Database for the topic |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | sql | BINARY(1024) | SQL statement used to create the topic |

## INS_SUBSCRIPTIONS

| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------ | ------------------------ |
| 1 | topic_name | BINARY(204) | Subscribed topic |
| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
| 3 | vgroup_id | INT | Vgroup ID for the consumer |
| 4 | consumer_id | BIGINT | Consumer ID |

## INS_STREAMS

| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------ | --------------------------------------- |
| 1 | stream_name | BINARY(64) | Stream name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
| 4 | status | BINARY(20) | Current status |
| 5 | source_db | BINARY(64) | Source database |
| 6 | target_db | BINARY(64) | Target database |
| 7 | target_table | BINARY(192) | Target table |
| 8 | watermark | BIGINT | Watermark (see stream processing documentation). It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation). It should be noted that `trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

@@ -61,15 +61,6 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE

| 12 | sub_status | BINARY(1000) | Subquery status |
| 13 | sql | BINARY(1024) | SQL statement |

## PERF_TOPICS

| # | **Column** | **Data Type** | **Description** |
| --- | :---------: | ------------ | ------------------------------ |
| 1 | topic_name | BINARY(192) | Topic name |
| 2 | db_name | BINARY(64) | Database for the topic |
| 3 | create_time | TIMESTAMP | Creation time |
| 4 | sql | BINARY(1024) | SQL statement used to create the topic |

## PERF_CONSUMERS

| # | **Column** | **Data Type** | **Description** |

@@ -83,15 +74,6 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE

| 7 | subscribe_time | TIMESTAMP | Time of first subscription |
| 8 | rebalance_time | TIMESTAMP | Time of first rebalance triggering |

## PERF_SUBSCRIPTIONS

| # | **Column** | **Data Type** | **Description** |
| --- | :------------: | ------------ | ------------------------ |
| 1 | topic_name | BINARY(204) | Subscribed topic |
| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
| 3 | vgroup_id | INT | Vgroup ID for the consumer |
| 4 | consumer_id | BIGINT | Consumer ID |

## PERF_TRANS

| # | **Column** | **Data Type** | **Description** |

@@ -113,17 +95,3 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE

| 2 | create_time | TIMESTAMP | Creation time |
| 3 | stable_name | BINARY(192) | Supertable name |
| 4 | vgroup_id | INT | Dedicated vgroup name |

## PERF_STREAMS

| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------ | --------------------------------------- |
| 1 | stream_name | BINARY(64) | Stream name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
| 4 | status | BINARY(20) | Current status |
| 5 | source_db | BINARY(64) | Source database |
| 6 | target_db | BINARY(64) | Target database |
| 7 | target_table | BINARY(192) | Target table |
| 8 | watermark | BIGINT | Watermark (see stream processing documentation) |
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) |

@@ -5,16 +5,6 @@ title: SHOW Statement for Metadata

`SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.

## SHOW ACCOUNTS

```sql
SHOW ACCOUNTS;
```

Shows information about tenants on the system.

Note: TDengine Enterprise Edition only.

## SHOW APPS

```sql

@@ -23,14 +13,6 @@ SHOW APPS;

Shows all clients (such as applications) that connect to the cluster.

## SHOW BNODES

```sql
SHOW BNODES;
```

Shows information about backup nodes (bnodes) in the system.

## SHOW CLUSTER

```sql

@@ -138,14 +120,6 @@ SHOW MNODES;

Shows information about mnodes in the system.

## SHOW MODULES

```sql
SHOW MODULES;
```

Shows information about modules installed in the system.

## SHOW QNODES

```sql

@@ -164,14 +138,6 @@ Shows information about the storage space allowed by the license.

Note: TDengine Enterprise Edition only.

## SHOW SNODES

```sql
SHOW SNODES;
```

Shows information about stream processing nodes (snodes) in the system.

## SHOW STABLES

```sql
|
||||
|
|
|
@ -9,15 +9,54 @@ This document describes how to manage permissions in TDengine.
|
|||
## Create a User
|
||||
|
||||
```sql
|
||||
CREATE USER use_name PASS 'password';
|
||||
CREATE USER user_name PASS 'password' [SYSINFO {1|0}];
|
||||
```
|
||||
|
||||
This statement creates a user account.
|
||||
|
||||
The maximum length of use_name is 23 bytes.
|
||||
The maximum length of user_name is 23 bytes.
|
||||
|
||||
The maximum length of password is 128 bytes. The password can include leters, digits, and special characters excluding single quotation marks, double quotation marks, backticks, backslashes, and spaces. The password cannot be empty.
|
||||
|
||||
`SYSINFO` indicates whether the user is allowed to view system information. `1` means allowed, `0` means not allowed. System information includes server configuration, dnode, vnode, storage. The default value is `1`.
|
||||
|
||||
For example, we can create a user whose password is `123456` and is able to view system information.
|
||||
|
||||
```sql
|
||||
taos> create user test pass '123456' sysinfo 1;
|
||||
Query OK, 0 of 0 rows affected (0.001254s)
|
||||
```
|
||||
|
||||
## View Users
|
||||
|
||||
To show the users in the system, please use
|
||||
|
||||
```sql
|
||||
SHOW USERS;
|
||||
```
|
||||
|
||||
This is an example:
|
||||
|
||||
```sql
|
||||
taos> show users;
|
||||
name | super | enable | sysinfo | create_time |
|
||||
================================================================================
|
||||
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
|
||||
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
|
||||
Query OK, 2 rows in database (0.001657s)
|
||||
```
|
||||
|
||||
Alternatively, you can get the user information by querying a built-in table, INFORMATION_SCHEMA.INS_USERS. For example:
|
||||
|
||||
```sql
|
||||
taos> select * from information_schema.ins_users;
|
||||
name | super | enable | sysinfo | create_time |
|
||||
================================================================================
|
||||
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
|
||||
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
|
||||
Query OK, 2 rows in database (0.001953s)
|
||||
```
|
||||
|
||||
## Delete a User
|
||||
|
||||
```sql
|
||||
|
@ -40,6 +79,13 @@ alter_user_clause: {
|
|||
- ENABLE: Specify whether the user is enabled or disabled. 1 indicates enabled and 0 indicates disabled.
|
||||
- SYSINFO: Specify whether the user can query system information. 1 indicates that the user can query system information and 0 indicates that the user cannot query system information.
|
||||
|
||||
For example, you can use the command below to disable user `test`:
|
||||
|
||||
```sql
|
||||
taos> alter user test enable 0;
|
||||
Query OK, 0 of 0 rows affected (0.001160s)
|
||||
```
|
||||
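A similar sketch toggles the SYSINFO privilege described above, assuming user `test` exists:

```sql
-- Revoke the ability of user test to view system information
ALTER USER test SYSINFO 0;
```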
|
||||
|
||||
## Grant Permissions
|
||||
|
||||
|
@ -62,7 +108,7 @@ priv_level : {
|
|||
}
|
||||
```
|
||||
|
||||
Grant permissions to a user.
|
||||
Grant permissions to a user. This feature is only available in the enterprise edition.
|
||||
|
||||
Permissions are granted on the database level. You can grant read or write permissions.
|
||||
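A minimal sketch following the grammar above; the database `test_db` and user `test` are hypothetical:

```sql
-- Grant the read privilege on database test_db to user test
GRANT READ ON test_db.* TO test;
-- Grant both read and write privileges
GRANT ALL ON test_db.* TO test;
```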
|
||||
|
@ -92,4 +138,4 @@ priv_level : {
|
|||
|
||||
```
|
||||
|
||||
Revoke permissions from a user.
|
||||
Revoke permissions from a user. This feature is only available in the enterprise edition.
|
||||
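A matching revocation sketch, again with a hypothetical database and user:

```sql
-- Revoke the write privilege on database test_db from user test
REVOKE WRITE ON test_db.* FROM test;
```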
|
|
|
@ -16,10 +16,10 @@ You can use the SHOW CONNECTIONS statement to find the conn_id.
|
|||
## Terminate a Query
|
||||
|
||||
```sql
|
||||
SHOW QUERY query_id;
|
||||
KILL QUERY kill_id;
|
||||
```
|
||||
|
||||
You can use the SHOW QUERIES statement to find the query_id.
|
||||
You can use the SHOW QUERIES statement to find the kill_id.
|
||||
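A sketch of the workflow; the kill_id value is hypothetical, and the quoting is an assumption based on the id containing a colon:

```sql
SHOW QUERIES;
-- Suppose the output lists a query whose kill_id is 2913:1
KILL QUERY '2913:1';
```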
|
||||
## Terminate a Transaction
|
||||
|
||||
|
|
|
@ -11,12 +11,13 @@ description: "This document explains how TDengine SQL has changed in version 3.0
|
|||
| 1 | VARCHAR | Added | Alias of BINARY.
|
||||
| 2 | TIMESTAMP literal | Added | TIMESTAMP 'timestamp format' syntax now supported.
|
||||
| 3 | _ROWTS pseudocolumn | Added | Indicates the primary key. Alias of _C0.
|
||||
| 4 | INFORMATION_SCHEMA | Added | Database for system metadata containing all schema definitions
|
||||
| 5 | PERFORMANCE_SCHEMA | Added | Database for system performance information.
|
||||
| 6 | Connection queries | Deprecated | Connection queries are no longer supported. The syntax and interfaces are deprecated.
|
||||
| 7 | Mixed operations | Enhanced | Mixing scalar and vector operations in queries has been enhanced and is supported in all SELECT clauses.
|
||||
| 8 | Tag operations | Added | Tag columns can be used in queries and clauses like data columns.
|
||||
| 9 | Timeline clauses and time functions in supertables | Enhanced | When PARTITION BY is not used, data in supertables is merged into a single timeline.
|
||||
| 4 | _IROWTS pseudocolumn | Added | Used to retrieve timestamps with INTERP function.
|
||||
| 5 | INFORMATION_SCHEMA | Added | Database for system metadata containing all schema definitions
|
||||
| 6 | PERFORMANCE_SCHEMA | Added | Database for system performance information.
|
||||
| 7 | Connection queries | Deprecated | Connection queries are no longer supported. The syntax and interfaces are deprecated.
|
||||
| 8 | Mixed operations | Enhanced | Mixing scalar and vector operations in queries has been enhanced and is supported in all SELECT clauses.
|
||||
| 9 | Tag operations | Added | Tag columns can be used in queries and clauses like data columns.
|
||||
| 10 | Timeline clauses and time functions in supertables | Enhanced | When PARTITION BY is not used, data in supertables is merged into a single timeline.
|
||||
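For instance, the `_IROWTS` pseudocolumn added in item 4 can be used with the INTERP function roughly as follows; the table `meters`, column `current`, and time range are hypothetical:

```sql
-- _irowts returns the timestamp of each row produced by INTERP
SELECT _irowts, INTERP(current)
FROM meters
RANGE('2022-08-01 00:00:00', '2022-08-01 01:00:00')
EVERY(10s)
FILL(LINEAR);
```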
|
||||
## SQL Syntax
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
title: TDengine Monitoring
|
||||
---
|
||||
|
||||
After TDengine is started, a database named `log` is created automatically to help with monitoring. Information that includes CPU, memory and disk usage, bandwidth, number of requests, disk I/O speed, slow queries, is written into the `log` database at a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in `log` database from TDengine CLI or from a web console.
|
||||
After TDengine is started, it automatically writes monitoring data, including CPU, memory and disk usage, bandwidth, number of requests, disk I/O speed, and slow queries, into a designated database at a predefined interval through taosKeeper. Additionally, some important system operations, like logon, create user, and drop database, as well as alerts and warnings generated in TDengine, are written into the `log` database. A system operator can view the data in the `log` database from the TDengine CLI or from a web console.
|
||||
|
||||
The collection of the monitoring information is enabled by default, but can be disabled by parameter `monitor` in the configuration file.
|
||||
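For example, with monitoring enabled, a quick way to see which monitoring stables have been created; the exact stable names vary by version, so this is only a sketch:

```sql
USE log;
SHOW STABLES;
```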
|
||||
|
@ -10,7 +10,7 @@ The collection of the monitoring information is enabled by default, but can be d
|
|||
|
||||
TDinsight is a complete solution which uses the monitoring database `log` mentioned previously, and Grafana, to monitor a TDengine cluster.
|
||||
|
||||
From version 2.3.3.0, more monitoring data has been added in the `log` database. Please refer to [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) to learn more details about using TDinsight to monitor TDengine.
|
||||
Please refer to [TDinsight Grafana Dashboard](../../reference/tdinsight) to learn more details about using TDinsight to monitor TDengine.
|
||||
|
||||
A script `TDinsight.sh` is provided to deploy TDinsight automatically.
|
||||
|
||||
|
@ -30,31 +30,14 @@ Prepare:
|
|||
|
||||
2. Grafana Alert Notification
|
||||
|
||||
There are two ways to setup Grafana alert notification.
|
||||
You can use the command below to set up Grafana alert notification.
|
||||
|
||||
- An existing Grafana Notification Channel can be specified with parameter `-E`, the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq`
|
||||
An existing Grafana Notification Channel can be specified with the parameter `-E`; the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq`
|
||||
|
||||
```bash
|
||||
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
||||
```
|
||||
|
||||
- The AliCloud SMS alert built in TDengine data source plugin can be enabled with parameter `-s`, the parameters of enabling this plugin are listed below:
|
||||
|
||||
- `-I`: AliCloud SMS Key ID
|
||||
- `-K`: AliCloud SMS Key Secret
|
||||
- `-S`: AliCloud SMS Signature
|
||||
- `-C`: SMS notification template
|
||||
- `-T`: Input parameters in JSON format for the SMS notification template, for example`{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}`
|
||||
- `-B`: List of mobile numbers to be notified
|
||||
|
||||
Below is an example of the full command using the AliCloud SMS alert.
|
||||
|
||||
```bash
|
||||
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -s \
|
||||
-I XXXXXXX -K XXXXXXXX -S taosdata -C SMS_1111111 -B 18900000000 \
|
||||
-T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}'
|
||||
```
|
||||
|
||||
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
|
||||
|
||||
For more use cases and restrictions please refer to [TDinsight](/reference/tdinsight/).
|
||||
|
|
|
@ -6,7 +6,7 @@ title: Problem Diagnostics
|
|||
|
||||
When a TDengine client is unable to access a TDengine server, the network connection between the client side and the server side must be checked to find the root cause and resolve problems.
|
||||
|
||||
Diagnostics for network connections can be executed between Linux and Linux or between Linux and Windows.
|
||||
Network connection diagnostics can be executed between any two hosts running Linux, Windows, or macOS.
|
||||
|
||||
Diagnostic steps:
|
||||
|
||||
|
|
|
@ -123,7 +123,7 @@ where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.
|
|||
|
||||
### HTTP body structure
|
||||
|
||||
#### Successful Operation
|
||||
#### Successful Insert Operation
|
||||
|
||||
Example:
|
||||
|
||||
|
@ -143,7 +143,7 @@ Description:
|
|||
- rows: (`int`) Only returns `1`.
|
||||
- data: (`[][]any`) Returns the number of rows affected.
|
||||
|
||||
#### Successful Query
|
||||
#### Successful Query Operation
|
||||
|
||||
Example:
|
||||
|
||||
|
|
|
@ -13,11 +13,13 @@ After TDengine server or client installation, `taos.h` is located at
|
|||
|
||||
- Linux:`/usr/local/taos/include`
|
||||
- Windows:`C:\TDengine\include`
|
||||
- macOS:`/usr/local/include`
|
||||
|
||||
The dynamic libraries for the TDengine client driver are located at:
|
||||
|
||||
- Linux: `/usr/local/taos/driver/libtaos.so`
|
||||
- Windows: `C:\TDengine\taos.dll`
|
||||
- macOS: `/usr/local/lib/libtaos.dylib`
|
||||
|
||||
## Supported platforms
|
||||
|
||||
|
@ -119,7 +121,7 @@ This section shows sample code for standard access methods to TDengine clusters
|
|||
|
||||
:::info
|
||||
More example code and downloads are available at [GitHub](https://github.com/taosdata/TDengine/tree/develop/examples/c).
|
||||
You can find it in the installation directory under the `examples/c` path. This directory has a makefile and can be compiled under Linux by executing `make` directly.
|
||||
You can find it in the installation directory under the `examples/c` path. This directory has a makefile and can be compiled under Linux/macOS by executing `make` directly.
|
||||
**Hint:** When compiling in an ARM environment, please remove `-msse4.2` from the makefile. This option is only supported on the x64/x86 hardware platforms.
|
||||
|
||||
:::
|
||||
|
|
|
@ -109,7 +109,7 @@ TDengine's JDBC URL specification format is:
|
|||
|
||||
For establishing connections, native connections differ slightly from REST connections.
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<Tabs defaultValue="rest">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
```java
|
||||
|
@ -120,13 +120,13 @@ Connection conn = DriverManager.getConnection(jdbcUrl);
|
|||
|
||||
In the above example, TSDBDriver, which uses a JDBC native connection, establishes a connection to a hostname `taosdemo.com`, port `6030` (the default port for TDengine), and a database named `test`. In this URL, the user name `user` is specified as `root`, and the `password` is `taosdata`.
|
||||
|
||||
Note: With JDBC native connections, taos-jdbcdriver relies on the client driver (`libtaos.so` on Linux; `taos.dll` on Windows).
|
||||
Note: With JDBC native connections, taos-jdbcdriver relies on the client driver (`libtaos.so` on Linux; `taos.dll` on Windows; `libtaos.dylib` on macOS).
|
||||
|
||||
The configuration parameters in the URL are as follows:
|
||||
|
||||
- user: Log in to the TDengine username. The default value is 'root'.
|
||||
- password: User login password, the default value is 'taosdata'.
|
||||
- cfgdir: client configuration file directory path, default '/etc/taos' on Linux OS, 'C:/TDengine/cfg' on Windows OS.
|
||||
- cfgdir: client configuration file directory path, default '/etc/taos' on Linux OS, 'C:/TDengine/cfg' on Windows OS, '/etc/taos' on macOS.
|
||||
- charset: The character set used by the client, the default value is the system character set.
|
||||
- locale: Client locale, by default, use the system's current locale.
|
||||
- timezone: The time zone used by the client, the default value is the system's current time zone.
|
||||
|
@ -172,7 +172,7 @@ In the above example, JDBC uses the client's configuration file to establish a c
|
|||
|
||||
In TDengine, as long as one node in firstEp and secondEp is valid, the connection to the cluster can be established normally.
|
||||
|
||||
The configuration file here refers to the configuration file on the machine where the application that calls the JDBC Connector is located, the default path is `/etc/taos/taos.cfg` on Linux, and the default path is `C://TDengine/cfg/taos.cfg` on Windows.
|
||||
The configuration file here refers to the configuration file on the machine where the application that calls the JDBC Connector is located, the default path is `/etc/taos/taos.cfg` on Linux, the default path is `C://TDengine/cfg/taos.cfg` on Windows, and the default path is `/etc/taos/taos.cfg` on macOS.
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="rest" label="REST connection">
|
||||
|
@ -261,7 +261,7 @@ The configuration parameters in properties are as follows.
|
|||
- TSDBDriver.PROPERTY_KEY_PASSWORD: user login password, default value 'taosdata'.
|
||||
- TSDBDriver.PROPERTY_KEY_BATCH_LOAD: true: pull the result set in batch when executing query; false: pull the result set row by row. The default value is: false.
|
||||
- TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true: when executing executeBatch of Statement, if there is a SQL execution failure in the middle, continue to execute the following sql. false: no longer execute any statement after the failed SQL. The default value is: false.
|
||||
- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: only works when using JDBC native connection. Client configuration file directory path, default value `/etc/taos` on Linux OS, default value `C:/TDengine/cfg` on Windows OS.
|
||||
- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: only works when using JDBC native connection. Client configuration file directory path, default value `/etc/taos` on Linux OS, default value `C:/TDengine/cfg` on Windows OS, default value `/etc/taos` on macOS.
|
||||
- TSDBDriver.PROPERTY_KEY_CHARSET: In the character set used by the client, the default value is the system character set.
|
||||
- TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale.
|
||||
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. In the time zone used by the client, the default value is the system's current time zone.
|
||||
|
@ -896,7 +896,7 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
|
|||
|
||||
**Cause**: The program did not find the dependent native library `taos`.
|
||||
|
||||
**Solution**: On Windows you can copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32` directory, on Linux the following soft link will be created `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` will work.
|
||||
**Solution**: On Windows you can copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32` directory; on Linux, creating the soft link `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` will work; on macOS, the soft link is `/usr/local/lib/libtaos.dylib`.
|
||||
|
||||
3. java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
|
||||
|
||||
|
|
|
@ -7,7 +7,6 @@ title: TDengine Go Connector
|
|||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
|
||||
import Preparition from "./_preparition.mdx"
|
||||
import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx"
|
||||
import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx"
|
||||
import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx"
|
||||
|
@ -113,7 +112,7 @@ username:password@protocol(address)/dbname?param=value
|
|||
```
|
||||
### Connecting via connector
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<Tabs defaultValue="rest">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
_taosSql_ implements Go's `database/sql/driver` interface via cgo. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply introducing the driver.
|
||||
|
@ -176,6 +175,37 @@ func main() {
|
|||
}
|
||||
```
|
||||
</TabItem>
|
||||
<TabItem value="WebSocket" label="WebSocket connection">
|
||||
|
||||
_taosWS_ implements Go's `database/sql/driver` interface via WebSocket. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply introducing the driver (driver-go minimum version 3.0.2).
|
||||
|
||||
Use `taosWS` as `driverName` and use a correct [DSN](#DSN) as `dataSourceName` with the following parameters supported by the DSN.
|
||||
|
||||
* `writeTimeout` The timeout to send data via WebSocket.
|
||||
* `readTimeout` The timeout to receive response data via WebSocket.
|
||||
|
||||
For example:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
_ "github.com/taosdata/driver-go/v3/taosWS"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var taosUri = "root:taosdata@ws(localhost:6041)/"
|
||||
taos, err := sql.Open("taosWS", taosUri)
|
||||
if err != nil {
|
||||
fmt.Println("failed to connect TDengine, err:", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
```
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
## Usage examples
|
||||
|
@ -331,7 +361,7 @@ Creates consumer group.
|
|||
|
||||
* `func (c *Consumer) Subscribe(topics []string) error`
|
||||
|
||||
Subscribes to a topic.
|
||||
Subscribes to topics.
|
||||
|
||||
* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
|
||||
|
||||
|
@ -409,6 +439,30 @@ Close consumer.
|
|||
|
||||
Closes the parameter binding.
|
||||
|
||||
### Subscribe via WebSocket
|
||||
|
||||
* `func NewConsumer(config *Config) (*Consumer, error)`
|
||||
|
||||
Creates consumer group.
|
||||
|
||||
* `func (c *Consumer) Subscribe(topic []string) error`
|
||||
|
||||
Subscribes to topics.
|
||||
|
||||
* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
|
||||
|
||||
Polling information.
|
||||
|
||||
* `func (c *Consumer) Commit(messageID uint64) error`
|
||||
|
||||
Commit information.
|
||||
|
||||
* `func (c *Consumer) Close() error`
|
||||
|
||||
Close consumer.
|
||||
|
||||
For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
|
||||
|
||||
## API Reference
|
||||
|
||||
Full API see [driver-go documentation](https://pkg.go.dev/github.com/taosdata/driver-go/v3)
|
||||
|
|
|
@ -7,7 +7,7 @@ title: TDengine Rust Connector
|
|||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
|
||||
import Preparition from "./_preparition.mdx"
|
||||
import Preparition from "./_preparation.mdx"
|
||||
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
|
||||
import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
|
||||
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
|
||||
|
@ -55,16 +55,6 @@ taos = "*"
|
|||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="native" label="native connection only">
|
||||
|
||||
In `cargo.toml`, add [taos][taos] and enable the native feature:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
taos = { version = "*", default-features = false, features = ["native"] }
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="rest" label="Websocket only">
|
||||
|
||||
In `cargo.toml`, add [taos][taos] and enable the ws feature:
|
||||
|
@ -75,6 +65,18 @@ taos = { version = "*", default-features = false, features = ["ws"] }
|
|||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="native" label="native connection only">
|
||||
|
||||
In `cargo.toml`, add [taos][taos] and enable the native feature:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
taos = { version = "*", default-features = false, features = ["native"] }
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
</Tabs>
|
||||
|
||||
## Establishing a connection
|
||||
|
@ -116,7 +118,7 @@ The parameters are described as follows:
|
|||
- **protocol**: Specify which connection method to use. For example, `taos+ws://localhost:6041` uses Websocket to establish connections.
|
||||
- **username/password**: Username and password used to create connections.
|
||||
- **host/port**: Specifies the server and port to establish a connection. If you do not specify a hostname or port, native connections default to `localhost:6030` and Websocket connections default to `localhost:6041`.
|
||||
- **database**: Specify the default database to connect to.
|
||||
- **database**: Specify the default database to connect to. It's optional.
|
||||
- **params**: Optional parameters.
|
||||
|
||||
A sample DSN description string is as follows:
|
||||
|
@ -155,15 +157,15 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
|
|||
let inserted = taos.exec_many([
|
||||
// create super table
|
||||
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
|
||||
TAGS (`groupid` INT, `location` BINARY(16))",
|
||||
TAGS (`groupid` INT, `location` BINARY(24))",
|
||||
// create child table
|
||||
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
|
||||
"CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')",
|
||||
// insert into child table
|
||||
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
|
||||
// insert with NULL values
|
||||
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
|
||||
// insert and automatically create table with tags if not exists
|
||||
"INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
|
||||
"INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)",
|
||||
// insert many records in a single sql
|
||||
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
|
||||
]).await?;
|
||||
|
|
|
@ -7,7 +7,7 @@ description: "taospy is the official Python connector for TDengine. taospy provi
|
|||
import Tabs from "@theme/Tabs";
|
||||
import TabItem from "@theme/TabItem";
|
||||
|
||||
`taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
|
||||
`taospy` is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
|
||||
In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
|
||||
|
||||
The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection".
|
||||
|
@ -32,7 +32,7 @@ We recommend using the latest version of `taospy`, regardless of the version of
|
|||
|
||||
### Preparation
|
||||
|
||||
1. Install Python. Python >= 3.6 is recommended. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
|
||||
1. Install Python. Python >= 3.7 is recommended. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
|
||||
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.
|
||||
If you use a native connection, you will also need to [Install Client Driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.
|
||||
|
||||
|
@ -80,7 +80,7 @@ pip3 install git+https://github.com/taosdata/taos-connector-python.git
|
|||
|
||||
### Verify
|
||||
|
||||
<Tabs groupId="connect" default="native">
|
||||
<Tabs defaultValue="rest">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
For a native connection, you need to verify that both the client driver and the Python connector itself are installed correctly. The client driver and Python connector have been installed properly if you can successfully import the `taos` module. In the Python Interactive Shell, you can type:
|
||||
|
@ -118,10 +118,10 @@ Requirement already satisfied: taospy in c:\users\username\appdata\local\program
|
|||
|
||||
Before establishing a connection with the connector, we recommend testing the connectivity of the local TDengine CLI to the TDengine cluster.
|
||||
|
||||
<Tabs>
|
||||
<Tabs defaultValue="rest">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
Ensure that the TDengine instance is up and that the FQDN of the machines in the cluster (the FQDN defaults to hostname if you are starting a standalone version) can be resolved locally, by testing with the `ping` command.
|
||||
Ensure that the TDengine instance is up and that the FQDN of the machines in the cluster (the FQDN defaults to hostname if you are starting a stand-alone version) can be resolved locally, by testing with the `ping` command.
|
||||
|
||||
```
|
||||
ping <FQDN>
|
||||
|
@ -173,7 +173,7 @@ If the test is successful, it will output the server version information, e.g.
|
|||
|
||||
The following example code assumes that TDengine is installed locally and that the default configuration is used for both FQDN and serverPort.
|
||||
|
||||
<Tabs>
|
||||
<Tabs defaultValue="rest">
|
||||
<TabItem value="native" label="native connection" groupId="connect">
|
||||
|
||||
```python
|
||||
|
@ -186,7 +186,7 @@ All arguments of the `connect()` function are optional keyword arguments. The fo
|
|||
- `user` : The TDengine user name. The default value is `root`.
|
||||
- `password` : TDengine user password. The default value is `taosdata`.
|
||||
- `port` : The starting port of the data node to connect to, i.e., the serverPort configuration. The default value is 6030, which will only take effect if the host parameter is provided.
|
||||
- `config` : The path to the client configuration file. On Windows systems, the default is `C:\TDengine\cfg`. The default is `/etc/taos/` on Linux systems.
|
||||
- `config` : The path to the client configuration file. On Windows systems, the default is `C:\TDengine\cfg`. The default is `/etc/taos/` on Linux/macOS.
|
||||
- `timezone` : The timezone used to convert the TIMESTAMP data in the query results to python `datetime` objects. The default is the local timezone.
|
||||
|
||||
:::warning
|
||||
|
@ -219,7 +219,7 @@ All arguments to the `connect()` function are optional keyword arguments. The fo
|
|||
|
||||
### Basic Usage
|
||||
|
||||
<Tabs default="native" groupId="connect">
|
||||
<Tabs defaultValue="rest">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
##### TaosConnection class
|
||||
|
@ -289,7 +289,7 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
|
|||
|
||||
### Used with pandas
|
||||
|
||||
<Tabs default="native" groupId="connect">
|
||||
<Tabs defaultValue="rest">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
```python
|
||||
|
|
|
@ -7,7 +7,7 @@ title: TDengine Node.js Connector
|
|||
import Tabs from "@theme/Tabs";
|
||||
import TabItem from "@theme/TabItem";
|
||||
|
||||
import Preparition from "./_preparition.mdx";
|
||||
import Preparition from "./_preparation.mdx";
|
||||
import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx";
|
||||
import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx";
|
||||
import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx";
|
||||
|
@ -85,7 +85,7 @@ If using ARM64 Node.js on Windows 10 ARM, you must add "Visual C++ compilers and
|
|||
|
||||
### Install via npm
|
||||
|
||||
<Tabs defaultValue="install_native">
|
||||
<Tabs defaultValue="install_rest">
|
||||
<TabItem value="install_native" label="Install native connector">
|
||||
|
||||
```bash
|
||||
|
@ -124,7 +124,7 @@ node nodejsChecker.js host=localhost
|
|||
|
||||
Please choose one of the connectors to use.
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<Tabs defaultValue="rest">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
Install and import the `@tdengine/client` package.
|
||||
|
|
|
@ -7,7 +7,7 @@ title: C# Connector
|
|||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
|
||||
import Preparition from "./_preparition.mdx"
|
||||
import Preparition from "./_preparation.mdx"
|
||||
import CSInsert from "../../07-develop/03-insert-data/_cs_sql.mdx"
|
||||
import CSInfluxLine from "../../07-develop/03-insert-data/_cs_line.mdx"
|
||||
import CSOpenTSDBTelnet from "../../07-develop/03-insert-data/_cs_opts_telnet.mdx"
|
||||
|
@ -17,7 +17,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx"
|
|||
|
||||
`TDengine.Connector` is a C# language connector provided by TDengine that allows C# developers to develop C# applications that access TDengine cluster data.
|
||||
|
||||
The `TDengine.Connector` connector supports connect to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc. The `TDengine.Connector` currently does not provide a REST connection interface. Developers can write their RESTful application by referring to the [REST API](/reference/rest-api/) documentation.
|
||||
The `TDengine.Connector` connector supports connecting to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc. The `TDengine.Connector` also supports WebSocket connections; developers can build a connection through a DSN, which supports data writing, querying, and parameter binding.
|
||||
|
||||
This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to TDengine clusters via `TDengine.Connector` to perform basic operations such as data writing and querying.
|
||||
|
||||
|
@ -35,6 +35,10 @@ Please refer to [version support list](/reference/connector#version-support)
|
|||
|
||||
## Supported features
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
|
||||
<TabItem value="native" label="Native Connection">
|
||||
|
||||
1. Connection Management
|
||||
2. General Query
|
||||
3. Continuous Query
|
||||
|
@ -42,6 +46,18 @@ Please refer to [version support list](/reference/connector#version-support)
|
|||
5. Subscription
|
||||
6. Schemaless
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="rest" label="WebSocket Connection">
|
||||
|
||||
1. Connection Management
|
||||
2. General Query
|
||||
3. Continuous Query
|
||||
4. Parameter Binding
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
## Installation Steps
|
||||
|
||||
### Pre-installation preparation
|
||||
|
@ -74,12 +90,18 @@ cp -r src/ myProject
|
|||
cd myProject
|
||||
dotnet add example.csproj reference src/TDengine.csproj
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
## Establish a Connection
|
||||
|
||||
``` C#
|
||||
|
||||
<Tabs defaultValue="rest">
|
||||
|
||||
<TabItem value="native" label="Native Connection">
|
||||
|
||||
``` csharp
|
||||
using TDengineDriver;
|
||||
|
||||
namespace TDengineExample
|
||||
|
@ -112,14 +134,62 @@ namespace TDengineExample
|
|||
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="rest" label="WebSocket Connection">
|
||||
|
||||
The structure of the DSN description string is as follows:
|
||||
|
||||
```text
|
||||
[<protocol>]://[[<username>:<password>@]<host>:<port>][/<database>][?<p1>=<v1>[&<p2>=<v2>]]
|
||||
|------------|---|-----------|-----------|------|------|------------|-----------------------|
|
||||
| protocol | | username | password | host | port | database | params |
|
||||
```
|
||||
|
||||
The parameters are described as follows:
|
||||
|
||||
* **protocol**: Specify which connection method to use (http and ws are supported). For example, `ws://localhost:6041` uses WebSocket to establish connections.
|
||||
* **username/password**: Username and password used to create connections.
|
||||
* **host/port**: Specifies the server and port to establish a connection. Websocket connections default to `localhost:6041`.
|
||||
* **database**: Specify the default database to connect to. It's optional.
|
||||
* **params**: Optional parameters.
|
||||
|
||||
A sample DSN description string is as follows:
|
||||
|
||||
```text
|
||||
ws://localhost:6041/test
|
||||
```
|
||||
|
||||
``` csharp
|
||||
{{#include docs/examples/csharp/wsConnect/Program.cs}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
## Usage examples
|
||||
|
||||
### Write data
|
||||
|
||||
#### SQL Write
|
||||
|
||||
<Tabs defaultValue="rest">
|
||||
|
||||
<TabItem value="native" label="Native Connection">
|
||||
|
||||
<CSInsert />
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="rest" label="WebSocket Connection">
|
||||
|
||||
```csharp
|
||||
{{#include docs/examples/csharp/wsInsert/Program.cs}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
#### InfluxDB line protocol write
|
||||
|
||||
<CSInfluxLine />
|
||||
|
@ -132,12 +202,48 @@ namespace TDengineExample
|
|||
|
||||
<CSOpenTSDBJson />
|
||||
|
||||
#### Parameter Binding
|
||||
|
||||
<Tabs defaultValue="rest">
|
||||
|
||||
<TabItem value="native" label="Native Connection">
|
||||
|
||||
``` csharp
|
||||
{{#include docs/examples/csharp/stmtInsert/Program.cs}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="rest" label="WebSocket Connection">
|
||||
|
||||
```csharp
|
||||
{{#include docs/examples/csharp/wsStmt/Program.cs}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Query data
|
||||
|
||||
#### Synchronous Query
|
||||
|
||||
<Tabs defaultValue="rest">
|
||||
|
||||
<TabItem value="native" label="Native Connection">
|
||||
|
||||
<CSQuery />
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="rest" label="WebSocket Connection">
|
||||
|
||||
```csharp
|
||||
{{#include docs/examples/csharp/wsQuery/Program.cs}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
#### Asynchronous query
|
||||
|
||||
<CSAsyncQuery />
|
||||
|
@ -145,18 +251,21 @@ namespace TDengineExample
|
|||
### More sample programs
|
||||
|
||||
|Sample program |Sample program description |
|
||||
|--------------------------------------------------------------------------------------------------------------------|------------ --------------------------------|
|
||||
|--------------------------------------------------------------------------------------------------------------------|--------------------------------------------|
|
||||
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
|
||||
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
|
||||
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | Parameter binding with TDengine Connector |
|
||||
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | Schemaless writes with TDengine Connector |
|
||||
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
|
||||
| [TMQ](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | Data subscription with TDengine Connector |
|
||||
| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector |
|
||||
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector |
|
||||
| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | WebSocket parameter binding example |
|
||||
|
||||
## Important update records
|
||||
|
||||
| TDengine.Connector | Description |
|
||||
|--------------------|--------------------------------|
|
||||
| 3.0.1 | Supports WebSocket and Cloud, with query, insert, and parameter binding functions |
|
||||
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
|
||||
| 1.0.7 | Fixed TDengine.Query() memory leak. |
|
||||
| 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. |
|
||||
|
|
|
@ -13,11 +13,13 @@ After TDengine client or server is installed, `taos.h` is located at:
|
|||
|
||||
- Linux:`/usr/local/taos/include`
|
||||
- Windows:`C:\TDengine\include`
|
||||
- macOS:`/usr/local/include`
|
||||
|
||||
TDengine client driver is located at:
|
||||
|
||||
- Linux: `/usr/local/taos/driver/libtaos.so`
|
||||
- Windows: `C:\TDengine\taos.dll`
|
||||
- macOS:`/usr/local/lib/libtaos.dylib`
|
||||
|
||||
## Supported Platforms
|
||||
|
||||
|
|
|
@ -4,7 +4,7 @@ import PkgListV3 from "/components/PkgListV3";
|
|||
|
||||
<PkgListV3 type={1} sys="Linux" />
|
||||
|
||||
[All Downloads](../../releases)
|
||||
[All Downloads](../../releases/tdengine)
|
||||
|
||||
2. Unzip
|
||||
|
||||
|
|
|
@ -0,0 +1,19 @@
|
|||
import PkgListV3 from "/components/PkgListV3";
|
||||
|
||||
1. Download the client installation package
|
||||
|
||||
<PkgListV3 type={8} sys="macOS" />
|
||||
|
||||
[All Downloads](../../releases/tdengine)
|
||||
|
||||
2. Execute the installer, select the default value as prompted, and complete the installation. If the installation is blocked, you can right-click or ctrl-click on the installation package and select `Open`.
|
||||
3. Configure taos.cfg
|
||||
|
||||
Edit the `taos.cfg` file (the default full path is `/etc/taos/taos.cfg`) and set `firstEP` to the endpoint of your actual TDengine server, for example `h1.tdengine.com:6030`
|
||||
|
||||
:::tip
|
||||
|
||||
1. If the computer does not run the TDengine service but only has the TDengine client driver installed, you only need to configure `firstEP` in `taos.cfg`; there is no need to configure `FQDN`;
|
||||
2. If you encounter the "Unable to resolve FQDN" error, please make sure the FQDN in the `/etc/hosts` file of the current computer is correctly configured, or the DNS service is correctly configured.
|
||||
|
||||
:::
|
|
@ -6,5 +6,6 @@ Since the TDengine client driver is written in C, using the native connection re
|
|||
|
||||
- libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately.
|
||||
- taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately.
|
||||
- libtaos.dylib: After successful installation of TDengine on a macOS system, the dependent macOS version of the client driver `libtaos.dylib` file will be automatically linked to `/usr/local/lib/libtaos.dylib`, which is included in the macOS scannable path and does not need to be specified separately.
|
||||
|
||||
:::
|
||||
|
|
|
@ -1,10 +0,0 @@
|
|||
- The client driver has been installed (required when using a native connection; not required when using a REST connection)
|
||||
|
||||
:::info
|
||||
|
||||
Since the TDengine client driver is written in C, using a native connection requires loading the client driver shared library that is installed locally, which is usually included in the TDengine installation package. The TDengine Linux server installation package comes with the TDengine client; alternatively, the [Linux client](/get-started/) can be installed separately. When developing on Windows, you need to install the corresponding TDengine [Windows client](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client).
|
||||
|
||||
- libtaos.so: After TDengine is successfully installed on a Linux system, the dependent Linux version of the client driver libtaos.so is automatically copied to /usr/lib/libtaos.so; this directory is on the Linux automatic scan path and does not need to be specified separately.
|
||||
- taos.dll: After the client is installed on a Windows system, the dependent Windows version of the client driver taos.dll is automatically copied to the system default search path C:/Windows/System32 and likewise does not need to be specified separately.
|
||||
|
||||
:::
|
|
@ -4,11 +4,11 @@ Execute TDengine CLI program `taos` directly from the Linux shell to connect to
|
|||
$ taos
|
||||
|
||||
taos> show databases;
|
||||
name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size | wal_roll_period | wal_seg_size |
|
||||
=========================================================================================================================================================================================================================================================================================================================================================================================================================================================================
|
||||
information_schema | NULL | NULL | 14 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
|
||||
performance_schema | NULL | NULL | 3 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
|
||||
db | 2022-08-04 14:14:49.385 | 2 | 4 | 1 | off | 14400m | 5254560m,5254560m,5254560m | 96 | 4 | 256 | 100 | 4096 | 2 | ms | ready | NULL | false | none | 1 | 1 | 3000 | 0 | 0 | 0 | 0 |
|
||||
name |
|
||||
=================================
|
||||
information_schema |
|
||||
performance_schema |
|
||||
db |
|
||||
Query OK, 3 rows in database (0.019154s)
|
||||
|
||||
taos>
|
||||
|
|
|
@ -0,0 +1,15 @@
|
|||
Execute TDengine CLI program `taos` directly from the macOS shell to connect to the TDengine service and enter the TDengine CLI interface, as shown in the following example.
|
||||
|
||||
```text
|
||||
$ taos
|
||||
|
||||
taos> show databases;
|
||||
name |
|
||||
=================================
|
||||
information_schema |
|
||||
performance_schema |
|
||||
db |
|
||||
Query OK, 3 rows in database (0.019154s)
|
||||
|
||||
taos>
|
||||
```
|
|
@ -2,12 +2,11 @@ Go to the `C:\TDengine` directory from `cmd` and execute TDengine CLI program `t
|
|||
|
||||
```text
|
||||
taos> show databases;
|
||||
name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size | wal_roll_period | wal_seg_size |
|
||||
=========================================================================================================================================================================================================================================================================================================================================================================================================================================================================
|
||||
information_schema | NULL | NULL | 14 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
|
||||
performance_schema | NULL | NULL | 3 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
|
||||
test | 2022-08-04 16:46:40.506 | 2 | 0 | 1 | off | 14400m | 5256000m,5256000m,5256000m | 96 | 4 | 256 |
|
||||
100 | 4096 | 2 | ms | ready | NULL | false | none | 1 | 1 | 3000 | 0 | 0 | 0 | 0 |
|
||||
name |
|
||||
=================================
|
||||
information_schema |
|
||||
performance_schema |
|
||||
test |
|
||||
Query OK, 3 rows in database (0.123000s)
|
||||
|
||||
taos>
|
||||
|
|
|
@ -4,18 +4,20 @@ import PkgListV3 from "/components/PkgListV3";
|
|||
|
||||
<PkgListV3 type={4} sys="Windows" />
|
||||
|
||||
[All Downloads](../../releases)
|
||||
[All Downloads](../../releases/tdengine)
|
||||
2. Execute the installer, select the default value as prompted, and complete the installation
|
||||
3. Installation path
|
||||
|
||||
The default installation path is C:\TDengine, including the following files (directories).
|
||||
|
||||
- _taos.exe_ : TDengine CLI command-line program
|
||||
- _cfg_ : configuration file directory
|
||||
- _taos.exe_: TDengine CLI command-line program
|
||||
- _taosadapter.exe_: server-side executable that provides RESTful services and accepts writing requests from a variety of other software
|
||||
- _taosBenchmark.exe_: TDengine testing tool
|
||||
- _cfg_: configuration file directory
|
||||
- _driver_: client driver dynamic link library
|
||||
- _examples_: sample programs bash/C/C#/go/JDBC/Python/Node.js
|
||||
- _include_: header files
|
||||
- _log_ : log file
|
||||
- _log_: log file
|
||||
- _unins000.exe_: uninstaller
|
||||
|
||||
4. Configure taos.cfg
|
||||
|
|
|
@ -8,13 +8,15 @@ TDengine provides a rich set of APIs (application development interface). To fac
|
|||
|
||||
## Supported platforms
|
||||
|
||||
Currently, TDengine's native interface connectors can support platforms such as x64 and ARM hardware platforms and Linux and Windows development environments. The comparison matrix is as follows.
|
||||
Currently, TDengine's native interface connectors can support platforms such as x64 and ARM hardware platforms and Linux/Windows/macOS development environments. The comparison matrix is as follows.
|
||||
|
||||
| **CPU** | **OS** | **Java** | **Python** | **Go** | **Node.js** | **C#** | **Rust** | C/C++ |
|
||||
| -------------- | --------- | -------- | ---------- | ------ | ----------- | ------ | -------- | ----- |
|
||||
| **X86 64bit** | **Linux** | ● | ● | ● | ● | ● | ● | ● |
|
||||
| **X86 64bit** | **Win64** | ● | ● | ● | ● | ● | ● | ● |
|
||||
| **X86 64bit** | **macOS** | ○ | ● | ● | ○ | ○ | ● | ● |
|
||||
| **ARM64** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
|
||||
| **ARM64** | **macOS** | ○ | ● | ● | ○ | ○ | ● | ● |
|
||||
|
||||
Where ● means the official test verification passed, ○ means the unofficial test verification passed, and -- means no assurance.
|
||||
|
||||
|
@ -39,14 +41,14 @@ Comparing the connector support for TDengine functional features as follows.
|
|||
|
||||
### Using the native interface (taosc)
|
||||
|
||||
| **Functional Features** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
|
||||
| -------------- | -------- | ---------- | ------ | ------ | ----------- | -------- |
|
||||
| **Connection Management** | Support | Support | Support | Support | Support | Support |
|
||||
| **Regular Query** | Support | Support | Support | Support | Support | Support |
|
||||
| **Parameter Binding** | Support | Support | Support | Support | Support | Support |
|
||||
| ** TMQ ** | Support | Support | Support | Support | Support | Support |
|
||||
| **Schemaless** | Support | Support | Support | Support | Support | Support |
|
||||
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||
| **Functional Features** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
|
||||
| ----------------------------- | ------------- | ---------- | ------------- | ------------- | ------------- | ------------- |
|
||||
| **Connection Management** | Support | Support | Support | Support | Support | Support |
|
||||
| **Regular Query** | Support | Support | Support | Support | Support | Support |
|
||||
| **Parameter Binding** | Support | Support | Support | Support | Support | Support |
|
||||
| **Subscription (TMQ)** | Support | Support | Support | Support | Support | Support |
|
||||
| **Schemaless** | Support | Support | Support | Support | Support | Support |
|
||||
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||
|
||||
:::info
|
||||
The different database framework specifications for various programming languages do not mean that all C/C++ interfaces need a wrapper.
|
||||
|
@ -54,16 +56,15 @@ The different database framework specifications for various programming language
|
|||
|
||||
### Use HTTP Interfaces (REST or WebSocket)
|
||||
|
||||
| **Functional Features** | **Java** | **Python** | **Go** | **C# (not supported yet)** | **Node.js** | **Rust** |
|
||||
| ------------------------------ | -------- | ---------- | -------- | ------------------ | ----------- | -------- |
|
||||
| **Connection Management** | Support | Support | Support | N/A | Support | Support |
|
||||
| **Regular Query** | Support | Support | Support | N/A | Support | Support |
|
||||
| **Continous Query ** | Support | Support | Support | N/A | Support | Support |
|
||||
| **Parameter Binding** | Not supported | Not supported | Not supported | N/A | Not supported | Support |
|
||||
| ** TMQ ** | Not supported | Not supported | Not supported | N/A | Not supported | Support |
|
||||
| **Schemaless** | Not supported | Not supported | Not supported | N/A | Not supported | Not supported |
|
||||
| **Bulk Pulling (based on WebSocket) **| Support | Support | Not Supported | N/A | Not Supported | Supported |
|
||||
| **DataFrame** | Not supported | Support | Not supported | N/A | Not supported | Not supported |
|
||||
| **Functional Features** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
|
||||
| -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- |
|
||||
| **Connection Management** | Support | Support | Support | Support | Support | Support |
|
||||
| **Regular Query** | Support | Support | Support | Support | Support | Support |
|
||||
| **Parameter Binding** | Not supported | Not supported | Not supported | Support | Not supported | Support |
|
||||
| **Subscription (TMQ)** | Not supported | Not supported | Not supported | Not supported | Not supported | Support |
|
||||
| **Schemaless** | Not supported | Not supported | Not supported | Not supported | Not supported | Not supported |
|
||||
| **Bulk Pulling (based on WebSocket)** | Support | Support | Not supported | Support | Not supported | Support |
|
||||
| **DataFrame** | Not supported | Support | Not supported | Not supported | Not supported | Not supported |
|
||||
|
||||
:::warning
|
||||
|
||||
|
@ -73,10 +74,12 @@ The different database framework specifications for various programming language
|
|||
|
||||
import Tabs from "@theme/Tabs";
|
||||
import TabItem from "@theme/TabItem";
|
||||
import InstallOnWindows from "./_linux_install.mdx";
|
||||
import InstallOnLinux from "./_windows_install.mdx";
|
||||
import InstallOnLinux from "./_linux_install.mdx";
|
||||
import InstallOnWindows from "./_windows_install.mdx";
|
||||
import InstallOnMacOS from "./_macos_install.mdx";
|
||||
import VerifyWindows from "./_verify_windows.mdx";
|
||||
import VerifyLinux from "./_verify_linux.mdx";
|
||||
import VerifyMacOS from "./_verify_macos.mdx";
|
||||
|
||||
## Install Client Driver
|
||||
|
||||
|
@ -89,10 +92,13 @@ The client driver needs to be installed if you use the native interface connecto
|
|||
|
||||
<Tabs defaultValue="linux" groupId="os">
|
||||
<TabItem value="linux" label="Linux">
|
||||
<InstallOnWindows />
|
||||
<InstallOnLinux />
|
||||
</TabItem>
|
||||
<TabItem value="windows" label="Windows">
|
||||
<InstallOnLinux />
|
||||
<InstallOnWindows />
|
||||
</TabItem>
|
||||
<TabItem value="macos" label="MacOS">
|
||||
<InstallOnMacOS />
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
@ -107,5 +113,8 @@ After completing the above installation and configuration and you have confirmed
|
|||
<TabItem value="windows" label="Windows">
|
||||
<VerifyWindows />
|
||||
</TabItem>
|
||||
<TabItem value="macos" label="MacOS">
|
||||
<VerifyMacOS />
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
|
|
@ -30,7 +30,7 @@ taosAdapter provides the following features.
|
|||
|
||||
### Install taosAdapter
|
||||
|
||||
If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TDengine 3.0 released versions](../../releases) to download the TDengine server installation package. If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation.
|
||||
If you use the TDengine server, you don't need additional steps to install taosAdapter. You can go to [TDengine 3.0 released versions](../../releases/tdengine) to download the TDengine server installation package. If you need to deploy taosAdapter separately on a server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter](https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation.
|
||||
|
||||
### Start/Stop taosAdapter
|
||||
|
||||
|
@ -196,7 +196,8 @@ Support InfluxDB query parameters as follows.
|
|||
- `u` TDengine user name
|
||||
- `p` TDengine password
|
||||
|
||||
Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
|
||||
Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
|
||||
Example: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
|
||||
|
||||
### OpenTSDB
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@ toc_max_heading_level: 4
|
|||
description: "taosBenchmark (once called taosdemo) is a tool for testing the performance of TDengine."
|
||||
---
|
||||
|
||||
## Introduction
|
||||
# Introduction
|
||||
|
||||
taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDengine products. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. taosBenchmark can be configured to generate user-defined databases, supertables, subtables, and the time series data to populate these for performance benchmarking. taosBenchmark is highly configurable and some of the configurations include the time interval for inserting data, the number of working threads and the capability to insert disordered data. The installer provides taosdemo as a soft link to taosBenchmark for compatibility with past users.
|
||||
|
||||
|
@ -23,7 +23,7 @@ There are two ways to install taosBenchmark:
|
|||
|
||||
taosBenchmark needs to be executed in the terminal of the operating system. It supports two configuration methods: [Command-line arguments](#command-line-arguments-in-detail) and [JSON configuration file](#configuration-file-parameters-in-detail). These two methods are mutually exclusive. Users can use `-f <json file>` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, but not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters.
|
||||
|
||||
taosBenchmark supports the complete performance testing of TDengine by providing functionally to write, query, and subscribe. These three functions are mutually exclusive, users can only select one of them each time taosBenchmark runs. The query and subscribe functionalities are only configurable using a json configuration file by specifying the parameter `filetype`, while write can be performed through both the command-line and a configuration file. If you want to test the performance of queries or data subscriptionm configure taosBenchmark with the configuration file. You can modify the value of the `filetype` parameter to specify the function that you want to test.
|
||||
taosBenchmark supports complete performance testing of TDengine by providing the functionality to write, query, and subscribe. These three functions are mutually exclusive; users can select only one of them each time taosBenchmark runs. The query and subscribe functionalities are only configurable using a JSON configuration file by specifying the parameter `filetype`, while writing can be performed through both the command line and a configuration file. If you want to test the performance of queries or data subscription, configure taosBenchmark with the configuration file and set the value of the `filetype` parameter to the function that you want to test.
|
||||
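As a rough sketch of how `filetype` selects the function under test, a minimal query-test configuration could look like the following; the field names mirror the taosBenchmark JSON layout, but treat the host, database, and SQL values as placeholders:

```json
{
  "filetype": "query",
  "host": "127.0.0.1",
  "port": 6030,
  "user": "root",
  "password": "taosdata",
  "databases": "test",
  "specified_table_query": {
    "query_times": 10,
    "sqls": [
      {
        "sql": "SELECT COUNT(*) FROM meters",
        "result": "./query_result.txt"
      }
    ]
  }
}
```

Running `taosBenchmark -f <this file>` would then exercise the query path instead of insertion.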
|
||||
**Make sure that the TDengine cluster is running correctly before running taosBenchmark.**
|
||||
|
||||
|
@ -112,6 +112,9 @@ taosBenchmark -f <json file>
|
|||
- **-u/--user <user\>** :
|
||||
User name to connect to the TDengine server. Default is root.
|
||||
|
||||
- **-U/--supplement-insert** :
|
||||
Supplementally insert data without creating the database and tables first. Optional; the default is off.
|
||||
|
||||
- **-p/--password <passwd\>** :
|
||||
The password to connect to the TDengine server. The default is `taosdata`.
|
||||
|
||||
|
@ -148,6 +151,9 @@ taosBenchmark -f <json file>
|
|||
- **-l/--columns <colNum\>** :
|
||||
Specify the number of columns in the super table. If both this parameter and `-b/--data-type` are set, the final number of columns is the greater of the two. If the number specified by this parameter is greater than the number of columns specified by `-b/--data-type`, the unspecified column types default to INT; for example, with `-l 5 -b float,double`, the final columns are `FLOAT,DOUBLE,INT,INT,INT`. If the number of columns specified is less than or equal to the number of columns specified by `-b/--data-type`, the result is exactly the columns and types specified by `-b/--data-type`; e.g., with `-l 3 -b float,double,float,bigint`, the final columns are `FLOAT,DOUBLE,FLOAT,BIGINT`.
|
||||
|
||||
- **-L/--partial-col-num <colNum\>** :
|
||||
Specify how many of the first columns contain data; the data of the remaining columns are NULL. By default, all columns contain data.
|
||||
|
||||
- **-A/--tag-type <tagType\>** :
|
||||
The tag column types of the super table. The nchar and binary types can both specify a length, for example:
|
||||
|
||||
|
@ -231,7 +237,7 @@ The parameters related to database creation are configured in `dbinfo` in the js
|
|||
|
||||
- **name**: specify the name of the database.
|
||||
|
||||
- **drop**: indicate whether to delete the database before inserting. The default is true.
|
||||
- **drop**: indicate whether to delete the database before inserting. The value can be 'yes' or 'no'; 'no' means do not drop. The default is to drop.
|
||||
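For example, a `dbinfo` block that recreates the database on every run might look like the sketch below; the database name and the extra `vgroups` field are illustrative:

```json
"dbinfo": {
  "name": "test",
  "drop": "yes",
  "vgroups": 4
}
```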
|
||||
#### Stream processing related configuration parameters
|
||||
|
||||
|
@ -334,13 +340,13 @@ The configuration parameters for specifying super table tag columns and data col
|
|||
|
||||
- **name** : The name of the column. If used together with count, e.g. "name": "current", "count": 3, then the names of the 3 columns are current, current_2, and current_3.
|
||||
|
||||
- **min**: The minimum value of the column/label of the data type.
|
||||
- **min**: The minimum value of the column/label of the data type. The generated value will be greater than or equal to the minimum value.
|
||||
|
||||
- **max**: The maximum value of the column/label of the data type.
|
||||
- **max**: The maximum value of the column/label of the data type. The generated value will be less than the maximum value (see the sketch after this list).
|
||||
|
||||
- **values**: The value pool of the nchar/binary column/label; generated values are chosen randomly from this list.
|
||||
|
||||
- **sma**: Insert the column into the BSMA. Enter `yes` or `no`. The default is `no`.
|
||||
- **sma**: Insert the column into the SMA. Enter `yes` or `no`. The default is `no`.
|
||||
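Putting the parameters above together, a hypothetical column definition could look like the following sketch; the names, counts, lengths, and ranges are invented for illustration:

```json
"columns": [
  { "type": "FLOAT",  "name": "current",  "count": 3, "min": 0, "max": 100 },
  { "type": "BINARY", "name": "location", "len": 16,  "values": ["beijing", "shanghai"] },
  { "type": "INT",    "name": "voltage",  "sma": "yes" }
]
```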
|
||||
#### Insertion behavior configuration parameters
|
||||
|
||||
|
|
|
@ -211,7 +211,7 @@
|
|||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Leader MNode",
|
||||
"title": "Master MNode",
|
||||
"transformations": [
|
||||
{
|
||||
"id": "filterByValue",
|
||||
|
@ -221,7 +221,7 @@
|
|||
"config": {
|
||||
"id": "regex",
|
||||
"options": {
|
||||
"value": "leader"
|
||||
"value": "master"
|
||||
}
|
||||
},
|
||||
"fieldName": "role"
|
||||
|
@ -300,7 +300,7 @@
|
|||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Leader MNode Create Time",
|
||||
"title": "Master MNode Create Time",
|
||||
"transformations": [
|
||||
{
|
||||
"id": "filterByValue",
|
||||
|
@ -310,7 +310,7 @@
|
|||
"config": {
|
||||
"id": "regex",
|
||||
"options": {
|
||||
"value": "leader"
|
||||
"value": "master"
|
||||
}
|
||||
},
|
||||
"fieldName": "role"
|
||||
|
|
Before Width: | Height: | Size: 27 KiB After Width: | Height: | Size: 19 KiB |
Before Width: | Height: | Size: 14 KiB After Width: | Height: | Size: 5.5 KiB |
Before Width: | Height: | Size: 8.1 KiB After Width: | Height: | Size: 6.9 KiB |
Before Width: | Height: | Size: 94 KiB After Width: | Height: | Size: 16 KiB |
Before Width: | Height: | Size: 7.5 KiB After Width: | Height: | Size: 7.1 KiB |
Before Width: | Height: | Size: 16 KiB After Width: | Height: | Size: 10 KiB |
Before Width: | Height: | Size: 11 KiB After Width: | Height: | Size: 16 KiB |
Before Width: | Height: | Size: 16 KiB After Width: | Height: | Size: 6.2 KiB |
After Width: | Height: | Size: 5.3 KiB |
After Width: | Height: | Size: 23 KiB |
|
@ -153,7 +153,7 @@
|
|||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Leader MNode",
|
||||
"title": "Master MNode",
|
||||
"transformations": [
|
||||
{
|
||||
"id": "filterByValue",
|
||||
|
@ -163,7 +163,7 @@
|
|||
"config": {
|
||||
"id": "regex",
|
||||
"options": {
|
||||
"value": "leader"
|
||||
"value": "master"
|
||||
}
|
||||
},
|
||||
"fieldName": "role"
|
||||
|
@ -246,7 +246,7 @@
|
|||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Leader MNode Create Time",
|
||||
"title": "Master MNode Create Time",
|
||||
"transformations": [
|
||||
{
|
||||
"id": "filterByValue",
|
||||
|
@ -256,7 +256,7 @@
|
|||
"config": {
|
||||
"id": "regex",
|
||||
"options": {
|
||||
"value": "leader"
|
||||
"value": "master"
|
||||
}
|
||||
},
|
||||
"fieldName": "role"
|
||||
|
|
|
@ -5,15 +5,23 @@ sidebar_label: TDinsight
|
|||
|
||||
TDinsight is a solution for monitoring TDengine using the built-in native monitoring database and [Grafana].
|
||||
|
||||
After TDengine starts, it will automatically create a monitoring database `log`. TDengine will automatically write many metrics in specific intervals into the `log` database. The metrics may include the server's CPU, memory, hard disk space, network bandwidth, number of requests, disk read/write speed, slow queries, other information like important system operations (user login, database creation, database deletion, etc.), and error alarms. With [Grafana] and [TDengine Data Source Plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight can visualize cluster status, node information, insertion and query requests, resource usage, vnode, dnode, and mnode status, exception alerts and many other metrics. This is very convenient for developers who want to monitor TDengine cluster status in real-time. This article will guide users to install the Grafana server, automatically install the TDengine data source plug-in, and deploy the TDinsight visualization panel using the `TDinsight.sh` installation script.
|
||||
After TDengine starts, it automatically writes many metrics at specific intervals into a designated database. The metrics may include the server's CPU, memory, hard disk space, network bandwidth, number of requests, disk read/write speed, slow queries, other information like important system operations (user login, database creation, database deletion, etc.), and error alarms. With [Grafana] and the [TDengine Data Source Plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight can visualize cluster status, node information, insertion and query requests, resource usage, vnode, dnode, and mnode status, exception alerts, and many other metrics. This is very convenient for developers who want to monitor TDengine cluster status in real time. This article will guide users to install the Grafana server, automatically install the TDengine data source plug-in, and deploy the TDinsight visualization panel using the `TDinsight.sh` installation script.
|
||||
|
||||
## System Requirements
|
||||
|
||||
To deploy TDinsight, a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 2.3.3.0 and above, with the `log` database enabled (`monitor = 1`).
|
||||
To deploy TDinsight, we need:
|
||||
- a single-node TDengine server or a multi-node TDengine cluster, and a [Grafana] server. This dashboard requires TDengine 3.0.1.0 and above, with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters).
|
||||
- taosAdapter has been installed and is running; please refer to [taosAdapter](../taosadapter).
|
||||
- taosKeeper has been installed and is running; please refer to [taosKeeper](../taoskeeper).
|
||||
|
||||
Please record the following information (a quick verification sketch follows this list):
|
||||
- The endpoint of taosAdapter REST service, for example `http://tdengine.local:6041`
|
||||
- Authentication of taosAdapter, e.g. user name and password
|
||||
- The database name used by taosKeeper to store monitoring data
|
||||
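A quick way to verify the recorded endpoint and credentials is a REST probe through taosAdapter; the hostname and credentials below are the placeholders used later in this article:

```bash
curl -u root:taosdata "http://tdengine.local:6041/rest/sql" \
  -d "SHOW DATABASES"
```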
|
||||
## Installing Grafana
|
||||
|
||||
We recommend using the latest [Grafana] version 7 or 8 here. You can install Grafana on any [supported operating system](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems) by following the [official Grafana documentation Instructions](https://grafana.com/docs/grafana/latest/installation/) to install [Grafana].
|
||||
We recommend using the latest [Grafana] version 8 or 9. You can install Grafana on any [supported operating system](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems) by following the [official Grafana documentation instructions](https://grafana.com/docs/grafana/latest/installation/).
|
||||
|
||||
### Installing Grafana on Debian or Ubuntu
|
||||
|
||||
|
@ -71,7 +79,7 @@ chmod +x TDinsight.sh
|
|||
./TDinsight.sh
|
||||
```
|
||||
|
||||
This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://grafana.com/grafana/dashboards/15167) with configurable parameters for command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications.
|
||||
This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json), write the configurable command-line options into the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file, and automate deployment and updates. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications.
|
||||
|
||||
Assume you use TDengine and Grafana's default services on the same host. Run `./TDinsight.sh` and open the Grafana browser window to see the TDinsight dashboard.
|
||||
|
||||
|
@ -106,18 +114,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
|
|||
|
||||
-E, --external-notifier <string> Apply external notifier uid to TDinsight dashboard.
|
||||
|
||||
Alibaba Cloud SMS as Notifier:
|
||||
-s, --sms-enabled To enable tdengine-datasource plugin builtin Alibaba Cloud SMS webhook.
|
||||
-N, --sms-notifier-name <string> Provisioning notifier name.[default: TDinsight Builtin SMS]
|
||||
-U, --sms-notifier-uid <string> Provisioning notifier uid, use lowercase notifier name by default.
|
||||
-D, --sms-notifier-is-default Set notifier as default.
|
||||
-I, --sms-access-key-id <string> Alibaba Cloud SMS access key id
|
||||
-K, --sms-access-key-secret <string> Alibaba Cloud SMS access key secret
|
||||
-S, --sms-sign-name <string> Sign name
|
||||
-C, --sms-template-code <string> Template code
|
||||
-T, --sms-template-param <string> Template param, a escaped JSON string like '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}'
|
||||
-B, --sms-phone-numbers <string> Comma-separated numbers list, eg "189xxxxxxxx,132xxxxxxxx"
|
||||
-L, --sms-listen-addr <string> [default: 127.0.0.1:9100]
|
||||
```
|
||||
|
||||
Most command-line options can equivalently be set via environment variables.
|
||||
|
@ -136,17 +132,6 @@ Most command-line options can take effect the same as environment variables.
|
|||
| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight dashboard title. [Default: TDinsight] |
|
||||
| -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | If the dashboard is configured to be editable. [Default: false] |
|
||||
| -E | --external-notifier | EXTERNAL_NOTIFIER | Apply the external notifier uid to the TDinsight dashboard. |
|
||||
| -s | --sms-enabled | SMS_ENABLED | Enable the tdengine-datasource plugin built into Alibaba Cloud SMS webhook. | -s
|
||||
| -N | --sms-notifier-name | SMS_NOTIFIER_NAME | The name of the provisioning notifier. [Default: `TDinsight Builtin SMS`] | -U
|
||||
| -U | --sms-notifier-uid | SMS_NOTIFIER_UID | "Notification Channel" `uid`, lowercase of the program name is used by default, other characters are replaced by "-". |-sms
|
||||
| -D | --sms-notifier-is-default | SMS_NOTIFIER_IS_DEFAULT | Set built-in SMS notification to default value. |-sms-notifier-is-default
|
||||
| -I | --sms-access-key-id | SMS_ACCESS_KEY_ID | Alibaba Cloud SMS access key id |
|
||||
| -K | --sms-access-key-secret | SMS_ACCESS_KEY_SECRET | AliCloud SMS-access-secret-key |
|
||||
| -S | --sms-sign-name | SMS_SIGN_NAME | Signature |
|
||||
| -C | --sms-template-code | SMS_TEMPLATE_CODE | Template code |
|
||||
| -T | --sms-template-param | SMS_TEMPLATE_PARAM | JSON template for template parameters |
|
||||
| -B | --sms-phone-numbers | SMS_PHONE_NUMBERS | A comma-separated list of phone numbers, e.g. `"189xxxxxxxx,132xxxxxxxx"` |
|
||||
| -L | --sms-listen-addr | SMS_LISTEN_ADDR | Built-in SMS webhook listener address, default is `127.0.0.1:9100` |
|
||||
|
||||
Suppose you start a TDengine database on host `tdengine` with HTTP API port `6041`, user `root1`, and password `pass5ord`. Execute the script.
|
||||
|
||||
|
@ -166,24 +151,10 @@ Use the `uid` value obtained above as `-E` input.
|
|||
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
||||
```
|
||||
|
||||
If you want to use the [Alibaba Cloud SMS](https://www.aliyun.com/product/sms) service as a notification channel, you should enable it with the `-s` flag add the following parameters.
|
||||
|
||||
- `-N`: Notification Channel name, default is `TDinsight Builtin SMS`.
|
||||
- `-U`: Channel uid, default is lowercase of `name`, any other character is replaced with -, for the default `-N`, its uid is `tdinsight-builtin-sms`.
|
||||
- `-I`: Alibaba Cloud SMS access key id.
|
||||
- `-K`: Alibaba Cloud SMS access secret key.
|
||||
- `-S`: Alibaba Cloud SMS signature.
|
||||
- `-C`: Alibaba Cloud SMS template id.
|
||||
- `-T`: Alibaba Cloud SMS template parameters, for JSON format template, example is as follows `'{"alarm_level":"%s", "time":"%s", "name":"%s", "content":"%s"}'`. There are four parameters: alarm level, time, name and alarm content.
|
||||
- `-B`: a list of phone numbers, separated by a comma `,`.
|
||||
|
||||
If you want to monitor multiple TDengine clusters, you need to set up numerous TDinsight dashboards. Setting up non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if using the built-in SMS alerting feature.
|
||||
|
||||
```bash
|
||||
sudo ./TDinsight.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1'
|
||||
# If using built-in SMS notifications
|
||||
sudo ./TDinsight.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1' \
|
||||
-s -N 'Env1 SMS' -I xx -K xx -S xx -C SMS_XX -T '' -B 00000000000 -L 127.0.0.01:10611
|
||||
```
|
||||
|
||||
Please note that the configured data source, notification channel, and dashboard are not changeable on the front end. You should update the configuration again via this script or manually change the configuration files in the `/etc/grafana/provisioning` directory (this is the default directory for Grafana; use the `-P` option to change it as needed).
|
||||
|
@ -249,21 +220,23 @@ Save and test. It will report 'TDengine Data source is working' under normal cir
|
|||
|
||||
### Importing dashboards
|
||||
|
||||
Point to **+** / **Create** - **import** (or `/dashboard/import` url).
|
||||
On the data source configuration page, click the **Dashboards** tab.
|
||||
|
||||

|
||||
|
||||
Type the dashboard ID `15167` in the **Import via grafana.com** location and **Load**.
|
||||
Choose `TDengine for 3.x` and click `import`.
|
||||
|
||||

|
||||
After the import is done, the `TDinsight for 3.x` dashboard is available on the `search dashboards by name` page.
|
||||
|
||||
Once the import is complete, the full page view of TDinsight is shown below.
|
||||

|
||||
|
||||

|
||||
In the `TDinsight for 3.x` dashboard, choose the database used by taosKeeper to store monitoring data, and you can see the monitoring results.
|
||||
|
||||

|
||||
|
||||
## TDinsight dashboard details
|
||||
|
||||
The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources [dnodes, mnodes, vnodes](../../taos-sql/node/) or databases.
|
||||
The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources, e.g. dnodes, mnodes, vnodes and databases.
|
||||
|
||||
Details of the metrics are as follows.
|
||||
|
||||
|
@ -285,7 +258,6 @@ This section contains the current information and status of the cluster, the ale
|
|||
- **Measuring Points Used**: The number of measuring points used to enable the alert rule (no data available in the community version, healthy by default).
|
||||
- **Grants Expire Time**: the expiration time of the enterprise version of the enabled alert rule (no data available for the community version, healthy by default).
|
||||
- **Error Rate**: Aggregate error rate (average number of errors per second) for alert-enabled clusters.
|
||||
- **Variables**: `show variables` table display.
|
||||
|
||||
### DNodes Status
|
||||
|
||||
|
@ -294,7 +266,6 @@ This section contains the current information and status of the cluster, the ale
|
|||
- **DNodes Status**: simple table view of `show dnodes`.
|
||||
- **DNodes Lifetime**: the time elapsed since the dnode was created.
|
||||
- **DNodes Number**: the number of DNodes changes.
|
||||
- **Offline Reason**: if any dnode status is offline, the reason for offline is shown as a pie chart.
|
||||
|
||||
### MNode Overview
|
||||
|
||||
|
@ -309,7 +280,6 @@ This section contains the current information and status of the cluster, the ale
|
|||
|
||||
1. **Requests Rate(Inserts per Second)**: average number of inserts per second.
|
||||
2. **Requests (Selects)**: number of query requests and change rate (count per second).
|
||||
3. **Requests (HTTP)**: number of HTTP requests and request rate (count per second).
|
||||
|
||||
### Database
|
||||
|
||||
|
@ -319,9 +289,8 @@ Database usage, repeated for each value of the variable `$database` i.e. multipl
|
|||
|
||||
1. **STables**: number of super tables.
|
||||
2. **Total Tables**: number of all tables.
|
||||
3. **Sub Tables**: the number of all super table subtables.
|
||||
4. **Tables**: graph of all normal table numbers over time.
|
||||
5. **Tables Number Foreach VGroups**: The number of tables contained in each VGroups.
|
||||
3. **Tables**: number of normal tables.
|
||||
4. **Table number for each vgroup**: number of tables per vgroup.
|
||||
|
||||
### DNode Resource Usage
|
||||
|
||||
|
@ -356,12 +325,11 @@ Currently, only the number of logins per minute is reported.
|
|||
|
||||
Supports monitoring taosAdapter request statistics and status details, including:
|
||||
|
||||
1. **http_request**: contains the total number of requests, the number of failed requests, and the number of requests being processed
|
||||
2. **top 3 request endpoint**: data of the top 3 requests by endpoint group
|
||||
3. **Memory Used**: taosAdapter memory usage
|
||||
4. **latency_quantile(ms)**: quantile of (1, 2, 5, 9, 99) stages
|
||||
5. **top 3 failed request endpoint**: data of the top 3 failed requests by endpoint grouping
|
||||
6. **CPU Used**: taosAdapter CPU usage
|
||||
1. **http_request_inflight**: number of requests currently being processed.
|
||||
2. **http_request_total**: total number of requests.
|
||||
3. **http_request_fail**: number of failed requests.
|
||||
4. **CPU Used**: CPU usage of taosAdapter.
|
||||
5. **Memory Used**: Memory usage of taosAdapter.
|
||||
|
||||
## Upgrade
|
||||
|
||||
|
@ -403,13 +371,6 @@ services:
|
|||
TDENGINE_API: ${TDENGINE_API}
|
||||
TDENGINE_USER: ${TDENGINE_USER}
|
||||
TDENGINE_PASS: ${TDENGINE_PASS}
|
||||
SMS_ACCESS_KEY_ID: ${SMS_ACCESS_KEY_ID}
|
||||
SMS_ACCESS_KEY_SECRET: ${SMS_ACCESS_KEY_SECRET}
|
||||
SMS_SIGN_NAME: ${SMS_SIGN_NAME}
|
||||
SMS_TEMPLATE_CODE: ${SMS_TEMPLATE_CODE}
|
||||
SMS_TEMPLATE_PARAM: '${SMS_TEMPLATE_PARAM}'
|
||||
SMS_PHONE_NUMBERS: $SMS_PHONE_NUMBERS
|
||||
SMS_LISTEN_ADDR: ${SMS_LISTEN_ADDR}
|
||||
ports:
|
||||
- 3000:3000
|
||||
volumes:
|
||||
|
|
|
@ -12,7 +12,7 @@ If executed on the TDengine server-side, there is no need for additional install
|
|||
|
||||
## Execution
|
||||
|
||||
To access the TDengine CLI, you can execute `taos` command-line utility from a Linux terminal or Windows terminal.
|
||||
To access the TDengine CLI, you can execute the `taos` command-line utility from a terminal.
|
||||
|
||||
```bash
|
||||
taos
|
||||
|
|
|
@ -5,28 +5,28 @@ description: "List of platforms supported by TDengine server, client, and connec
|
|||
|
||||
## List of supported platforms for TDengine server
|
||||
|
||||
| | **Windows Server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18/20** |
|
||||
| ------------ | ---------------------------- | ----------------- | ---------------- | ---------------- |
|
||||
| X64 | ● | ● | ● | ● |
|
||||
| ARM64 | | | ● | |
|
||||
| | **Windows Server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18/20** | **macOS** |
|
||||
| ------------ | ---------------------------- | ----------------- | ---------------- | ---------------- | --------- |
|
||||
| X64 | ● | ● | ● | ● | ● |
|
||||
| ARM64 | | | ● | | ● |
|
||||
|
||||
Note: ● means officially tested and verified, ○ means unofficially tested and verified.
|
||||
|
||||
## List of supported platforms for TDengine clients and connectors
|
||||
|
||||
TDengine's connector can support a wide range of platforms, including X64/X86/ARM64/ARM32/MIPS/Alpha hardware platforms and Linux/Win64/Win32 development environments.
|
||||
TDengine's connector can support a wide range of platforms, including X64/X86/ARM64/ARM32/MIPS/Alpha hardware platforms and Linux/Win64/Win32/macOS development environments.
|
||||
|
||||
The comparison matrix is as follows.
|
||||
|
||||
| **CPU** | **X64 64bit** | **X64 64bit** | **ARM64** |
|
||||
| ----------- | ------------- | ------------- | --------- |
|
||||
| **OS** | **Linux** | **Win64** | **Linux** |
|
||||
| **C/C++** | ● | ● | ● |
|
||||
| **JDBC** | ● | ● | ● |
|
||||
| **Python** | ● | ● | ● |
|
||||
| **Go** | ● | ● | ● |
|
||||
| **NodeJs** | ● | ● | ● |
|
||||
| **C#** | ● | ● | ○ |
|
||||
| **RESTful** | ● | ● | ● |
|
||||
| **CPU** | **X64 64bit** | **X64 64bit** | **ARM64** | **X64 64bit** | **ARM64** |
|
||||
| ----------- | ------------- | ------------- | --------- | ------------- | --------- |
|
||||
| **OS** | **Linux** | **Win64** | **Linux** | **macOS** | **macOS** |
|
||||
| **C/C++** | ● | ● | ● | ● | ● |
|
||||
| **JDBC** | ● | ● | ● | ○ | ○ |
|
||||
| **Python** | ● | ● | ● | ● | ● |
|
||||
| **Go** | ● | ● | ● | ● | ● |
|
||||
| **NodeJs** | ● | ● | ● | ○ | ○ |
|
||||
| **C#** | ● | ● | ○ | ○ | ○ |
|
||||
| **RESTful** | ● | ● | ● | ● | ● |
|
||||
|
||||
Note: ● means officially tested and verified, ○ means unofficially tested and verified, -- means not verified.
|
||||
|
|
|
@ -25,10 +25,11 @@ The TDengine client taos can be executed in this container to access TDengine us
|
|||
$ docker exec -it tdengine taos
|
||||
|
||||
taos> show databases;
|
||||
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
|
||||
====================================================================================================================================================================================================================================================================================
|
||||
log | 2022-01-17 13:57:22.270 | 10 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready |
|
||||
Query OK, 1 row(s) in set (0.002843s)
|
||||
name |
|
||||
=================================
|
||||
information_schema |
|
||||
performance_schema |
|
||||
Query OK, 2 row(s) in set (0.002843s)
|
||||
```
|
||||
|
||||
The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from outside the container using TDengine CLI or various connectors for complex scenarios.
|
||||
|
|
|
@ -164,7 +164,7 @@ The parameters described in this document by the effect that they have on the sy
|
|||
| Attribute | Description |
|
||||
| -------- | -------------------- |
|
||||
| Applicable | Client only |
|
||||
| 含义 | SMA index optimization policy |
|
||||
| Meaning | SMA index optimization policy |
|
||||
| Unit | None |
|
||||
| Default Value | 0 |
|
||||
| Notes |
|
||||
|
@ -177,12 +177,21 @@ The parameters described in this document by the effect that they have on the sy
|
|||
### maxNumOfDistinctRes
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------------------- | --- |
|
||||
| -------- | -------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | The maximum number of distinct rows returned |
|
||||
| Value Range | [100,000 - 100,000,000] |
|
||||
| Default Value | 100,000 |
|
||||
|
||||
### keepColumnName
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------------------- |
|
||||
| Applicable | Client only |
|
||||
| Meaning | Whether the returned column name contains the function name when the Last, First, or LastRow function is queried. |
|
||||
| Value Range | 0 means including the function name, 1 means not including the function name. |
|
||||
| Default Value | 0 |
|
||||
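As an illustration of the effect (the table and column names below are assumed), querying with the Last function:

```sql
-- With keepColumnName = 1 on the client, the returned column is named `current`;
-- with the default keepColumnName = 0, it is named `last(current)`.
SELECT LAST(current) FROM meters;
```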
|
||||
## Locale Parameters
|
||||
|
||||
### timezone
|
||||
|
@ -196,7 +205,7 @@ The parameters described in this document by the effect that they have on the sy
|
|||
:::info
|
||||
To handle data insertion and queries from multiple timezones, Unix timestamps are used and stored in TDengine. A timestamp generated at the same moment in any timezone is the same Unix timestamp; for example, 12:00:00 UTC+8 and 04:00:00 UTC on the same day are the same instant and the same Unix timestamp. Note that Unix timestamps are converted and recorded on the client side. To make sure the time on the client side can be converted to a Unix timestamp correctly, the timezone must be set properly.
|
||||
|
||||
On Linux system, TDengine clients automatically obtain timezone from the host. Alternatively, the timezone can be configured explicitly in configuration file `taos.cfg` like below. For example:
|
||||
On Linux/macOS, TDengine clients automatically obtain the timezone from the host. Alternatively, the timezone can be configured explicitly in the configuration file `taos.cfg` as below. For example:
|
||||
|
||||
```
|
||||
timezone UTC-8
|
||||
|
@ -239,9 +248,9 @@ To avoid the problems of using time strings, Unix timestamp can be used directly
|
|||
:::info
|
||||
A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, and Korean. The characters to be stored in nchar type are firstly encoded in UCS4-LE before sending to server side. Note that the correct encoding is determined by the user. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly.
|
||||
|
||||
The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux, or GB18030 or GBK on some systems in Chinese, POSIX in docker, CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE.
|
||||
The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux/macOS, or GB18030 or GBK on some systems in Chinese, POSIX in docker, CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE.
|
||||
|
||||
The locale definition standard on Linux is: <Language\>\_<Region\>.<charset\>, for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. The charset indicates how to display the characters. On Linux and Mac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset.
|
||||
The locale definition standard on Linux/macOS is: <Language\>\_<Region\>.<charset\>, for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. The charset indicates how to display the characters. On Linux/macOS, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux/macOS to specify the charset.
|
||||
|
||||
:::
|
||||
|
||||
|
@ -254,9 +263,9 @@ The locale definition standard on Linux is: <Language\>\_<Region\>.<charset\>, f
|
|||
| Default Value | charset set in the system |
|
||||
|
||||
:::info
|
||||
On Linux, if `charset` is not set in `taos.cfg`, when `taos` is started, the charset is obtained from system locale. If obtaining charset from system locale fails, `taos` would fail to start.
|
||||
On Linux/macOS, if `charset` is not set in `taos.cfg`, when `taos` is started, the charset is obtained from system locale. If obtaining charset from system locale fails, `taos` would fail to start.
|
||||
|
||||
So on Linux system, if system locale is set properly, it's not necessary to set `charset` in `taos.cfg`. For example:
|
||||
So on Linux/macOS, if system locale is set properly, it's not necessary to set `charset` in `taos.cfg`. For example:
|
||||
|
||||
```
|
||||
locale zh_CN.UTF-8
|
||||
|
@ -270,7 +279,7 @@ charset CP936
|
|||
|
||||
Refer to the documentation for your operating system before changing the charset.
|
||||
|
||||
On a Linux system, if the charset contained in `locale` is not consistent with that set by `charset`, the later setting in the configuration file takes precedence.
|
||||
On Linux/macOS, if the charset contained in `locale` is not consistent with that set by `charset`, the setting that appears later in the configuration file takes precedence.
|
||||
|
||||
```
|
||||
locale zh_CN.UTF-8
|
||||
|
@ -325,7 +334,7 @@ The charset that takes effect is UTF-8.
|
|||
| Applicable | Server Only |
|
||||
| Meaning | Maximum number of vnodes per dnode |
|
||||
| Value Range | 0-4096 |
|
||||
| Default Value | 256 |
|
||||
| Default Value | 2x the CPU cores |
|
||||
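To pin the value explicitly instead of relying on the CPU-based default, a `taos.cfg` entry uses the same `name value` form as the other configuration examples in this document; the value 128 is arbitrary:

```
supportVnodes 128
```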
|
||||
## Time Parameters
|
||||
|
||||
|
@ -666,7 +675,7 @@ To prevent system resource from being exhausted by multiple concurrent streams,
|
|||
| Meaning | Whether to generate core file when server crashes |
|
||||
| Value Range | 0: false, 1: true |
|
||||
| Default Value | 1 |
|
||||
| Note | The core file is generated under root directory `systemctl start taosd` is used to start, or under the working directory if `taosd` is started directly on Linux Shell. |
|
||||
| Note | The core file is generated under the root directory if `systemctl start taosd`/`launchctl start com.tdengine.taosd` is used to start taosd, or under the working directory if `taosd` is started directly in a Linux/macOS shell. |
|
||||
|
||||
### udf
|
||||
|
||||
|
@ -697,152 +706,154 @@ To prevent system resource from being exhausted by multiple concurrent streams,
|
|||
| 15 | telemetryPort | No | Yes |
|
||||
| 16 | queryPolicy | No | Yes |
|
||||
| 17 | querySmaOptimize | No | Yes |
|
||||
| 18 | queryBufferSize | Yes | Yes |
|
||||
| 19 | maxNumOfDistinctRes | Yes | Yes |
|
||||
| 20 | minSlidingTime | Yes | Yes |
|
||||
| 21 | minIntervalTime | Yes | Yes |
|
||||
| 22 | countAlwaysReturnValue | Yes | Yes |
|
||||
| 23 | dataDir | Yes | Yes |
|
||||
| 24 | minimalDataDirGB | Yes | Yes |
|
||||
| 25 | supportVnodes | No | Yes |
|
||||
| 26 | tempDir | Yes | Yes |
|
||||
| 27 | minimalTmpDirGB | Yes | Yes |
|
||||
| 28 | compressMsgSize | Yes | Yes |
|
||||
| 29 | compressColData | Yes | Yes |
|
||||
| 30 | smlChildTableName | Yes | Yes |
|
||||
| 31 | smlTagName | Yes | Yes |
|
||||
| 32 | smlDataFormat | No | Yes |
|
||||
| 33 | statusInterval | Yes | Yes |
|
||||
| 34 | shellActivityTimer | Yes | Yes |
|
||||
| 35 | transPullupInterval | No | Yes |
|
||||
| 36 | mqRebalanceInterval | No | Yes |
|
||||
| 37 | ttlUnit | No | Yes |
|
||||
| 38 | ttlPushInterval | No | Yes |
|
||||
| 39 | numOfTaskQueueThreads | No | Yes |
|
||||
| 40 | numOfRpcThreads | No | Yes |
|
||||
| 41 | numOfCommitThreads | Yes | Yes |
|
||||
| 42 | numOfMnodeReadThreads | No | Yes |
|
||||
| 43 | numOfVnodeQueryThreads | No | Yes |
|
||||
| 44 | numOfVnodeStreamThreads | No | Yes |
|
||||
| 45 | numOfVnodeFetchThreads | No | Yes |
|
||||
| 46 | numOfVnodeWriteThreads | No | Yes |
|
||||
| 47 | numOfVnodeSyncThreads | No | Yes |
|
||||
| 48 | numOfQnodeQueryThreads | No | Yes |
|
||||
| 49 | numOfQnodeFetchThreads | No | Yes |
|
||||
| 50 | numOfSnodeSharedThreads | No | Yes |
|
||||
| 51 | numOfSnodeUniqueThreads | No | Yes |
|
||||
| 52 | rpcQueueMemoryAllowed | No | Yes |
|
||||
| 53 | logDir | Yes | Yes |
|
||||
| 54 | minimalLogDirGB | Yes | Yes |
|
||||
| 55 | numOfLogLines | Yes | Yes |
|
||||
| 56 | asyncLog | Yes | Yes |
|
||||
| 57 | logKeepDays | Yes | Yes |
|
||||
| 58 | debugFlag | Yes | Yes |
|
||||
| 59 | tmrDebugFlag | Yes | Yes |
|
||||
| 60 | uDebugFlag | Yes | Yes |
|
||||
| 61 | rpcDebugFlag | Yes | Yes |
|
||||
| 62 | jniDebugFlag | Yes | Yes |
|
||||
| 63 | qDebugFlag | Yes | Yes |
|
||||
| 64 | cDebugFlag | Yes | Yes |
|
||||
| 65 | dDebugFlag | Yes | Yes |
|
||||
| 66 | vDebugFlag | Yes | Yes |
|
||||
| 67 | mDebugFlag | Yes | Yes |
|
||||
| 68 | wDebugFlag | Yes | Yes |
|
||||
| 69 | sDebugFlag | Yes | Yes |
|
||||
| 70 | tsdbDebugFlag | Yes | Yes |
|
||||
| 71 | tqDebugFlag | No | Yes |
|
||||
| 72 | fsDebugFlag | Yes | Yes |
|
||||
| 73 | udfDebugFlag | No | Yes |
|
||||
| 74 | smaDebugFlag | No | Yes |
|
||||
| 75 | idxDebugFlag | No | Yes |
|
||||
| 76 | tdbDebugFlag | No | Yes |
|
||||
| 77 | metaDebugFlag | No | Yes |
|
||||
| 78 | timezone | Yes | Yes |
|
||||
| 79 | locale | Yes | Yes |
|
||||
| 80 | charset | Yes | Yes |
|
||||
| 81 | udf | Yes | Yes |
|
||||
| 82 | enableCoreFile | Yes | Yes |
|
||||
| 83 | arbitrator | Yes | No |
|
||||
| 84 | numOfThreadsPerCore | Yes | No |
|
||||
| 85 | numOfMnodes | Yes | No |
|
||||
| 86 | vnodeBak | Yes | No |
|
||||
| 87 | balance | Yes | No |
|
||||
| 88 | balanceInterval | Yes | No |
|
||||
| 89 | offlineThreshold | Yes | No |
|
||||
| 90 | role | Yes | No |
|
||||
| 91 | dnodeNopLoop | Yes | No |
|
||||
| 92 | keepTimeOffset | Yes | No |
|
||||
| 93 | rpcTimer | Yes | No |
|
||||
| 94 | rpcMaxTime | Yes | No |
|
||||
| 95 | rpcForceTcp | Yes | No |
|
||||
| 96 | tcpConnTimeout | Yes | No |
|
||||
| 97 | syncCheckInterval | Yes | No |
|
||||
| 98 | maxTmrCtrl | Yes | No |
|
||||
| 99 | monitorReplica | Yes | No |
|
||||
| 100 | smlTagNullName | Yes | No |
|
||||
| 101 | keepColumnName | Yes | No |
|
||||
| 102 | ratioOfQueryCores | Yes | No |
|
||||
| 103 | maxStreamCompDelay | Yes | No |
|
||||
| 104 | maxFirstStreamCompDelay | Yes | No |
|
||||
| 105 | retryStreamCompDelay | Yes | No |
|
||||
| 106 | streamCompDelayRatio | Yes | No |
|
||||
| 107 | maxVgroupsPerDb | Yes | No |
|
||||
| 108 | maxTablesPerVnode | Yes | No |
|
||||
| 109 | minTablesPerVnode | Yes | No |
|
||||
| 110 | tableIncStepPerVnode | Yes | No |
|
||||
| 111 | cache | Yes | No |
|
||||
| 112 | blocks | Yes | No |
|
||||
| 113 | days | Yes | No |
|
||||
| 114 | keep | Yes | No |
|
||||
| 115 | minRows | Yes | No |
|
||||
| 116 | maxRows | Yes | No |
|
||||
| 117 | quorum | Yes | No |
|
||||
| 118 | comp | Yes | No |
|
||||
| 119 | walLevel | Yes | No |
|
||||
| 120 | fsync | Yes | No |
|
||||
| 121 | replica | Yes | No |
|
||||
| 122 | partitions | Yes | No |
|
||||
| 123 | quorum | Yes | No |
|
||||
| 124 | update | Yes | No |
|
||||
| 125 | cachelast | Yes | No |
|
||||
| 126 | maxSQLLength | Yes | No |
|
||||
| 127 | maxWildCardsLength | Yes | No |
|
||||
| 128 | maxRegexStringLen | Yes | No |
|
||||
| 129 | maxNumOfOrderedRes | Yes | No |
|
||||
| 130 | maxConnections | Yes | No |
|
||||
| 131 | mnodeEqualVnodeNum | Yes | No |
|
||||
| 132 | http | Yes | No |
|
||||
| 133 | httpEnableRecordSql | Yes | No |
|
||||
| 134 | httpMaxThreads | Yes | No |
|
||||
| 135 | restfulRowLimit | Yes | No |
|
||||
| 136 | httpDbNameMandatory | Yes | No |
|
||||
| 137 | httpKeepAlive | Yes | No |
|
||||
| 138 | enableRecordSql | Yes | No |
|
||||
| 139 | maxBinaryDisplayWidth | Yes | No |
|
||||
| 140 | stream | Yes | No |
|
||||
| 141 | retrieveBlockingModel | Yes | No |
|
||||
| 142 | tsdbMetaCompactRatio | Yes | No |
|
||||
| 143 | defaultJSONStrType | Yes | No |
|
||||
| 144 | walFlushSize | Yes | No |
|
||||
| 145 | keepTimeOffset | Yes | No |
|
||||
| 146 | flowctrl | Yes | No |
|
||||
| 147 | slaveQuery | Yes | No |
|
||||
| 148 | adjustMaster | Yes | No |
|
||||
| 149 | topicBinaryLen | Yes | No |
|
||||
| 150 | telegrafUseFieldNum | Yes | No |
|
||||
| 151 | deadLockKillQuery | Yes | No |
|
||||
| 152 | clientMerge | Yes | No |
|
||||
| 153 | sdbDebugFlag | Yes | No |
|
||||
| 154 | odbcDebugFlag | Yes | No |
|
||||
| 155 | httpDebugFlag | Yes | No |
|
||||
| 156 | monDebugFlag | Yes | No |
|
||||
| 157 | cqDebugFlag | Yes | No |
|
||||
| 158 | shortcutFlag | Yes | No |
|
||||
| 159 | probeSeconds | Yes | No |
|
||||
| 160 | probeKillSeconds | Yes | No |
|
||||
| 161 | probeInterval | Yes | No |
|
||||
| 162 | lossyColumns | Yes | No |
|
||||
| 163 | fPrecision | Yes | No |
|
||||
| 164 | dPrecision | Yes | No |
|
||||
| 165 | maxRange | Yes | No |
|
||||
| 166 | range | Yes | No |
|
||||
| 18 | queryRsmaTolerance | No | Yes |
|
||||
| 19 | queryBufferSize | Yes | Yes |
|
||||
| 20 | maxNumOfDistinctRes | Yes | Yes |
|
||||
| 21 | minSlidingTime | Yes | Yes |
|
||||
| 22 | minIntervalTime | Yes | Yes |
|
||||
| 23 | countAlwaysReturnValue | Yes | Yes |
|
||||
| 24 | dataDir | Yes | Yes |
|
||||
| 25 | minimalDataDirGB | Yes | Yes |
|
||||
| 26 | supportVnodes | No | Yes |
|
||||
| 27 | tempDir | Yes | Yes |
|
||||
| 28 | minimalTmpDirGB | Yes | Yes |
|
||||
| 29 | compressMsgSize | Yes | Yes |
|
||||
| 30 | compressColData | Yes | Yes |
|
||||
| 31 | smlChildTableName | Yes | Yes |
|
||||
| 32 | smlTagName | Yes | Yes |
|
||||
| 33 | smlDataFormat | No | Yes |
|
||||
| 34 | statusInterval | Yes | Yes |
|
||||
| 35 | shellActivityTimer | Yes | Yes |
|
||||
| 36 | transPullupInterval | No | Yes |
|
||||
| 37 | mqRebalanceInterval | No | Yes |
|
||||
| 38 | ttlUnit | No | Yes |
|
||||
| 39 | ttlPushInterval | No | Yes |
|
||||
| 40 | numOfTaskQueueThreads | No | Yes |
|
||||
| 41 | numOfRpcThreads | No | Yes |
|
||||
| 42 | numOfCommitThreads | Yes | Yes |
|
||||
| 43 | numOfMnodeReadThreads | No | Yes |
|
||||
| 44 | numOfVnodeQueryThreads | No | Yes |
|
||||
| 45 | numOfVnodeStreamThreads | No | Yes |
|
||||
| 46 | numOfVnodeFetchThreads | No | Yes |
|
||||
| 47 | numOfVnodeWriteThreads | No | Yes |
|
||||
| 48 | numOfVnodeSyncThreads | No | Yes |
|
||||
| 49 | numOfVnodeRsmaThreads | No | Yes |
|
||||
| 50 | numOfQnodeQueryThreads | No | Yes |
|
||||
| 51 | numOfQnodeFetchThreads | No | Yes |
|
||||
| 52 | numOfSnodeSharedThreads | No | Yes |
|
||||
| 53 | numOfSnodeUniqueThreads | No | Yes |
|
||||
| 54 | rpcQueueMemoryAllowed | No | Yes |
|
||||
| 55 | logDir | Yes | Yes |
|
||||
| 56 | minimalLogDirGB | Yes | Yes |
|
||||
| 57 | numOfLogLines | Yes | Yes |
|
||||
| 58 | asyncLog | Yes | Yes |
|
||||
| 59 | logKeepDays | Yes | Yes |
|
||||
| 60 | debugFlag | Yes | Yes |
|
||||
| 61 | tmrDebugFlag | Yes | Yes |
|
||||
| 62 | uDebugFlag | Yes | Yes |
|
||||
| 63 | rpcDebugFlag | Yes | Yes |
|
||||
| 64 | jniDebugFlag | Yes | Yes |
|
||||
| 65 | qDebugFlag | Yes | Yes |
|
||||
| 66 | cDebugFlag | Yes | Yes |
|
||||
| 67 | dDebugFlag | Yes | Yes |
|
||||
| 68 | vDebugFlag | Yes | Yes |
|
||||
| 69 | mDebugFlag | Yes | Yes |
|
||||
| 70 | wDebugFlag | Yes | Yes |
|
||||
| 71 | sDebugFlag | Yes | Yes |
|
||||
| 72 | tsdbDebugFlag | Yes | Yes |
|
||||
| 73 | tqDebugFlag | No | Yes |
|
||||
| 74 | fsDebugFlag | Yes | Yes |
|
||||
| 75 | udfDebugFlag | No | Yes |
|
||||
| 76 | smaDebugFlag | No | Yes |
|
||||
| 77 | idxDebugFlag | No | Yes |
|
||||
| 78 | tdbDebugFlag | No | Yes |
|
||||
| 79 | metaDebugFlag | No | Yes |
|
||||
| 80 | timezone | Yes | Yes |
|
||||
| 81 | locale | Yes | Yes |
|
||||
| 82 | charset | Yes | Yes |
|
||||
| 83 | udf | Yes | Yes |
|
||||
| 84 | enableCoreFile | Yes | Yes |
|
||||
| 85 | arbitrator | Yes | No |
|
||||
| 86 | numOfThreadsPerCore | Yes | No |
|
||||
| 87 | numOfMnodes | Yes | No |
|
||||
| 88 | vnodeBak | Yes | No |
|
||||
| 89 | balance | Yes | No |
|
||||
| 90 | balanceInterval | Yes | No |
|
||||
| 91 | offlineThreshold | Yes | No |
|
||||
| 92 | role | Yes | No |
|
||||
| 93 | dnodeNopLoop | Yes | No |
|
||||
| 94 | keepTimeOffset | Yes | No |
|
||||
| 95 | rpcTimer | Yes | No |
|
||||
| 96 | rpcMaxTime | Yes | No |
|
||||
| 97 | rpcForceTcp | Yes | No |
|
||||
| 98 | tcpConnTimeout | Yes | No |
|
||||
| 99 | syncCheckInterval | Yes | No |
|
||||
| 100 | maxTmrCtrl | Yes | No |
|
||||
| 101 | monitorReplica | Yes | No |
|
||||
| 102 | smlTagNullName | Yes | No |
|
||||
| 103 | keepColumnName | Yes | No |
|
||||
| 104 | ratioOfQueryCores | Yes | No |
|
||||
| 105 | maxStreamCompDelay | Yes | No |
|
||||
| 106 | maxFirstStreamCompDelay | Yes | No |
|
||||
| 107 | retryStreamCompDelay | Yes | No |
|
||||
| 108 | streamCompDelayRatio | Yes | No |
|
||||
| 109 | maxVgroupsPerDb | Yes | No |
|
||||
| 110 | maxTablesPerVnode | Yes | No |
|
||||
| 111 | minTablesPerVnode | Yes | No |
|
||||
| 112 | tableIncStepPerVnode | Yes | No |
|
||||
| 113 | cache | Yes | No |
|
||||
| 114 | blocks | Yes | No |
|
||||
| 115 | days | Yes | No |
|
||||
| 116 | keep | Yes | No |
|
||||
| 117 | minRows | Yes | No |
|
||||
| 118 | maxRows | Yes | No |
|
||||
| 119 | quorum | Yes | No |
|
||||
| 120 | comp | Yes | No |
|
||||
| 121 | walLevel | Yes | No |
|
||||
| 122 | fsync | Yes | No |
|
||||
| 123 | replica | Yes | No |
|
||||
| 124 | partitions | Yes | No |
|
||||
| 125 | quorum | Yes | No |
|
||||
| 126 | update | Yes | No |
|
||||
| 127 | cachelast | Yes | No |
|
||||
| 128 | maxSQLLength | Yes | No |
|
||||
| 129 | maxWildCardsLength | Yes | No |
|
||||
| 130 | maxRegexStringLen | Yes | No |
|
||||
| 131 | maxNumOfOrderedRes | Yes | No |
|
||||
| 132 | maxConnections | Yes | No |
|
||||
| 133 | mnodeEqualVnodeNum | Yes | No |
|
||||
| 134 | http | Yes | No |
|
||||
| 135 | httpEnableRecordSql | Yes | No |
|
||||
| 136 | httpMaxThreads | Yes | No |
|
||||
| 137 | restfulRowLimit | Yes | No |
|
||||
| 138 | httpDbNameMandatory | Yes | No |
|
||||
| 139 | httpKeepAlive | Yes | No |
|
||||
| 140 | enableRecordSql | Yes | No |
|
||||
| 141 | maxBinaryDisplayWidth | Yes | No |
|
||||
| 142 | stream | Yes | No |
|
||||
| 143 | retrieveBlockingModel | Yes | No |
|
||||
| 144 | tsdbMetaCompactRatio | Yes | No |
|
||||
| 145 | defaultJSONStrType | Yes | No |
|
||||
| 146 | walFlushSize | Yes | No |
|
||||
| 147 | keepTimeOffset | Yes | No |
|
||||
| 148 | flowctrl | Yes | No |
|
||||
| 149 | slaveQuery | Yes | No |
|
||||
| 150 | adjustMaster | Yes | No |
|
||||
| 151 | topicBinaryLen | Yes | No |
|
||||
| 152 | telegrafUseFieldNum | Yes | No |
|
||||
| 153 | deadLockKillQuery | Yes | No |
|
||||
| 154 | clientMerge | Yes | No |
|
||||
| 155 | sdbDebugFlag | Yes | No |
|
||||
| 156 | odbcDebugFlag | Yes | No |
|
||||
| 157 | httpDebugFlag | Yes | No |
|
||||
| 158 | monDebugFlag | Yes | No |
|
||||
| 159 | cqDebugFlag | Yes | No |
|
||||
| 160 | shortcutFlag | Yes | No |
|
||||
| 161 | probeSeconds | Yes | No |
|
||||
| 162 | probeKillSeconds | Yes | No |
|
||||
| 163 | probeInterval | Yes | No |
|
||||
| 164 | lossyColumns | Yes | No |
|
||||
| 165 | fPrecision | Yes | No |
|
||||
| 166 | dPrecision | Yes | No |
|
||||
| 167 | maxRange | Yes | No |
|
||||
| 168 | range | Yes | No |
|
||||
|
|
|
@ -47,9 +47,8 @@ In the schemaless writing data line protocol, each data item in the field_set ne
|
|||
|
||||
- `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types.
|
||||
|
||||
For example, the following data rows indicate that the t1 label is "3" (NCHAR), the t2 label is "4" (NCHAR), and the t3 label
|
||||
is "t3" to the super table named `st` labeled "t3" (NCHAR), write c1 column as 3 (BIGINT), c2 column as false (BOOL), c3 column
|
||||
is "passit" (BINARY), c4 column is 4 (DOUBLE), and the primary key timestamp is 1626006833639000000 in one row.
|
||||
For example, the following data rows write c1 column as 3 (BIGINT), c2 column as false (BOOL), c3 column
|
||||
as "passit" (BINARY), c4 column as 4 (DOUBLE), and the primary key timestamp as 1626006833639000000 to child table with the t1 label as "3" (NCHAR), the t2 label as "4" (NCHAR), and the t3 label as "t3" (NCHAR) and the super table named `st`.
|
||||
|
||||
```json
|
||||
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
|
||||
|
@ -69,7 +68,7 @@ Schemaless writes process row data according to the following principles.
|
|||
|
||||
Note that tag_key1 and tag_key2 are not in the original order entered by the user but are the tag names sorted in ascending string order. Therefore, tag_key1 is not necessarily the first tag entered in the line protocol.
|
||||
The string's MD5 hash value "md5_val" is calculated after the sorting is completed. The calculation result is then combined with the string to generate the table name "t_md5_val"; "t_" is a fixed prefix that every table generated by this mapping relationship has (a toy illustration follows this list).
|
||||
You can configure smlChildTableName to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpul,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
|
||||
You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
|
||||
|
||||
2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
|
||||
3. If the subtable obtained by parsing the line protocol does not exist, Schemaless creates the sub-table according to the subtable name determined in steps 1 or 2.
|
||||
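As a toy illustration of the naming pattern only (TDengine computes the hash internally over its own canonical string, which may differ from the input below), hashing a sorted tag string from a shell looks like:

```bash
# Illustrative only: mirrors the documented "t_" + MD5 idea, not the exact
# canonical string TDengine hashes internally.
echo -n 'st,t1=3,t2=4,t3=t3' | md5sum | awk '{print "t_" $1}'
```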
|
@ -78,7 +77,7 @@ You can configure smlChildTableName to specify table names, for example, `smlChi
|
|||
NULL.
|
||||
6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
|
||||
7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
|
||||
8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat to false. Otherwise, data will be written out of order and a database error will occur.
|
||||
8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (As of version 3.0.1.3, smlDataFormat defaults to false.)
|
||||
|
||||
:::tip
|
||||
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
sidebar_label: taosKeeper
|
||||
title: taosKeeper
|
||||
description: Instructions and tips for using taosKeeper
|
||||
description: taosKeeper exports TDengine monitoring metrics.
|
||||
---
|
||||
|
||||
## Introduction
|
||||
|
@ -22,26 +22,35 @@ You can compile taosKeeper separately and install it. Please refer to the [taosK
|
|||
|
||||
### Configuration and running methods
|
||||
|
||||
<!-- taosKeeper needs to be executed on the terminal of the operating system, it supports two configuration methods: [Command-line arguments](#command-line-arguments-in-detail) and [configuration file](#configuration-file-parameters-in-detail). Command-line arguments take precedence over values in the configuration file. -->
|
||||
taosKeeper needs to be executed on the terminal of the operating system. To run taosKeeper, see [configuration file](#configuration-file-parameters-in-detail).
|
||||
taosKeeper needs to be executed from the terminal of the operating system. It supports three configuration methods: [Command-line arguments](#command-line-arguments-in-detail), [environment variables](#environment-variable-in-detail), and [configuration file](#configuration-file-parameters-in-detail). The precedence, from highest to lowest, is command-line arguments, environment variables, and the configuration file, as sketched below.
|
||||
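A sketch of the precedence, assuming an illustrative `--tdengine.host` flag (run `taoskeeper -h` for the actual flag names):

```shell
export TAOS_KEEPER_TDENGINE_HOST=192.168.1.10   # environment variable
taoskeeper --tdengine.host 192.168.1.20         # the command-line value wins
```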
|
||||
**Make sure that the TDengine cluster is running correctly before running taosKeeper.** Ensure that the monitoring service in TDengine has been started. For more information, see [TDengine Monitoring Configuration](../config/#monitoring).
|
||||
|
||||
<!--
|
||||
### Command-Line Parameters
|
||||
|
||||
You can use command-line parameters to run taosBenchmark and control its behavior:
|
||||
You can use command-line parameters to run taosKeeper and control its behavior:
|
||||
|
||||
```shell
|
||||
taosKeeper
|
||||
$ taosKeeper
|
||||
```
|
||||
-->
|
||||
### Environment variable
|
||||
|
||||
You can use environment variables to run taosKeeper and control its behavior:
|
||||
|
||||
```shell
|
||||
$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
|
||||
|
||||
$ taoskeeper
|
||||
```
|
||||
|
||||
You can run `taoskeeper -h` for more details.
|
||||
|
||||
### Configuration File
|
||||
|
||||
You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/keeper.toml` is used by default. If this file does not specify configurations, the default values are used.
|
||||
|
||||
```shell
|
||||
taoskeeper -c <keeper config file>
|
||||
$ taoskeeper -c <keeper config file>
|
||||
```
|
||||
|
||||
**Sample configuration files**
|
||||
|
@ -110,7 +119,7 @@ Query OK, 1 rows in database (0.036162s)
|
|||
#### Export Monitoring Metrics
|
||||
|
||||
```shell
|
||||
curl http://127.0.0.1:6043/metrics
|
||||
$ curl http://127.0.0.1:6043/metrics
|
||||
```
|
||||
|
||||
Sample result set (excerpt):
|
||||
|
|
|
@ -51,5 +51,6 @@ port: 8125
|
|||
Start StatsD after adding the following (assuming the config file is modified to config.js)
|
||||
|
||||
```
|
||||
npm install
|
||||
node stats.js config.js &
|
||||
```
|
||||
|
|
|
@ -22,5 +22,4 @@ An example is as follows.
|
|||
username = "root"
|
||||
password = "taosdata"
|
||||
data_format = "influx"
|
||||
influx_max_line_bytes = 250
|
||||
```
|
||||
|
|
|
@ -30,21 +30,20 @@ After restarting Prometheus, you can refer to the following example to verify th
|
|||
|
||||
```
|
||||
taos> show databases;
|
||||
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
|
||||
====================================================================================================================================================================================================================================================================================
|
||||
test | 2022-04-12 08:07:58.756 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
|
||||
log | 2022-04-20 07:19:50.260 | 2 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
|
||||
prometheus_data | 2022-04-20 07:21:09.202 | 158 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
|
||||
db | 2022-04-15 06:37:08.512 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
|
||||
Query OK, 4 row(s) in set (0.000585s)
|
||||
name |
|
||||
=================================
|
||||
information_schema |
|
||||
performance_schema |
|
||||
prometheus_data |
|
||||
Query OK, 3 row(s) in set (0.000585s)
|
||||
|
||||
taos> use prometheus_data;
|
||||
Database changed.
|
||||
|
||||
taos> show stables;
|
||||
name | created_time | columns | tags | tables |
|
||||
============================================================================================
|
||||
metrics | 2022-04-20 07:21:09.209 | 2 | 1 | 1389 |
|
||||
name |
|
||||
=================================
|
||||
metrics |
|
||||
Query OK, 1 row(s) in set (0.000487s)
|
||||
|
||||
taos> select * from metrics limit 10;
|
||||
|
@ -89,3 +88,7 @@ VALUE TIMESTAMP
|
|||
|
||||
```
|
||||
|
||||
:::note
|
||||
|
||||
- TDengine automatically creates unique IDs for sub-table names according to its naming rules.
|
||||
:::
|
||||
|
|