other: merge main.
This commit is contained in:
commit
1e020a46c9
|
@ -130,3 +130,4 @@ tools/COPYING
|
|||
tools/BUGS
|
||||
tools/taos-tools
|
||||
tools/taosws-rs
|
||||
tags
|
||||
|
|
|
@ -10,6 +10,8 @@ if (NOT DEFINED TD_SOURCE_DIR)
|
|||
set( TD_SOURCE_DIR ${PROJECT_SOURCE_DIR} )
|
||||
endif()
|
||||
|
||||
SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
|
||||
|
||||
set(TD_SUPPORT_DIR "${TD_SOURCE_DIR}/cmake")
|
||||
set(TD_CONTRIB_DIR "${TD_SOURCE_DIR}/contrib")
|
||||
|
||||
|
|
|
@ -0,0 +1,47 @@
|
|||
# 贡献者契约行为准则
|
||||
|
||||
[](code_of_conduct.md)
|
||||
|
||||
## 我们的承诺
|
||||
|
||||
为了营造一个开放和热情的环境,作为贡献者和维护者,我们承诺让每个人参与我们的项目和社区成为一种无骚扰的体验,无论年龄、体型、残疾、种族、性别特征、性别认同和表达、经验水平、教育、社会经济地位、国籍、个人外表、种族、宗教或性认同和取向如何。
|
||||
|
||||
## 我们的标准
|
||||
|
||||
有助于创造积极环境的行为示例包括:
|
||||
|
||||
- 使用热情和包容的语言
|
||||
- 尊重不同的观点和经历
|
||||
- 优雅地接受建设性的批评
|
||||
- 专注于对社区最有利的事情
|
||||
- 对其他社区成员表示同情
|
||||
|
||||
参与者不可接受的行为示例包括:
|
||||
|
||||
- 使用性感的语言或图像以及不受欢迎的性关注或进步
|
||||
- 拖钓、侮辱/贬损评论以及人身或政治攻击
|
||||
- 公共或私人骚扰
|
||||
- 未经明确许可发布他人的私人信息,例如物理地址或电子地址
|
||||
- 在专业环境中可能被合理认为不适当的其他行为
|
||||
|
||||
## 我们的责任
|
||||
|
||||
项目维护人员负责阐明可接受行为的标准,并期望针对任何不可接受行为的情况采取适当和公平的纠正措施。
|
||||
|
||||
项目维护者有权利和责任删除、编辑或拒绝评论、提交、代码、wiki 编辑、问题和其他不符合本行为准则的贡献,或暂时或永久禁止任何贡献者从事他们认为不适当、威胁、冒犯或有害的其他行为。
|
||||
|
||||
## 范围
|
||||
|
||||
本行为准则适用于所有项目空间,也适用于个人在公共场所代表项目或其社区时。 代表项目或社区的示例包括使用官方项目电子邮件地址、通过官方社交媒体帐户发布信息或在在线或离线活动中担任指定代表。 项目的表示可以由项目维护者进一步定义和澄清。
|
||||
|
||||
## 执法
|
||||
|
||||
可以通过 support@taosdata.com 联系项目团队来报告辱骂、骚扰或其他不可接受的行为。 所有投诉都将被审查和调查,并将产生被认为必要且适合具体情况的回应。 项目团队有义务对事件的报告者保密。 具体执行政策的更多细节可能会单独发布。
|
||||
|
||||
不善意遵守或执行行为准则的项目维护者可能会面临由项目领导的其他成员确定的临时或永久影响。
|
||||
|
||||
## 来源
|
||||
|
||||
本行为准则改编自贡献者公约 1.4 版,可在 https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 获取
|
||||
|
||||
有关此行为准则的常见问题的答案,请参阅 https://www.contributor-covenant.org/faq
|
19
Jenkinsfile2
19
Jenkinsfile2
|
@ -40,7 +40,7 @@ def check_docs() {
|
|||
sh '''
|
||||
cd ${WKC}
|
||||
git reset --hard
|
||||
git clean -fxd
|
||||
git clean -f
|
||||
rm -rf examples/rust/
|
||||
git remote prune origin
|
||||
git fetch
|
||||
|
@ -86,7 +86,7 @@ def pre_test(){
|
|||
git fetch
|
||||
cd ${WKC}
|
||||
git reset --hard
|
||||
git clean -fxd
|
||||
git clean -f
|
||||
rm -rf examples/rust/
|
||||
git remote prune origin
|
||||
git fetch
|
||||
|
@ -173,7 +173,7 @@ def pre_test_build_mac() {
|
|||
'''
|
||||
sh '''
|
||||
cd ${WK}/debug
|
||||
cmake .. -DBUILD_TEST=true -DBUILD_HTTPS=false
|
||||
cmake .. -DBUILD_TEST=true -DBUILD_HTTPS=false -DCMAKE_BUILD_TYPE=Release
|
||||
make -j10
|
||||
ctest -j10 || exit 7
|
||||
'''
|
||||
|
@ -201,7 +201,7 @@ def pre_test_win(){
|
|||
'''
|
||||
bat '''
|
||||
cd %WIN_COMMUNITY_ROOT%
|
||||
git clean -fxd
|
||||
git clean -f
|
||||
git reset --hard
|
||||
git remote prune origin
|
||||
git fetch
|
||||
|
@ -313,7 +313,8 @@ def pre_test_build_win() {
|
|||
bat '''
|
||||
cd %WIN_CONNECTOR_ROOT%
|
||||
python.exe -m pip install --upgrade pip
|
||||
python -m pip install .
|
||||
python -m pip uninstall taospy -y
|
||||
python -m pip install taospy==2.7.6
|
||||
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
|
||||
'''
|
||||
return 1
|
||||
|
@ -331,8 +332,6 @@ def run_win_test() {
|
|||
bat '''
|
||||
echo "windows test ..."
|
||||
cd %WIN_CONNECTOR_ROOT%
|
||||
python.exe -m pip install --upgrade pip
|
||||
python -m pip install .
|
||||
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
|
||||
ls -l C:\\Windows\\System32\\taos.dll
|
||||
time /t
|
||||
|
@ -387,7 +386,7 @@ pipeline {
|
|||
}
|
||||
steps {
|
||||
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
|
||||
timeout(time: 75, unit: 'MINUTES'){
|
||||
timeout(time: 126, unit: 'MINUTES'){
|
||||
pre_test_win()
|
||||
pre_test_build_win()
|
||||
run_win_ctest()
|
||||
|
@ -423,7 +422,7 @@ pipeline {
|
|||
echo "${WKDIR}/restore.sh -p ${BRANCH_NAME} -n ${BUILD_ID} -c {container name}"
|
||||
}
|
||||
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
|
||||
timeout(time: 120, unit: 'MINUTES'){
|
||||
timeout(time: 130, unit: 'MINUTES'){
|
||||
pre_test()
|
||||
script {
|
||||
sh '''
|
||||
|
@ -461,7 +460,7 @@ pipeline {
|
|||
cd ${WKC}/tests/parallel_test
|
||||
export DEFAULT_RETRY_TIME=2
|
||||
date
|
||||
''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 480 ''' + extra_param + '''
|
||||
''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 600 ''' + extra_param + '''
|
||||
'''
|
||||
}
|
||||
}
|
||||
|
|
14
README-CN.md
14
README-CN.md
|
@ -52,7 +52,7 @@ TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBench
|
|||
### Ubuntu 18.04 及以上版本 & Debian:
|
||||
|
||||
```bash
|
||||
sudo apt-get install -y gcc cmake build-essential git libssl-dev
|
||||
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
|
||||
```
|
||||
|
||||
#### 为 taos-tools 安装编译需要的软件
|
||||
|
@ -104,6 +104,16 @@ sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson
|
|||
sudo yum config-manager --set-enabled Powertools
|
||||
```
|
||||
|
||||
#### CentOS + devtoolset
|
||||
|
||||
除上述编译依赖包,需要执行以下命令:
|
||||
|
||||
```
|
||||
sudo yum install centos-release-scl
|
||||
sudo yum install devtoolset-9 devtoolset-9-libatomic-devel
|
||||
scl enable devtoolset-9 -- bash
|
||||
```
|
||||
|
||||
### macOS
|
||||
|
||||
```
|
||||
|
@ -342,4 +352,4 @@ TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java
|
|||
|
||||
# 加入技术交流群
|
||||
|
||||
TDengine 官方社群「物联网大数据群」对外开放,欢迎您加入讨论。搜索微信号 "tdengine",加小 T 为好友,即可入群。
|
||||
TDengine 官方社群「物联网大数据群」对外开放,欢迎您加入讨论。搜索微信号 "tdengine1",加小 T 为好友,即可入群。
|
||||
|
|
16
README.md
16
README.md
|
@ -60,7 +60,7 @@ To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in t
|
|||
### Ubuntu 18.04 and above or Debian
|
||||
|
||||
```bash
|
||||
sudo apt-get install -y gcc cmake build-essential git libssl-dev
|
||||
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
|
||||
```
|
||||
|
||||
#### Install build dependencies for taosTools
|
||||
|
@ -111,6 +111,16 @@ If the PowerTools installation fails, you can try to use:
|
|||
sudo yum config-manager --set-enabled powertools
|
||||
```
|
||||
|
||||
#### For CentOS + devtoolset
|
||||
|
||||
Besides above dependencies, please run following commands:
|
||||
|
||||
```
|
||||
sudo yum install centos-release-scl
|
||||
sudo yum install devtoolset-9 devtoolset-9-libatomic-devel
|
||||
scl enable devtoolset-9 -- bash
|
||||
```
|
||||
|
||||
### macOS
|
||||
|
||||
```
|
||||
|
@ -355,6 +365,6 @@ Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to th
|
|||
For more information about TDengine, you can follow us on social media and join our Discord server:
|
||||
|
||||
- [Discord](https://discord.com/invite/VZdSuUg4pS)
|
||||
- [Twitter](https://twitter.com/TaosData)
|
||||
- [Twitter](https://twitter.com/TDengineDB)
|
||||
- [LinkedIn](https://www.linkedin.com/company/tdengine/)
|
||||
- [YouTube](https://www.youtube.com/channel/UCmp-1U6GS_3V3hjir6Uq5DQ)
|
||||
- [YouTube](https://www.youtube.com/@tdengine)
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
cmake_minimum_required(VERSION 3.0)
|
||||
|
||||
set(CMAKE_VERBOSE_MAKEFILE OFF)
|
||||
set(TD_BUILD_TAOSA_INTERNAL FALSE)
|
||||
|
||||
#set output directory
|
||||
SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)
|
||||
|
@ -118,9 +119,12 @@ ELSE ()
|
|||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
|
||||
MESSAGE(STATUS "Compile with Address Sanitizer!")
|
||||
ELSEIF (${BUILD_RELEASE})
|
||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
ELSE ()
|
||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
ENDIF ()
|
||||
|
||||
# disable all assert
|
||||
|
|
|
@ -21,7 +21,7 @@ IF (TD_LINUX)
|
|||
ELSEIF (TD_WINDOWS)
|
||||
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.bat")
|
||||
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
|
||||
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} :needAdmin ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Windows ${TD_VER_NUMBER})")
|
||||
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} :needAdmin ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Windows ${TD_VER_NUMBER} ${TD_BUILD_TAOSA_INTERNAL})")
|
||||
ELSEIF (TD_DARWIN)
|
||||
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.sh")
|
||||
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
|
||||
|
|
|
@ -171,3 +171,8 @@ option(
|
|||
ON
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_RELEASE
|
||||
"If build release version"
|
||||
OFF
|
||||
)
|
||||
|
|
|
@ -37,6 +37,21 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
|
|||
SET(TD_LINUX_32 TRUE)
|
||||
ENDIF ()
|
||||
|
||||
EXECUTE_PROCESS(COMMAND chmod 777 ${CMAKE_CURRENT_LIST_DIR}/../packaging/tools/get_os.sh)
|
||||
EXECUTE_PROCESS(COMMAND readlink /bin/sh OUTPUT_VARIABLE SHELL_LINK)
|
||||
MESSAGE(STATUS "The shell is: " ${SHELL_LINK})
|
||||
|
||||
IF (${SHELL_LINK} MATCHES "dash")
|
||||
EXECUTE_PROCESS(COMMAND ${CMAKE_CURRENT_LIST_DIR}/../packaging/tools/get_os.sh "" OUTPUT_VARIABLE TD_OS_INFO)
|
||||
ELSE ()
|
||||
EXECUTE_PROCESS(COMMAND sh ${CMAKE_CURRENT_LIST_DIR}/../packaging/tools/get_os.sh "" OUTPUT_VARIABLE TD_OS_INFO)
|
||||
ENDIF ()
|
||||
MESSAGE(STATUS "The current OS is " ${TD_OS_INFO})
|
||||
IF (${TD_OS_INFO} MATCHES "Alpine")
|
||||
SET(TD_ALPINE TRUE)
|
||||
ADD_DEFINITIONS("-D_ALPINE")
|
||||
ENDIF ()
|
||||
|
||||
ELSEIF (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
|
||||
|
||||
SET(TD_DARWIN TRUE)
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
IF (DEFINED VERNUMBER)
|
||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||
ELSE ()
|
||||
SET(TD_VER_NUMBER "3.0.2.5")
|
||||
SET(TD_VER_NUMBER "3.0.4.1")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED VERCOMPATIBLE)
|
||||
|
@ -16,7 +16,7 @@ find_program(HAVE_GIT NAMES git)
|
|||
IF (DEFINED GITINFO)
|
||||
SET(TD_VER_GIT ${GITINFO})
|
||||
ELSEIF (HAVE_GIT)
|
||||
execute_process(COMMAND git log -1 --format=%H WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE GIT_COMMITID)
|
||||
execute_process(COMMAND git log -1 --format=%H WORKING_DIRECTORY ${TD_COMMUNITY_DIR} OUTPUT_VARIABLE GIT_COMMITID)
|
||||
#message(STATUS "git log result:${GIT_COMMITID}")
|
||||
IF (GIT_COMMITID)
|
||||
string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
|
||||
|
@ -30,6 +30,23 @@ ELSE ()
|
|||
SET(TD_VER_GIT "no git commit id")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED GITINFOI)
|
||||
SET(TD_VER_GIT_INTERNAL ${GITINFOI})
|
||||
ELSEIF (HAVE_GIT)
|
||||
execute_process(COMMAND git log -1 --format=%H WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} OUTPUT_VARIABLE GIT_COMMITID)
|
||||
message(STATUS "git log result:${GIT_COMMITID}")
|
||||
IF (GIT_COMMITID)
|
||||
string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
|
||||
SET(TD_VER_GIT_INTERNAL ${GIT_COMMITID})
|
||||
ELSE ()
|
||||
message(STATUS "not a git repository")
|
||||
SET(TD_VER_GIT "no git commit id")
|
||||
ENDIF ()
|
||||
ELSE ()
|
||||
message(STATUS "no git cmd")
|
||||
SET(TD_VER_GIT_INTERNAL "no git commit id")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED VERDATE)
|
||||
SET(TD_VER_DATE ${VERDATE})
|
||||
ELSE ()
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taosadapter
|
||||
ExternalProject_Add(taosadapter
|
||||
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
||||
GIT_TAG db6c843
|
||||
GIT_TAG 565ca21
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taos-tools
|
||||
ExternalProject_Add(taos-tools
|
||||
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
|
||||
GIT_TAG 7c641c5
|
||||
GIT_TAG 4378702
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taosws-rs
|
||||
ExternalProject_Add(taosws-rs
|
||||
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
|
||||
GIT_TAG f406d51
|
||||
GIT_TAG main
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -0,0 +1,9 @@
|
|||
-DLINUX
|
||||
-DWEBSOCKET
|
||||
-I/usr/include
|
||||
-Iinclude
|
||||
-Iinclude/os
|
||||
-Iinclude/common
|
||||
-Iinclude/util
|
||||
-Iinclude/libs/transport
|
||||
-Itools/shell/inc
|
|
@ -204,7 +204,7 @@ group vnodeProcessReqs()
|
|||
s -> s:
|
||||
note right
|
||||
save the requests in log store
|
||||
and wait for comfirmation or
|
||||
and wait for confirmation or
|
||||
other cases
|
||||
end note
|
||||
|
||||
|
@ -236,7 +236,7 @@ s -> s: syncAppendReqToLogStore()
|
|||
s -> v: walWrite()
|
||||
|
||||
alt has meta req
|
||||
<- s: comfirmation
|
||||
<- s: confirmation
|
||||
else
|
||||
s -> v: vnodeApplyReqs()
|
||||
end
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
title: TDengine Documentation
|
||||
sidebar_label: Documentation Home
|
||||
description: This website contains the user manuals for TDengine, an open-source, cloud-native time-series database optimized for IoT, Connected Cars, and Industrial IoT.
|
||||
slug: /
|
||||
---
|
||||
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: Introduction
|
||||
description: This document introduces the major features, competitive advantages, typical use cases, and benchmarks of TDengine.
|
||||
toc_max_heading_level: 2
|
||||
---
|
||||
|
||||
|
@ -122,11 +123,11 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
|
|||
|
||||
## Comparison with other databases
|
||||
|
||||
- [Writing Performance Comparison of TDengine and InfluxDB ](https://tdengine.com/2022/02/23/4975.html)
|
||||
- [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/2022/02/24/5120.html)
|
||||
- [TDengine vs OpenTSDB](https://tdengine.com/2019/09/12/710.html)
|
||||
- [TDengine vs Cassandra](https://tdengine.com/2019/09/12/708.html)
|
||||
- [TDengine vs InfluxDB](https://tdengine.com/2019/09/12/706.html)
|
||||
- [Writing Performance Comparison of TDengine and InfluxDB ](https://tdengine.com/performance-comparison-of-tdengine-and-influxdb/)
|
||||
- [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/query-performance-comparison-test-report-tdengine-vs-influxdb/)
|
||||
- [TDengine vs OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
|
||||
- [TDengine vs Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)
|
||||
- [TDengine vs InfluxDB](https://tdengine.com/performance-tdengine-vs-influxdb/)
|
||||
|
||||
## More readings
|
||||
- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: Concepts
|
||||
description: This document describes the basic concepts of TDengine, including the supertable.
|
||||
---
|
||||
|
||||
In order to explain the basic concepts and provide some sample code, the TDengine documentation smart meters as a typical time series use case. We assume the following: 1. Each smart meter collects three metrics i.e. current, voltage, and phase; 2. There are multiple smart meters; 3. Each meter has static attributes like location and group ID. Based on this, collected data will look similar to the following table:
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Docker
|
||||
title: Quick Install on Docker
|
||||
sidebar_label: Docker
|
||||
description: This document describes how to install TDengine in a Docker container and perform queries and inserts.
|
||||
---
|
||||
|
||||
This document describes how to install TDengine in a Docker container and perform queries and inserts.
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Package
|
||||
title: Quick Install from Package
|
||||
sidebar_label: Package
|
||||
description: This document describes how to install TDengine on Linux, Windows, and macOS and perform queries and inserts.
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
|
@ -187,7 +188,7 @@ You can use the TDengine CLI to monitor your TDengine deployment and execute ad
|
|||
|
||||
<TabItem label="Windows" value="windows">
|
||||
|
||||
After the installation is complete, run `C:\TDengine\taosd.exe` to start TDengine Server.
|
||||
After the installation is complete, please run `sc start taosd` or run `C:\TDengine\taosd.exe` with administrator privilege to start TDengine Server.
|
||||
|
||||
## Command Line Interface (CLI)
|
||||
|
||||
|
@ -201,16 +202,18 @@ After the installation is complete, double-click the /applications/TDengine to s
|
|||
|
||||
The following `launchctl` commands can help you manage TDengine service:
|
||||
|
||||
- Start TDengine Server: `launchctl start com.tdengine.taosd`
|
||||
- Start TDengine Server: `sudo launchctl start com.tdengine.taosd`
|
||||
|
||||
- Stop TDengine Server: `launchctl stop com.tdengine.taosd`
|
||||
- Stop TDengine Server: `sudo launchctl stop com.tdengine.taosd`
|
||||
|
||||
- Check TDengine Server status: `launchctl list | grep taosd`
|
||||
- Check TDengine Server status: `sudo launchctl list | grep taosd`
|
||||
|
||||
:::info
|
||||
|
||||
- The `launchctl` command does not require _root_ privileges. You don't need to use the `sudo` command.
|
||||
- The first content returned by the `launchctl list | grep taosd` command is the PID of the program, if '-' indicates that the TDengine service is not running.
|
||||
- Please use `sudo` to run `launchctl` to manage _com.tdengine.taosd_ with administrator privileges.
|
||||
- The administrator privilege is required for service management to enhance security.
|
||||
- Troubleshooting:
|
||||
- The first column returned by the command `launchctl list | grep taosd` is the PID of the program. If it's `-`, that means the TDengine service is not running.
|
||||
- If the service is abnormal, please check the `launchd.log` file from the system log or the `taosdlog` from the `/var/log/taos directory` for more information.
|
||||
|
||||
:::
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
title: Get Started
|
||||
description: This article describes how to install TDengine and test its performance.
|
||||
description: This document describes how to install TDengine on various platforms.
|
||||
---
|
||||
|
||||
import GitHubSVG from './github.svg'
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
sidebar_label: Connect
|
||||
title: Connect to TDengine
|
||||
description: "How to establish connections to TDengine and how to install and use TDengine connectors."
|
||||
sidebar_label: Connect
|
||||
description: This document describes how to establish connections to TDengine and how to install and use TDengine connectors.
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: Data Model
|
||||
description: This document describes the data model of TDengine.
|
||||
---
|
||||
|
||||
The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the [STable](/concept/#super-table-stable) (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details.
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: Insert Using SQL
|
||||
description: This document describes how to insert data into TDengine using SQL.
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
|
@ -29,25 +30,31 @@ Application programs can execute `INSERT` statement through connectors to insert
|
|||
The below SQL statement is used to insert one row into table "d1001".
|
||||
|
||||
```sql
|
||||
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
|
||||
INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31);
|
||||
```
|
||||
|
||||
`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
|
||||
|
||||
### Insert Multiple Rows
|
||||
|
||||
Multiple rows can be inserted in a single SQL statement. The example below inserts 2 rows into table "d1001".
|
||||
|
||||
```sql
|
||||
INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25);
|
||||
INSERT INTO d1001 VALUES (ts2, 10.2, 220, 0.23) (ts2, 10.3, 218, 0.25);
|
||||
```
|
||||
|
||||
`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
|
||||
|
||||
### Insert into Multiple Tables
|
||||
|
||||
Data can be inserted into multiple tables in the same SQL statement. The example below inserts 2 rows into table "d1001" and 1 row into table "d1002".
|
||||
|
||||
```sql
|
||||
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
|
||||
INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31) (ts2, 12.6, 218, 0.33) d1002 VALUES (ts3, 12.3, 221, 0.31);
|
||||
```
|
||||
|
||||
`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
|
||||
|
||||
For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).
|
||||
|
||||
:::info
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: Write from Kafka
|
||||
description: This document describes how to insert data into TDengine using Kafka.
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: InfluxDB Line Protocol
|
||||
title: InfluxDB Line Protocol
|
||||
sidebar_label: InfluxDB Line Protocol
|
||||
description: This document describes how to insert data into TDengine using the InfluxDB Line Protocol.
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
|
@ -38,7 +39,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
|
|||
- Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double.
|
||||
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h).
|
||||
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
|
||||
- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.(smlDataFormat in taos.cfg default to false after version of 3.0.1.3)
|
||||
- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.(smlDataFormat in taos.cfg default to false after version of 3.0.1.3, smlDataFormat is discarded since 3.0.3.0)
|
||||
:::
|
||||
|
||||
For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: OpenTSDB Line Protocol
|
||||
title: OpenTSDB Line Protocol
|
||||
sidebar_label: OpenTSDB Line Protocol
|
||||
description: This document describes how to insert data into TDengine using the OpenTSDB Line Protocol.
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: OpenTSDB JSON Protocol
|
||||
title: OpenTSDB JSON Protocol
|
||||
sidebar_label: OpenTSDB JSON Protocol
|
||||
description: This document describes how to insert data into TDengine using the OpenTSDB JSON protocol.
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
|
@ -47,7 +48,6 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
|
|||
:::note
|
||||
|
||||
- In JSON protocol, strings will be converted to NCHAR type and numeric values will be converted to double type.
|
||||
- Only data in array format is accepted and so an array must be used even if there is only one row.
|
||||
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
|
||||
:::
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: High Performance Writing
|
||||
title: High Performance Writing
|
||||
sidebar_label: High Performance Writing
|
||||
description: This document describes how to achieve high performance when writing data into TDengine.
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
|
@ -27,7 +28,7 @@ From the perspective of application program, you need to consider:
|
|||
- Writing to known existing tables is more efficient than writing to uncertain tables in automatic creating mode because the later needs to check whether the table exists or not before actually writing data into it.
|
||||
- Writing in SQL is more efficient than writing in schemaless mode because schemaless writing creates table automatically and may alter table schema.
|
||||
|
||||
Application programs need to take care of the above factors and try to take advantage of them. The application progam should write to single table in each write batch. The batch size needs to be tuned to a proper value on a specific system. The number of concurrent connections needs to be tuned to a proper value too to achieve the best writing throughput.
|
||||
Application programs need to take care of the above factors and try to take advantage of them. The application program should write to single table in each write batch. The batch size needs to be tuned to a proper value on a specific system. The number of concurrent connections needs to be tuned to a proper value too to achieve the best writing throughput.
|
||||
|
||||
### Data Source
|
||||
|
||||
|
|
|
@ -53,8 +53,69 @@ for p in ps:
|
|||
|
||||
In addition to python's built-in multithreading and multiprocessing library, we can also use the third-party library gunicorn.
|
||||
|
||||
### Examples
|
||||
### examples
|
||||
|
||||
<details>
|
||||
<summary>kafka_example_perform</summary>
|
||||
|
||||
`kafka_example_perform` is the entry point of the examples.
|
||||
|
||||
```py
|
||||
{{#include docs/examples/python/kafka_example.py}}
|
||||
{{#include docs/examples/python/kafka_example_perform.py}}
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>kafka_example_common</summary>
|
||||
|
||||
`kafka_example_common` is the common code of the examples.
|
||||
|
||||
```py
|
||||
{{#include docs/examples/python/kafka_example_common.py}}
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>kafka_example_producer</summary>
|
||||
|
||||
`kafka_example_producer` is `producer`, which is responsible for generating test data and sending it to kafka.
|
||||
|
||||
```py
|
||||
{{#include docs/examples/python/kafka_example_producer.py}}
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>kafka_example_consumer</summary>
|
||||
|
||||
`kafka_example_consumer` is `consumer`,which is responsible for consuming data from kafka and writing it to TDengine.
|
||||
|
||||
```py
|
||||
{{#include docs/examples/python/kafka_example_consumer.py}}
|
||||
```
|
||||
</details>
|
||||
|
||||
### execute Python examples
|
||||
|
||||
<details>
|
||||
<summary>execute Python examples</summary>
|
||||
|
||||
1. install and start up `kafka`
|
||||
2. install python3 and pip
|
||||
3. install `taospy` by pip
|
||||
4. install `kafka-python` by pip
|
||||
5. execute this example
|
||||
|
||||
The entry point of this example is `kafka_example_perform.py`. For more information about usage, please use `--help` command.
|
||||
|
||||
```
|
||||
python3 kafka_example_perform.py --help
|
||||
```
|
||||
|
||||
For example, the following command is creating 100 sub-table and inserting 20000 data for each table and the kafka max poll is 100 and 1 thread and 1 process per thread.
|
||||
|
||||
```
|
||||
python3 kafka_example_perform.py -table-count=100 -table-items=20000 -max-poll=100 -threads=1 -processes=1
|
||||
```
|
||||
|
||||
</details>
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: Insert Data
|
||||
description: This document describes how to insert data into TDengine.
|
||||
---
|
||||
|
||||
TDengine supports multiple protocols of inserting data, including SQL, InfluxDB Line protocol, OpenTSDB Telnet protocol, and OpenTSDB JSON protocol. Data can be inserted row by row, or in batches. Data from one or more collection points can be inserted simultaneously. Data can be inserted with multiple threads, and out of order data and historical data can be inserted as well. InfluxDB Line protocol, OpenTSDB Telnet protocol and OpenTSDB JSON protocol are the 3 kinds of schemaless insert protocols supported by TDengine. It's not necessary to create STables and tables in advance if using schemaless protocols, and the schemas can be adjusted automatically based on the data being inserted.
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
title: Query Data
|
||||
description: "This chapter introduces major query functionalities and how to perform sync and async query using connectors."
|
||||
description: This document describes how to query data in TDengine and how to perform synchronous and asynchronous queries using connectors.
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
sidebar_label: Stream Processing
|
||||
description: "The TDengine stream processing engine combines data inserts, preprocessing, analytics, real-time computation, and alerting into a single component."
|
||||
title: Stream Processing
|
||||
sidebar_label: Stream Processing
|
||||
description: This document describes the stream processing component of TDengine.
|
||||
---
|
||||
|
||||
Raw time-series data is often cleaned and preprocessed before being permanently stored in a database. In a traditional time-series solution, this generally requires the deployment of stream processing systems such as Kafka or Flink. However, the complexity of such systems increases the cost of development and maintenance.
|
||||
|
|
|
@ -7,6 +7,7 @@ title: Data Subscription
|
|||
import Tabs from "@theme/Tabs";
|
||||
import TabItem from "@theme/TabItem";
|
||||
import Java from "./_sub_java.mdx";
|
||||
import JavaWS from "./_sub_java_ws.mdx"
|
||||
import Python from "./_sub_python.mdx";
|
||||
import Go from "./_sub_go.mdx";
|
||||
import Rust from "./_sub_rust.mdx";
|
||||
|
@ -22,7 +23,7 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i
|
|||
|
||||
To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.
|
||||
|
||||
|
||||
Tips:The default data subscription is to consume data from the wal. If the wal is deleted, the consumed data will be incomplete. At this time, you can set the parameter experimental.snapshot.enable to true to obtain all data from the tsdb, but in this way, the consumption order of the data cannot be guaranteed. Therefore, it is recommended to set a reasonable retention policy for WAL based on your consumption situation to ensure that you can subscribe all data from WAL.
|
||||
|
||||
## Data Schema and API
|
||||
|
||||
|
@ -94,22 +95,21 @@ void close() throws SQLException;
|
|||
<TabItem value="Python" label="Python">
|
||||
|
||||
```python
|
||||
class TaosConsumer():
|
||||
def __init__(self, *topics, **configs)
|
||||
class Consumer:
|
||||
def subscribe(self, topics):
|
||||
pass
|
||||
|
||||
def __iter__(self)
|
||||
def unsubscribe(self):
|
||||
pass
|
||||
|
||||
def __next__(self)
|
||||
def poll(self, timeout: float = 1.0):
|
||||
pass
|
||||
|
||||
def sync_next(self)
|
||||
|
||||
def subscription(self)
|
||||
def close(self):
|
||||
pass
|
||||
|
||||
def unsubscribe(self)
|
||||
|
||||
def close(self)
|
||||
|
||||
def __del__(self)
|
||||
def commit(self, message):
|
||||
pass
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
@ -117,19 +117,22 @@ class TaosConsumer():
|
|||
<TabItem label="Go" value="Go">
|
||||
|
||||
```go
|
||||
func NewConsumer(conf *Config) (*Consumer, error)
|
||||
func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)
|
||||
|
||||
func (c *Consumer) Close() error
|
||||
// rebalanceCb is reserved for compatibility purpose
|
||||
func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error
|
||||
|
||||
func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error
|
||||
// rebalanceCb is reserved for compatibility purpose
|
||||
func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error
|
||||
|
||||
func (c *Consumer) FreeMessage(message unsafe.Pointer)
|
||||
func (c *Consumer) Poll(timeoutMs int) tmq.Event
|
||||
|
||||
func (c *Consumer) Poll(timeout time.Duration) (*Result, error)
|
||||
|
||||
func (c *Consumer) Subscribe(topics []string) error
|
||||
// tmq.TopicPartition is reserved for compatibility purpose
|
||||
func (c *Consumer) Commit() ([]tmq.TopicPartition, error)
|
||||
|
||||
func (c *Consumer) Unsubscribe() error
|
||||
|
||||
func (c *Consumer) Close() error
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
@ -220,7 +223,7 @@ A database including one supertable and two subtables is created as follows:
|
|||
```sql
|
||||
DROP DATABASE IF EXISTS tmqdb;
|
||||
CREATE DATABASE tmqdb;
|
||||
CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16) TAGS(t1 INT, t3 VARCHAR(16));
|
||||
CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16)) TAGS(t1 INT, t3 VARCHAR(16));
|
||||
CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0");
|
||||
CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");
|
||||
INSERT INTO tmqdb.ctb0 VALUES(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00');
|
||||
|
@ -282,18 +285,17 @@ You configure the following parameters when creating a consumer:
|
|||
|
||||
| Parameter | Type | Description | Remarks |
|
||||
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
|
||||
| `td.connect.ip` | string | Used in establishing a connection; same as `taos_connect` | |
|
||||
| `td.connect.user` | string | Used in establishing a connection; same as `taos_connect` | |
|
||||
| `td.connect.pass` | string | Used in establishing a connection; same as `taos_connect` | |
|
||||
| `td.connect.port` | string | Used in establishing a connection; same as `taos_connect` | |
|
||||
| `td.connect.ip` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
|
||||
| `td.connect.user` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
|
||||
| `td.connect.pass` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
|
||||
| `td.connect.port` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
|
||||
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. |
|
||||
| `client.id` | string | Client ID | Maximum length: 192. |
|
||||
| `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
|
||||
| `enable.auto.commit` | boolean | Commit automatically | Specify `true` or `false`. |
|
||||
| `enable.auto.commit` | boolean | Commit automatically; true: user application doesn't need to explicitly commit; false: user application need to handle commit by itself | Default value is true |
|
||||
| `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds |
|
||||
| `enable.heartbeat.background` | boolean | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | |
|
||||
| `experimental.snapshot.enable` | boolean | Specify whether to consume messages from the WAL or from TSBS | |
|
||||
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages |
|
||||
| `experimental.snapshot.enable` | boolean | Specify whether to consume data in TSDB; true: both data in WAL and in TSDB can be consumed; false: only data in WAL can be consumed | default value: false |
|
||||
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages | default value: false
|
||||
|
||||
The method of specifying these parameters depends on the language used:
|
||||
|
||||
|
@ -357,50 +359,19 @@ public class MetersDeserializer extends ReferenceDeserializer<Meters> {
|
|||
<TabItem label="Go" value="Go">
|
||||
|
||||
```go
|
||||
config := tmq.NewConfig()
|
||||
defer config.Destroy()
|
||||
err = config.SetGroupID("test")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = config.SetAutoOffsetReset("earliest")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = config.SetConnectIP("127.0.0.1")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = config.SetConnectUser("root")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = config.SetConnectPass("taosdata")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = config.SetConnectPort("6030")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = config.SetMsgWithTableName(true)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = config.EnableHeartBeat()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) {
|
||||
if result.ErrCode != 0 {
|
||||
errStr := wrapper.TMQErr2Str(result.ErrCode)
|
||||
err := errors.NewError(int(result.ErrCode), errStr)
|
||||
panic(err)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
conf := &tmq.ConfigMap{
|
||||
"group.id": "test",
|
||||
"auto.offset.reset": "earliest",
|
||||
"td.connect.ip": "127.0.0.1",
|
||||
"td.connect.user": "root",
|
||||
"td.connect.pass": "taosdata",
|
||||
"td.connect.port": "6030",
|
||||
"client.id": "test_tmq_c",
|
||||
"enable.auto.commit": "false",
|
||||
"experimental.snapshot.enable": "true",
|
||||
"msg.with.table.name": "true",
|
||||
}
|
||||
consumer, err := NewConsumer(conf)
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
@ -422,23 +393,31 @@ let mut consumer = tmq.build()?;
|
|||
|
||||
<TabItem value="Python" label="Python">
|
||||
|
||||
```python
|
||||
from taos.tmq import Consumer
|
||||
|
||||
# Syntax: `consumer = Consumer(configs)`
|
||||
#
|
||||
# Example:
|
||||
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
||||
```
|
||||
|
||||
Python programs use the following parameters:
|
||||
|
||||
| Parameter | Type | Description | Remarks |
|
||||
| :----------------------------: | :----: | -------------------------------------------------------- | ------------------------------------------- |
|
||||
| `td_connect_ip` | string | Used in establishing a connection; same as `taos_connect` | |
|
||||
| `td_connect_user` | string | Used in establishing a connection; same as `taos_connect` | |
|
||||
| `td_connect_pass` | string | Used in establishing a connection; same as `taos_connect` | |
|
||||
| `td_connect_port` | string | Used in establishing a connection; same as `taos_connect` | |
|
||||
| `group_id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. |
|
||||
| `client_id` | string | Client ID | Maximum length: 192. |
|
||||
| `auto_offset_reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
|
||||
| `enable_auto_commit` | string | Commit automatically | Specify `true` or `false`. |
|
||||
| `auto_commit_interval_ms` | string | Interval for automatic commits, in milliseconds |
|
||||
| `enable_heartbeat_background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false`. |
|
||||
| `experimental_snapshot_enable` | string | Specify whether to consume messages from the WAL or from TSBS | Specify `true` or `false`. |
|
||||
| `msg_with_table_name` | string | Specify whether to deserialize table names from messages | Specify `true` or `false`.
|
||||
| `timeout` | int | Consumer pull timeout | |
|
||||
| Parameter | Type | Description | Remarks |
|
||||
|:---------:|:----:|:-----------:|:-------:|
|
||||
| `td.connect.ip` | string | Used in establishing a connection||
|
||||
| `td.connect.user` | string | Used in establishing a connection||
|
||||
| `td.connect.pass` | string | Used in establishing a connection||
|
||||
| `td.connect.port` | string | Used in establishing a connection||
|
||||
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192 |
|
||||
| `client.id` | string | Client ID | Maximum length: 192 |
|
||||
| `msg.with.table.name` | string | Specify whether to deserialize table names from messages | pecify `true` or `false` |
|
||||
| `enable.auto.commit` | string | Commit automatically | pecify `true` or `false` |
|
||||
| `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | |
|
||||
| `auto.offset.reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
|
||||
| `experimental.snapshot.enable` | string | Specify whether it's allowed to consume messages from the WAL or from TSDB | Specify `true` or `false` |
|
||||
| `enable.heartbeat.background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false` |
|
||||
|
||||
</TabItem>
|
||||
|
||||
|
@ -523,11 +502,7 @@ consumer.subscribe(topics);
|
|||
<TabItem value="Go" label="Go">
|
||||
|
||||
```go
|
||||
consumer, err := tmq.NewConsumer(config)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = consumer.Subscribe([]string{"example_tmq_topic"})
|
||||
err = consumer.Subscribe("example_tmq_topic", nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -545,7 +520,7 @@ consumer.subscribe(["tmq_meters"]).await?;
|
|||
<TabItem value="Python" label="Python">
|
||||
|
||||
```python
|
||||
consumer = TaosConsumer('topic_ctb_column', group_id='vg2')
|
||||
consumer.subscribe(['topic1', 'topic2'])
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
@ -611,13 +586,17 @@ while(running){
|
|||
|
||||
```go
|
||||
for {
|
||||
result, err := consumer.Poll(time.Second)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
ev := consumer.Poll(0)
|
||||
if ev != nil {
|
||||
switch e := ev.(type) {
|
||||
case *tmqcommon.DataMessage:
|
||||
fmt.Println(e.Value())
|
||||
case tmqcommon.Error:
|
||||
fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e)
|
||||
panic(e)
|
||||
}
|
||||
consumer.Commit()
|
||||
}
|
||||
fmt.Println(result)
|
||||
consumer.Commit(context.Background(), result.Message)
|
||||
consumer.FreeMessage(result.Message)
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -660,9 +639,17 @@ for {
|
|||
<TabItem value="Python" label="Python">
|
||||
|
||||
```python
|
||||
for msg in consumer:
|
||||
for row in msg:
|
||||
print(row)
|
||||
while True:
|
||||
res = consumer.poll(100)
|
||||
if not res:
|
||||
continue
|
||||
err = res.error()
|
||||
if err is not None:
|
||||
raise err
|
||||
val = res.value()
|
||||
|
||||
for block in val:
|
||||
print(block.fetchall())
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
@ -729,7 +716,11 @@ consumer.close();
|
|||
<TabItem value="Go" label="Go">
|
||||
|
||||
```go
|
||||
consumer.Close()
|
||||
/* Unsubscribe */
|
||||
_ = consumer.Unsubscribe()
|
||||
|
||||
/* Close consumer */
|
||||
_ = consumer.Close()
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
@ -815,7 +806,14 @@ The following section shows sample code in various languages.
|
|||
</TabItem>
|
||||
|
||||
<TabItem label="Java" value="java">
|
||||
<Java />
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="native connection">
|
||||
<Java />
|
||||
</TabItem>
|
||||
<TabItem value="ws" label="WebSocket connection">
|
||||
<JavaWS />
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="Go" value="Go">
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
sidebar_label: Caching
|
||||
title: Caching
|
||||
description: "This document describes the caching component of TDengine."
|
||||
sidebar_label: Caching
|
||||
description: This document describes the caching component of TDengine.
|
||||
---
|
||||
|
||||
TDengine uses various kinds of caching techniques to efficiently write and query data. This document describes the caching component of TDengine.
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
sidebar_label: UDF
|
||||
title: User-Defined Functions (UDF)
|
||||
description: "You can define your own scalar and aggregate functions to expand the query capabilities of TDengine."
|
||||
sidebar_label: UDF
|
||||
description: This document describes how to create user-defined functions (UDF), your own scalar and aggregate functions that can expand the query capabilities of TDengine.
|
||||
---
|
||||
|
||||
The built-in functions of TDengine may not be sufficient for the use cases of every application. In this case, you can define custom functions for use in TDengine queries. These are known as user-defined functions (UDF). A user-defined function takes one column of data or the result of a subquery as its input.
|
||||
|
@ -65,11 +65,11 @@ int32_t aggfn_init() {
|
|||
}
|
||||
|
||||
// aggregate start function. The intermediate value or the state(@interBuf) is initialized in this function. The function name shall be concatenation of udf name and _start suffix
|
||||
// @param interbuf intermediate value to intialize
|
||||
// @param interbuf intermediate value to initialize
|
||||
// @return error number defined in taoserror.h
|
||||
int32_t aggfn_start(SUdfInterBuf* interBuf) {
|
||||
// initialize intermediate value in interBuf
|
||||
return TSDB_CODE_SUCESS;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
// aggregate reduce function. This function aggregate old state(@interbuf) and one data bock(inputBlock) and output a new state(@newInterBuf).
|
||||
|
|
|
@ -1,11 +1,9 @@
|
|||
```java
|
||||
{{#include docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java}}
|
||||
{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
|
||||
{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
|
||||
```
|
||||
```java
|
||||
{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
|
||||
```
|
||||
```java
|
||||
{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
|
||||
```
|
||||
```
|
||||
|
|
|
@ -0,0 +1,9 @@
|
|||
```java
|
||||
{{#include docs/examples/java/src/main/java/com/taos/example/WebsocketSubscribeDemo.java}}
|
||||
```
|
||||
```java
|
||||
{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
|
||||
```
|
||||
```java
|
||||
{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
|
||||
```
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: Developer Guide
|
||||
description: This document describes how to use the various components of TDengine from a developer's perspective.
|
||||
---
|
||||
|
||||
Before creating an application to process time-series data with TDengine, consider the following:
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Manual Deployment
|
||||
title: Manual Deployment and Management
|
||||
sidebar_label: Manual Deployment
|
||||
description: This document describes how to deploy TDengine on a server.
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Kubernetes
|
||||
title: Deploying a TDengine Cluster in Kubernetes
|
||||
sidebar_label: Kubernetes
|
||||
description: This document describes how to deploy TDengine on Kubernetes.
|
||||
---
|
||||
|
||||
TDengine is a cloud-native time-series database that can be deployed on Kubernetes. This document gives a step-by-step description of how you can use YAML files to create a TDengine cluster and introduces common operations for TDengine in a Kubernetes environment.
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Helm
|
||||
title: Use Helm to deploy TDengine
|
||||
sidebar_label: Helm
|
||||
description: This document describes how to deploy TDengine on Kubernetes by using Helm.
|
||||
---
|
||||
|
||||
Helm is a package manager for Kubernetes that can provide more capabilities in deploying on Kubernetes.
|
||||
|
@ -22,7 +23,7 @@ Helm uses the kubectl and kubeconfig configurations to perform Kubernetes operat
|
|||
To use TDengine Chart, download it from GitHub:
|
||||
|
||||
```bash
|
||||
wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.0.tgz
|
||||
wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.2.tgz
|
||||
|
||||
```
|
||||
|
||||
|
@ -38,7 +39,7 @@ With minikube, the default value is standard.
|
|||
Use Helm commands to install TDengine:
|
||||
|
||||
```bash
|
||||
helm install tdengine tdengine-3.0.0.tgz \
|
||||
helm install tdengine tdengine-3.0.2.tgz \
|
||||
--set storage.className=<your storage class name>
|
||||
|
||||
```
|
||||
|
@ -46,7 +47,7 @@ helm install tdengine tdengine-3.0.0.tgz \
|
|||
You can configure a small storage size in minikube to ensure that your deployment does not exceed your available disk space.
|
||||
|
||||
```bash
|
||||
helm install tdengine tdengine-3.0.0.tgz \
|
||||
helm install tdengine tdengine-3.0.2.tgz \
|
||||
--set storage.className=standard \
|
||||
--set storage.dataSize=2Gi \
|
||||
--set storage.logSize=10Mi
|
||||
|
@ -83,14 +84,14 @@ You can configure custom parameters in TDengine with the `values.yaml` file.
|
|||
Run the `helm show values` command to see all parameters supported by TDengine Chart.
|
||||
|
||||
```bash
|
||||
helm show values tdengine-3.0.0.tgz
|
||||
helm show values tdengine-3.0.2.tgz
|
||||
|
||||
```
|
||||
|
||||
Save the output of this command as `values.yaml`. Then you can modify this file with your desired values and use it to deploy a TDengine cluster:
|
||||
|
||||
```bash
|
||||
helm install tdengine tdengine-3.0.0.tgz -f values.yaml
|
||||
helm install tdengine tdengine-3.0.2.tgz -f values.yaml
|
||||
|
||||
```
|
||||
|
||||
|
@ -107,7 +108,7 @@ image:
|
|||
prefix: tdengine/tdengine
|
||||
#pullPolicy: Always
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
# tag: "3.0.0.0"
|
||||
# tag: "3.0.2.0"
|
||||
|
||||
service:
|
||||
# ClusterIP is the default service type, use NodeIP only if you know what you are doing.
|
||||
|
@ -155,15 +156,15 @@ clusterDomainSuffix: ""
|
|||
# See the [Configuration Variables](../../reference/config)
|
||||
#
|
||||
# Note:
|
||||
# 1. firstEp/secondEp: should not be setted here, it's auto generated at scale-up.
|
||||
# 2. serverPort: should not be setted, we'll use the default 6030 in many places.
|
||||
# 3. fqdn: will be auto generated in kubenetes, user should not care about it.
|
||||
# 1. firstEp/secondEp: should not be set here, it's auto generated at scale-up.
|
||||
# 2. serverPort: should not be set, we'll use the default 6030 in many places.
|
||||
# 3. fqdn: will be auto generated in kubernetes, user should not care about it.
|
||||
# 4. role: currently role is not supported - every node is able to be mnode and vnode.
|
||||
#
|
||||
# Btw, keep quotes "" around the value like below, even the value will be number or not.
|
||||
taoscfg:
|
||||
# Starts as cluster or not, must be 0 or 1.
|
||||
# 0: all pods will start as a seperate TDengine server
|
||||
# 0: all pods will start as a separate TDengine server
|
||||
# 1: pods will start as TDengine server cluster. [default]
|
||||
CLUSTER: "1"
|
||||
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: Deployment
|
||||
description: This document describes how to deploy a TDengine cluster on a server, on Kubernetes, and by using Helm.
|
||||
---
|
||||
|
||||
TDengine has a native distributed design and provides the ability to scale out. A few nodes can form a TDengine cluster. If you need higher processing power, you just need to add more nodes into the cluster. TDengine uses virtual node technology to virtualize a node into multiple virtual nodes to achieve load balancing. At the same time, TDengine can group virtual nodes on different nodes into virtual node groups, and use the replication mechanism to ensure the high availability of the system. The cluster feature of TDengine is completely open source.
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
sidebar_label: Data Types
|
||||
title: Data Types
|
||||
description: 'TDengine supports a variety of data types including timestamp, float, JSON and many others.'
|
||||
sidebar_label: Data Types
|
||||
description: This document describes the data types that TDengine supports.
|
||||
---
|
||||
|
||||
## Timestamp
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
sidebar_label: Database
|
||||
title: Database
|
||||
description: "create and drop database, show or change database parameters"
|
||||
sidebar_label: Database
|
||||
description: This document describes how to create and perform operations on databases.
|
||||
---
|
||||
|
||||
## Create a Database
|
||||
|
@ -27,13 +27,16 @@ database_option: {
|
|||
| PRECISION {'ms' | 'us' | 'ns'}
|
||||
| REPLICA value
|
||||
| RETENTIONS ingestion_duration:keep_duration ...
|
||||
| STRICT {'off' | 'on'}
|
||||
| WAL_LEVEL {1 | 2}
|
||||
| VGROUPS value
|
||||
| SINGLE_STABLE {0 | 1}
|
||||
| STT_TRIGGER value
|
||||
| TABLE_PREFIX value
|
||||
| TABLE_SUFFIX value
|
||||
| TSDB_PAGESIZE value
|
||||
| WAL_RETENTION_PERIOD value
|
||||
| WAL_ROLL_PERIOD value
|
||||
| WAL_RETENTION_SIZE value
|
||||
| WAL_ROLL_PERIOD value
|
||||
| WAL_SEGMENT_SIZE value
|
||||
}
|
||||
```
|
||||
|
@ -55,15 +58,12 @@ database_option: {
|
|||
- WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk.
|
||||
- MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096.
|
||||
- MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100.
|
||||
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default.
|
||||
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. The Enterprise Edition supports [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function, thus multiple KEEP values (comma separated and up to 3 values supported, and meet keep 0 <= keep 1 <= keep 2, e.g. KEEP 100h,100d,3650d) are supported; the Community Edition does not support Tiered Storage function (although multiple keep values are configured, they do not take effect, only the maximum keep value is used as KEEP).
|
||||
- PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB.
|
||||
- PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384.
|
||||
- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.
|
||||
- REPLICA: specifies the number of replicas that are made of the database. Enter 1 or 3. The default value is 1. The value of the REPLICA parameter cannot exceed the number of dnodes in the cluster.
|
||||
- RETENTIONS: specifies the retention period for data aggregated at various intervals. For example, RETENTIONS 15s:7d,1m:21d,15m:50d indicates that data aggregated every 15 seconds is retained for 7 days, data aggregated every 1 minute is retained for 21 days, and data aggregated every 15 minutes is retained for 50 days. You must enter three aggregation intervals and corresponding retention periods.
|
||||
- STRICT: specifies whether strong data consistency is enabled. The default value is off.
|
||||
- on: Strong consistency is enabled and implemented through the Raft consensus algorithm. In this mode, an operation is considered successful once it is confirmed by half of the nodes in the cluster.
|
||||
- off: Strong consistency is disabled. In this mode, an operation is considered successful when it is initiated by the local node.
|
||||
- WAL_LEVEL: specifies whether fsync is enabled. The default value is 1.
|
||||
- 1: WAL is enabled but fsync is disabled.
|
||||
- 2: WAL and fsync are both enabled.
|
||||
|
@ -71,11 +71,14 @@ database_option: {
|
|||
- SINGLE_STABLE: specifies whether the database can contain more than one supertable.
|
||||
- 0: The database can contain multiple supertables.
|
||||
- 1: The database can contain only one supertable.
|
||||
- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value of single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value of multiple copy is 4 days.
|
||||
- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value of single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value of multiple copy is -1.
|
||||
- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value of single copy is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. The default values of multiple copy is 1 day.
|
||||
- WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. After the current WAL file reaches this size, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk.
|
||||
|
||||
- STT_TRIGGER: specifies the number of file merges triggered by flushed files. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value for this parameter; For multi-table low-frequency scenarios, it is recommended to configure this parameter with a larger value.
|
||||
- TABLE_PREFIX:The prefix length in the table name that is ignored when distributing table to vnode based on table name.
|
||||
- TABLE_SUFFIX:The suffix length in the table name that is ignored when distributing table to vnode based on table name.
|
||||
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
|
||||
- WAL_RETENTION_PERIOD: specifies the maximum time of which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value 0. A value of 0 indicates that WAL files are not required to keep for consumption. Alter it with a proper value at first to create topics.
|
||||
- WAL_RETENTION_SIZE: specifies the maximum total size of which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files to keep for consumption has no upper limit.
|
||||
- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after TSDB data in memory are flushed to disk.
|
||||
- WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. After the current WAL file reaches this size, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after TSDB data in memory are flushed to disk.
|
||||
### Example Statement
|
||||
|
||||
```sql
|
||||
|
@ -112,12 +115,34 @@ alter_database_options:
|
|||
alter_database_option: {
|
||||
CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'}
|
||||
| CACHESIZE value
|
||||
| BUFFER value
|
||||
| PAGES value
|
||||
| REPLICA value
|
||||
| STT_TRIGGER value
|
||||
| WAL_LEVEL value
|
||||
| WAL_FSYNC_PERIOD value
|
||||
| KEEP value
|
||||
| WAL_RETENTION_PERIOD value
|
||||
| WAL_RETENTION_SIZE value
|
||||
}
|
||||
```
|
||||
|
||||
### ALTER CACHESIZE
|
||||
|
||||
The command of changing database configuration parameters is easy to use, but it's hard to determine whether a parameter is proper or not. In this section we will describe how to determine whether cachesize is big enough.
|
||||
|
||||
1. How to check cachesize?
|
||||
|
||||
You can use `select * from information_schema.ins_databases;` to get the value of cachesize.
|
||||
|
||||
2. How to check cacheload?
|
||||
|
||||
You can use `show <db_name>.vgroups;` to check the value of cacheload.
|
||||
|
||||
3. Determine whether cachesize is big enough
|
||||
|
||||
If the value of `cacheload` is very close to the value of `cachesize`, then it's very probable that `cachesize` is too small. If the value of `cacheload` is much smaller than the value of `cachesize`, then `cachesize` is big enough. You can use this simple principle to determine. Depending on how much memory is available in your system, you can choose to double `cachesize` or increase it by even 5 or more times.
|
||||
|
||||
:::note
|
||||
Other parameters cannot be modified after the database has been created.
|
||||
|
||||
|
@ -154,3 +179,27 @@ TRIM DATABASE db_name;
|
|||
```
|
||||
|
||||
The preceding SQL statement deletes data that has expired and orders the remaining data in accordance with the storage configuration.
|
||||
|
||||
## Flush Data
|
||||
|
||||
```sql
|
||||
FLUSH DATABASE db_name;
|
||||
```
|
||||
|
||||
Flush data from memory onto disk. Before shutting down a node, executing this command can avoid data restore after restarting and speed up the startup process.
|
||||
|
||||
## Redistribute Vgroup
|
||||
|
||||
```sql
|
||||
REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3]
|
||||
```
|
||||
|
||||
Adjust the distribution of vnodes in the vgroup according to the given list of dnodes.
|
||||
|
||||
## Balance Vgroup
|
||||
|
||||
```sql
|
||||
BALANCE VGROUP
|
||||
```
|
||||
|
||||
Automatically adjusts the distribution of vnodes in all vgroups of the cluster, which is equivalent to load balancing the data of the cluster at the vnode level.
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: Table
|
||||
description: This document describes how to create and perform operations on standard tables and subtables.
|
||||
---
|
||||
|
||||
## Create Table
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Supertable
|
||||
title: Supertable
|
||||
sidebar_label: Supertable
|
||||
description: This document describes how to create and perform operations on supertables.
|
||||
---
|
||||
|
||||
## Create a Supertable
|
||||
|
@ -12,12 +13,11 @@ create_definition:
|
|||
col_name column_definition
|
||||
|
||||
column_definition:
|
||||
type_name [COMMENT 'string_value']
|
||||
type_name
|
||||
```
|
||||
|
||||
**More explanations**
|
||||
- Each supertable can have a maximum of 4096 columns, including tags. The minimum number of columns is 3: a timestamp column used as the key, one tag column, and one data column.
|
||||
- When you create a supertable, you can add comments to columns and tags.
|
||||
- The TAGS keyword defines the tag columns for the supertable. The following restrictions apply to tag columns:
|
||||
- A tag column can use the TIMESTAMP data type, but the values in the column must be fixed numbers. Timestamps including formulae, such as "now + 10s", cannot be stored in a tag column.
|
||||
- The name of a tag column cannot be the same as the name of any other column.
|
||||
|
@ -33,7 +33,7 @@ column_definition:
|
|||
SHOW STABLES [LIKE tb_name_wildcard];
|
||||
```
|
||||
|
||||
The preceding SQL statement shows all supertables in the current TDengine database, including the name, creation time, number of columns, number of tags, and number of subtabels for each supertable.
|
||||
The preceding SQL statement shows all supertables in the current TDengine database, including the name, creation time, number of columns, number of tags, and number of subtables for each supertable.
|
||||
|
||||
### View the CREATE Statement for a Supertable
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Insert
|
||||
title: Insert
|
||||
sidebar_label: Insert
|
||||
description: This document describes how to insert data into TDengine.
|
||||
---
|
||||
|
||||
## Syntax
|
||||
|
@ -27,7 +28,7 @@ INSERT INTO tb_name [(field1_name, ...)] subquery
|
|||
2. The precision of a timestamp depends on its format. The precision configured for the database affects only timestamps that are inserted as long integers (UNIX time). Timestamps inserted as date and time strings are not affected. As an example, the timestamp 2021-07-13 16:16:48 is equivalent to 1626164208 in UNIX time. This UNIX time is modified to 1626164208000 for databases with millisecond precision, 1626164208000000 for databases with microsecond precision, and 1626164208000000000 for databases with nanosecond precision.
|
||||
|
||||
3. If you want to insert multiple rows simultaneously, do not use the NOW function in the timestamp. Using the NOW function in this situation will cause multiple rows to have the same timestamp and prevent them from being stored correctly. This is because the NOW function obtains the current time on the client, and multiple instances of NOW in a single statement will return the same time.
|
||||
The earliest timestamp that you can use when inserting data is equal to the current time on the server minus the value of the KEEP parameter. The latest timestamp that you can use when inserting data is equal to the current time on the server plus the value of the DURATION parameter. You can configure the KEEP and DURATION parameters when you create a database. The default values are 3650 days for the KEEP parameter and 10 days for the DURATION parameter.
|
||||
The earliest timestamp that you can use when inserting data is equal to the current time on the server minus the value of the KEEP parameter (You can configure the KEEP parameter when you create a database and the default value is 3650 days). The latest timestamp you can use when inserting data depends on the PRECISION parameter (You can configure the PRECISION parameter when you create a database, ms means milliseconds, us means microseconds, ns means nanoseconds, and the default value is milliseconds). If the timestamp precision is milliseconds or microseconds, the latest timestamp is the Unix epoch (January 1st, 1970 at 00:00:00.000 UTC) plus 1000 years, that is, January 1st, 2970 at 00:00:00.000 UTC; If the timestamp precision is nanoseconds, the latest timestamp is the Unix epoch plus 292 years, that is, January 1st, 2262 at 00:00:00.000000000 UTC.
|
||||
|
||||
**Syntax**
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Select
|
||||
title: Select
|
||||
sidebar_label: Select
|
||||
description: This document describes how to query data in TDengine.
|
||||
---
|
||||
|
||||
## Syntax
|
||||
|
@ -247,13 +248,13 @@ You can also use the NULLS keyword to specify the position of null values. Ascen
|
|||
|
||||
The LIMIT keyword controls the number of results that are displayed. You can also use the OFFSET keyword to specify the result to display first. `LIMIT` and `OFFSET` are executed after `ORDER BY` in the query execution. You can include an offset in a LIMIT clause. For example, LIMIT 5 OFFSET 2 can also be written LIMIT 2, 5. Both of these clauses display the third through the seventh results.
|
||||
|
||||
In a statement that includes a PARTITON BY clause, the LIMIT keyword is performed on each partition, not on the entire set of results.
|
||||
In a statement that includes a PARTITION BY/GROUP BY clause, the LIMIT keyword is performed on each partition/group, not on the entire set of results.
|
||||
|
||||
## SLIMIT
|
||||
|
||||
The SLIMIT keyword is used with a PARTITION BY clause to control the number of partitions that are displayed. You can include an offset in a SLIMIT clause. For example, SLIMIT 5 OFFSET 2 can also be written LIMIT 2, 5. Both of these clauses display the third through the seventh partitions.
|
||||
The SLIMIT keyword is used with a PARTITION BY/GROUP BY clause to control the number of partitions/groups that are displayed. You can include an offset in a SLIMIT clause. For example, SLIMIT 5 OFFSET 2 can also be written LIMIT 2, 5. Both of these clauses display the third through the seventh partitions/groups.
|
||||
|
||||
Note: If you include an ORDER BY clause, only one partition can be displayed.
|
||||
Note: If you include an ORDER BY clause, only one partition/group can be displayed.
|
||||
|
||||
## Special Query
|
||||
|
||||
|
@ -354,9 +355,9 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F
|
|||
|
||||
## JOIN
|
||||
|
||||
TDengine supports natural joins between supertables, between standard tables, and between subqueries. The difference between natural joins and inner joins is that natural joins require that the fields being joined in the supertables or standard tables must have the same name. Data or tag columns must be joined with the equivalent column in another table.
|
||||
TDengine supports the `INNER JOIN` based on the timestamp primary key, that is, the `JOIN` condition must contain the timestamp primary key. As long as the requirement of timestamp-based primary key is met, `INNER JOIN` can be made between normal tables, sub-tables, super tables and sub-queries at will, and there is no limit on the number of tables.
|
||||
|
||||
For standard tables, only the timestamp (primary key) can be used in join operations. For example:
|
||||
For standard tables:
|
||||
|
||||
```sql
|
||||
SELECT *
|
||||
|
@ -364,7 +365,7 @@ FROM temp_tb_1 t1, pressure_tb_1 t2
|
|||
WHERE t1.ts = t2.ts
|
||||
```
|
||||
|
||||
For supertables, tags as well as timestamps can be used in join operations. For example:
|
||||
For supertables:
|
||||
|
||||
```sql
|
||||
SELECT *
|
||||
|
@ -372,21 +373,16 @@ FROM temp_stable t1, temp_stable t2
|
|||
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
|
||||
```
|
||||
|
||||
For sub-table and super table:
|
||||
|
||||
```sql
|
||||
SELECT *
|
||||
FROM temp_ctable t1, temp_stable t2
|
||||
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
|
||||
```
|
||||
|
||||
Similarly, join operations can be performed on the result sets of multiple subqueries.
|
||||
|
||||
:::note
|
||||
|
||||
The following restriction apply to JOIN statements:
|
||||
|
||||
- The number of tables or supertables in a single join operation cannot exceed 10.
|
||||
- `FILL` cannot be used in a JOIN statement.
|
||||
- Arithmetic operations cannot be performed on the result sets of join operation.
|
||||
- `GROUP BY` is not allowed on a segment of the tables that participate in a join operation.
|
||||
- `OR` cannot be used in the conditions for join operation
|
||||
- Join operation can be performed only on tags or timestamps. You cannot perform a join operation on data columns.
|
||||
|
||||
:::
|
||||
|
||||
## Nested Query
|
||||
|
||||
Nested query is also called sub query. This means that in a single SQL statement the result of inner query can be used as the data source of the outer query.
|
||||
|
|
|
@ -0,0 +1,51 @@
|
|||
---
|
||||
sidebar_label: Tag Index
|
||||
title: Tag Index
|
||||
description: Use Tag Index to Improve Query Performance
|
||||
---
|
||||
|
||||
## Introduction
|
||||
|
||||
Prior to TDengine 3.0.3.0 (excluded), only one index is created by default on the first tag of each super table, but it's not allowed to dynamically create an index on any other tags. From version 3.0.3.0, you can dynamically create an index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if they are used properly.
|
||||
|
||||
## Syntax
|
||||
|
||||
1. The syntax of creating an index
|
||||
|
||||
```sql
|
||||
CREATE INDEX index_name ON tbl_name (tagColName)
|
||||
```
|
||||
|
||||
In the above statement, `index_name` is the name of the index, `tbl_name` is the name of the super table, `tagColName` is the name of the tag on which the index is being created. `tagColName` can be any type supported by TDengine.
|
||||
|
||||
2. The syntax of drop an index
|
||||
|
||||
```sql
|
||||
DROP INDEX index_name
|
||||
```
|
||||
|
||||
In the above statement, `index_name` is the name of an existing index. If the index doesn't exist, the command would fail but doesn't generate any impact to the system.
|
||||
|
||||
3. The syntax of show indexes in the system
|
||||
|
||||
```sql
|
||||
SELECT * FROM information_schema.INS_INDEXES
|
||||
```
|
||||
|
||||
You can also add filter conditions to limit the results.
|
||||
|
||||
## Detailed Specification
|
||||
|
||||
1. Indexes can improve query performance significantly if they are used properly. The operators supported by tag index include `=`, `>`, `>=`, `<`, `<=`. If you use these operators with tags, indexes can improve query performance significantly. However, for operators not in this scope, indexes don't help. More and more operators will be added in the future.
|
||||
|
||||
2. Only one index can be created on each tag; an error would be reported if you try to create more than one index on the same tag.
|
||||
|
||||
3. Each time you can create an index on a single tag, you are not allowed to create indexes on multiple tags together.
|
||||
|
||||
4. The name of each index must be unique across the whole system, regardless of the type of the index, e.g. tag index or sma index.
|
||||
|
||||
5. There is no limit on the number of indexes, but each index may add some burden on the metadata subsystem. So too many indexes may decrease the efficiency of reading or writing metadata and then decrease the system performance. So it's better not to add unnecessary indexes.
|
||||
|
||||
6. You can't create an index on a normal table or a child table.
|
||||
|
||||
7. If the unique values of a tag column are too few, it's better not to create index on such tag columns, the benefit would be very small.
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
sidebar_label: Delete Data
|
||||
description: "Delete data from table or Stable"
|
||||
title: Delete Data
|
||||
sidebar_label: Delete Data
|
||||
description: This document describes how to delete data from TDengine.
|
||||
---
|
||||
|
||||
TDengine provides the functionality of deleting data from a table or STable according to specified time range, it can be used to cleanup abnormal data generated due to device failure.
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Functions
|
||||
title: Functions
|
||||
sidebar_label: Functions
|
||||
description: This document describes the standard SQL functions available in TDengine.
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
|
@ -665,13 +666,13 @@ If you input a specific column, the number of non-null values in the column is r
|
|||
ELAPSED(ts_primary_key [, time_unit])
|
||||
```
|
||||
|
||||
**Description**:`elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calcualted time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
|
||||
**Description**: `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Please note that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
|
||||
|
||||
**Return value type**: Double if the input value is not NULL;
|
||||
|
||||
**Return value type**: TIMESTAMP
|
||||
|
||||
**Applicable tables**: table, STable, outter in nested query
|
||||
**Applicable tables**: table, STable, outer in nested query
|
||||
|
||||
**Explanations**:
|
||||
- `ts_primary_key` parameter can only be the first column of a table, i.e. timestamp primary key.
|
||||
|
@ -753,7 +754,7 @@ HYPERLOGLOG(expr)
|
|||
|
||||
**Description**:
|
||||
The cardinal number of a specific column is returned by using hyperloglog algorithm. The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge.
|
||||
However, when the data volume is very small, the result may be not accurate, it's recommented to use `select count(data) from (select unique(col) as data from table)` in this case.
|
||||
However, when the data volume is very small, the result may be not accurate, it's recommended to use `select count(data) from (select unique(col) as data from table)` in this case.
|
||||
|
||||
**Return value type**: Integer
|
||||
|
||||
|
@ -795,19 +796,23 @@ HISTOGRAM(expr,bin_type, bin_description, normalized)
|
|||
### PERCENTILE
|
||||
|
||||
```sql
|
||||
PERCENTILE(expr, p)
|
||||
PERCENTILE(expr, p [, p1] ...)
|
||||
```
|
||||
|
||||
**Description**: The value whose rank in a specific column matches the specified percentage. If such a value matching the specified percentage doesn't exist in the column, an interpolation value will be returned.
|
||||
|
||||
**Return value type**: DOUBLE
|
||||
**Return value type**: This function takes a minimum of 2 and a maximum of 11 parameters, and it can simultaneously return 10 percentiles at most. If 2 parameters are given, a single percentile is returned and the value type is DOUBLE.
|
||||
If more than 2 parameters are given, the return value type is a VARCHAR string, the format of which is a JSON ARRAY containing all return values.
|
||||
|
||||
**Applicable column types**: Numeric
|
||||
|
||||
**Applicable table types**: table only
|
||||
|
||||
**More explanations**: _p_ is in range [0,100], when _p_ is 0, the result is same as using function MIN; when _p_ is 100, the result is same as function MAX.
|
||||
**More explanations**:
|
||||
|
||||
- _p_ is in range [0,100], when _p_ is 0, the result is same as using function MIN; when _p_ is 100, the result is same as function MAX.
|
||||
- When calculating multiple percentiles of a specific column, a single PERCENTILE function with multiple parameters is advised, as this can largely reduce the query response time.
|
||||
For example, using SELECT percentile(col, 90, 95, 99) FROM table will perform better than SELECT percentile(col, 90), percentile(col, 95), percentile(col, 99) from table.
|
||||
|
||||
## Selection Functions
|
||||
|
||||
|
@ -876,7 +881,17 @@ INTERP(expr)
|
|||
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a(millisecond)), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
|
||||
- Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
|
||||
- `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable.
|
||||
- Pseudo column `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.1.4).
|
||||
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.2.0).
|
||||
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by interpolation algorithm(support after version 3.0.3.0).
|
||||
|
||||
**Example**
|
||||
|
||||
- We use the smart meters example used in this documentation to illustrate how to use the INTERP function.
|
||||
- We want to downsample every 1 hour and use a linear fill for missing values. Note the order in which the "partition by" clause and the "range", "every" and "fill" parameters are used.
|
||||
|
||||
```sql
|
||||
SELECT _irowts,INTERP(current) FROM test.meters PARTITION BY TBNAME RANGE('2017-07-22 00:00:00','2017-07-24 12:25:00') EVERY(1h) FILL(LINEAR)
|
||||
```
|
||||
|
||||
### LAST
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Time-Series Extensions
|
||||
title: Time-Series Extensions
|
||||
sidebar_label: Time-Series Extensions
|
||||
description: This document describes the extended functions specific to time-series data processing available in TDengine.
|
||||
---
|
||||
|
||||
As a purpose-built database for storing and processing time-series data, TDengine provides time-series-specific extensions to standard SQL.
|
||||
|
@ -20,7 +21,7 @@ part_list can be any scalar expression, such as a column, constant, scalar funct
|
|||
A PARTITION BY clause is processed as follows:
|
||||
|
||||
- The PARTITION BY clause must occur after the WHERE clause
|
||||
- The PARTITION BY caluse partitions the data according to the specified dimentions, then perform computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause.
|
||||
- The PARTITION BY clause partitions the data according to the specified dimensions, then performs computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause.
|
||||
- The PARTITION BY clause can be used together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value:
|
||||
|
||||
```sql
|
||||
|
@ -31,15 +32,15 @@ The most common usage of PARTITION BY is partitioning the data in subtables by t
|
|||
|
||||
## Windowed Queries
|
||||
|
||||
Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every seconds, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window. The query syntax is as follows:
|
||||
Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every seconds, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are four kinds of windows: time window, status window, session window, and event window. There are two kinds of time windows: sliding window and flip time/tumbling window. The syntax of window clause is as follows:
|
||||
|
||||
```sql
|
||||
SELECT select_list FROM tb_name
|
||||
[WHERE where_condition]
|
||||
[SESSION(ts_col, tol_val)]
|
||||
[STATE_WINDOW(col)]
|
||||
[INTERVAL(interval [, offset]) [SLIDING sliding]]
|
||||
[FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
|
||||
window_clause: {
|
||||
SESSION(ts_col, tol_val)
|
||||
| STATE_WINDOW(col)
|
||||
| INTERVAL(interval [, offset]) [SLIDING sliding] [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
|
||||
| EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition
|
||||
}
|
||||
```
|
||||
|
||||
The following restrictions apply:
|
||||
|
@ -74,6 +75,16 @@ These pseudocolumns occur after the aggregation clause.
|
|||
5. LINEAR:Fill with the closest non-NULL value, `FILL(LINEAR)`
|
||||
6. NEXT:Fill with the next non-NULL value, `FILL(NEXT)`
|
||||
|
||||
In the above filling modes, except for `NONE` mode, the `fill` clause will be ignored if there is no data in the defined time range, i.e. no data would be filled and the query result would be empty. This behavior is reasonable when the filling mode is `PREV`, `NEXT`, `LINEAR`, because filling can't be performed if there is not any data. For filling modes `NULL` and `VALUE`, however, filling can be performed even though there is not any data, filling or not depends on the choice of user's application. To accomplish the need of this force filling behavior and not break the behavior of existing filling modes, TDengine added two new filling modes since version 3.0.3.0.
|
||||
|
||||
1. NULL_F: Fill `NULL` by force
|
||||
2. VALUE_F: Fill `VALUE` by force
|
||||
|
||||
The detailed behaviors of `NULL`, `NULL_F`, `VALUE`, and `VALUE_F` are described below:
|
||||
- When used with `INTERVAL`: `NULL_F` and `VALUE_F` are filling by force;`NULL` and `VALUE` don't fill by force. The behavior of each filling mode is exactly same as what the name suggests.
|
||||
- When used with `INTERVAL` in stream processing: `NULL_F` and `NULL` are same, i.e. don't fill by force; `VALUE_F` and `VALUE` are same, i.e. don't fill by force. It's suggested that there is no filling by force in stream processing.
|
||||
- When used with `INTERP`: `NULL` and `NULL_F` are same, i.e. filling by force; `VALUE` and `VALUE_F` are same, i.e. filling by force. It's suggested that there is always filling by force when used with `INTERP`.
|
||||
|
||||
:::info
|
||||
|
||||
1. A huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000.
|
||||
|
@ -104,7 +115,7 @@ SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
|
|||
|
||||
When using time windows, note the following:
|
||||
|
||||
- The window length for aggregation depends on the value of INTERVAL. The minimum interval is 10 ms. You can configure a window as an offset from UTC 0:00. The offset cannot be smaler than the interval. You can use SLIDING to specify the length of time that the window moves forward.
|
||||
- The window length for aggregation depends on the value of INTERVAL. The minimum interval is 10 ms. You can configure a window as an offset from UTC 0:00. The offset cannot be smaller than the interval. You can use SLIDING to specify the length of time that the window moves forward.
|
||||
Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side.
|
||||
- The result set is in ascending order of timestamp when you aggregate by time window.
|
||||
|
||||
|
@ -145,6 +156,26 @@ If the time interval between two continuous rows are within the time interval sp
|
|||
SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val);
|
||||
```
|
||||
|
||||
### Event Window
|
||||
|
||||
Event window is determined according to the window start condition and the window close condition. The window is started when `start_trigger_condition` is evaluated to true, the window is closed when `end_trigger_condition` is evaluated to true. `start_trigger_condition` and `end_trigger_condition` can be any conditional expressions supported by TDengine and can include multiple columns.
|
||||
|
||||
There may be only one row of data in an event window, when a row meets both the `start_trigger_condition` and the `end_trigger_condition`.
|
||||
|
||||
The window is treated as invalid or non-existing if the `end_trigger_condition` can't be met. There will be no output in case that a window can't be closed.
|
||||
|
||||
If the event window query is performed on a super table, TDengine consolidates all the data of all child tables into a single timeline then performs the event window based query.
|
||||
|
||||
If you want to perform event window based query on the result set of a sub-query, the result set of the sub-query should be arranged in the order of timestamp and include the column of timestamp.
|
||||
|
||||
For example, the diagram below illustrates the event windows generated by the query below:
|
||||
|
||||
```sql
|
||||
select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c2 < 10
|
||||
```
|
||||
|
||||

|
||||
|
||||
### Examples
|
||||
|
||||
A table of intelligent meters can be created by the SQL statement below:
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Data Subscription
|
||||
title: Data Subscription
|
||||
sidebar_label: Data Subscription
|
||||
description: This document describes the SQL statements related to the data subscription component of TDengine.
|
||||
---
|
||||
|
||||
The information in this document is related to the TDengine data subscription feature.
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Stream Processing
|
||||
title: Stream Processing
|
||||
sidebar_label: Stream Processing
|
||||
description: This document describes the SQL statements related to the stream processing component of TDengine.
|
||||
---
|
||||
|
||||
Raw time-series data is often cleaned and preprocessed before being permanently stored in a database. Stream processing components like Kafka, Flink, and Spark are often deployed alongside a time-series database to handle these operations, increasing system complexity and maintenance costs.
|
||||
|
@ -108,7 +109,7 @@ SHOW STREAMS;
|
|||
|
||||
When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it.
|
||||
|
||||
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering:
|
||||
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering,the default value is AT_ONCE:
|
||||
|
||||
1. AT_ONCE: triggers on write
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Operators
|
||||
title: Operators
|
||||
sidebar_label: Operators
|
||||
description: This document describes the SQL operators available in TDengine.
|
||||
---
|
||||
|
||||
## Arithmetic Operators
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: JSON Type
|
||||
title: JSON Type
|
||||
sidebar_label: JSON Type
|
||||
description: This document describes the JSON data type in TDengine.
|
||||
---
|
||||
|
||||
|
||||
|
@ -54,7 +55,7 @@ title: JSON Type
|
|||
|
||||
4. Tag Operations
|
||||
|
||||
The value of a JSON tag can be altered. Please note that the full JSON will be overriden when doing this.
|
||||
The value of a JSON tag can be altered. Please note that the full JSON will be overridden when doing this.
|
||||
|
||||
The name of a JSON tag can be altered.
|
||||
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: Escape Characters
|
||||
description: This document describes the usage of escape characters in TDengine.
|
||||
---
|
||||
|
||||
## Escape Characters
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Name and Size Limits
|
||||
title: Name and Size Limits
|
||||
sidebar_label: Name and Size Limits
|
||||
description: This document describes the name and size limits in TDengine.
|
||||
---
|
||||
|
||||
## Naming Rules
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Reserved Keywords
|
||||
title: Reserved Keywords
|
||||
sidebar_label: Reserved Keywords
|
||||
description: This document describes the reserved keywords in TDengine that cannot be used in object names.
|
||||
---
|
||||
|
||||
## Keyword List
|
||||
|
@ -17,6 +18,7 @@ The following list shows all reserved keywords:
|
|||
- ADD
|
||||
- AFTER
|
||||
- AGGREGATE
|
||||
- ALIVE
|
||||
- ALL
|
||||
- ALTER
|
||||
- ANALYZE
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Cluster
|
||||
title: Cluster
|
||||
sidebar_label: Cluster
|
||||
description: This document describes the SQL statements related to cluster management in TDengine.
|
||||
---
|
||||
|
||||
The physical entities that form TDengine clusters are known as data nodes (dnodes). Each dnode is a process running on the operating system of the physical machine. Dnodes can contain virtual nodes (vnodes), which store time-series data. Virtual nodes are formed into vgroups, which have 1 or 3 vnodes depending on the replica setting. If you want to enable replication on your cluster, it must contain at least three nodes. Dnodes can also contain management nodes (mnodes). Each cluster has up to three mnodes. Finally, dnodes can contain query nodes (qnodes), which compute time-series data, thus separating compute from storage. A single dnode can contain a vnode, qnode, and mnode.
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Metadata
|
||||
title: Information_Schema Database
|
||||
sidebar_label: Metadata
|
||||
description: This document describes how to use the INFORMATION_SCHEMA database in TDengine.
|
||||
---
|
||||
|
||||
TDengine includes a built-in database named `INFORMATION_SCHEMA` to provide access to database metadata, system information, and status information. This information includes database names, table names, and currently running SQL statements. All information related to TDengine maintenance is stored in this database. It contains several read-only tables. These tables are more accurately described as views, and they do not correspond to specific files. You can query these tables but cannot write data to them. The INFORMATION_SCHEMA database is intended to provide a unified method for SHOW commands to access data. However, using SELECT ... FROM INFORMATION_SCHEMA.tablename offers several advantages over SHOW commands:
|
||||
|
@ -178,6 +179,20 @@ Provides information about standard tables and subtables.
|
|||
| 5 | tag_type | BINARY(64) | Tag type |
|
||||
| 6 | tag_value | BINARY(16384) | Tag value |
|
||||
|
||||
## INS_COLUMNS
|
||||
|
||||
| # | **Column Name** | **Data Type** | **Description** |
|
||||
| --- | :---------: | ------------- | ---------------------- |
|
||||
| 1 | table_name | BINARY(192) | Table name |
|
||||
| 2 | db_name | BINARY(64) | Database name |
|
||||
| 3 | table_type | BINARY(21) | Table type |
|
||||
| 4 | col_name | BINARY(64) | Column name |
|
||||
| 5 | col_type | BINARY(32) | Column type |
|
||||
| 6 | col_length | INT | Column length |
|
||||
| 7 | col_precision | INT | Column precision |
|
||||
| 8 | col_scale | INT | Column scale |
|
||||
| 9 | col_nullable | INT | Column nullable |
|
||||
|
||||
## INS_USERS
|
||||
|
||||
Provides information about TDengine users.
|
||||
|
@ -273,9 +288,9 @@ Provides dnode configuration information.
|
|||
| 1 | stream_name | BINARY(64) | Stream name |
|
||||
| 2 | create_time | TIMESTAMP | Creation time |
|
||||
| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
|
||||
| 4 | status | BIANRY(20) | Current status |
|
||||
| 4 | status | BINARY(20) | Current status |
|
||||
| 5 | source_db | BINARY(64) | Source database |
|
||||
| 6 | target_db | BIANRY(64) | Target database |
|
||||
| 6 | target_db | BINARY(64) | Target database |
|
||||
| 7 | target_table | BINARY(192) | Target table |
|
||||
| 8 | watermark | BIGINT | Watermark (see stream processing documentation). It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation). It should be noted that `trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Statistics
|
||||
title: Performance_Schema Database
|
||||
sidebar_label: Statistics
|
||||
description: This document describes how to use the PERFORMANCE_SCHEMA database in TDengine.
|
||||
---
|
||||
|
||||
TDengine includes a built-in database named `PERFORMANCE_SCHEMA` to provide access to database performance statistics. This document introduces the tables of PERFORMANCE_SCHEMA and their structure.
|
||||
|
|
|
@ -1,9 +1,10 @@
|
|||
---
|
||||
sidebar_label: SHOW Statement
|
||||
title: SHOW Statement for Metadata
|
||||
sidebar_label: SHOW Statement
|
||||
description: This document describes how to use the SHOW statement in TDengine.
|
||||
---
|
||||
|
||||
`SHOW` command can be used to get brief system information. To get details about metatadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
|
||||
`SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
|
||||
|
||||
## SHOW APPS
|
||||
|
||||
|
@ -85,10 +86,10 @@ SHOW FUNCTIONS;
|
|||
|
||||
Shows all user-defined functions in the system.
|
||||
|
||||
## SHOW LICENSE
|
||||
## SHOW LICENCES
|
||||
|
||||
```sql
|
||||
SHOW LICENSE;
|
||||
SHOW LICENCES;
|
||||
SHOW GRANTS;
|
||||
```
|
||||
|
||||
|
@ -178,6 +179,141 @@ SHOW TABLE DISTRIBUTED table_name;
|
|||
|
||||
Shows how table data is distributed.
|
||||
|
||||
Examples: Below is an example of this command to display the block distribution of table `d0` in detailed format.
|
||||
|
||||
```sql
|
||||
show table distributed d0\G;
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary> Show Example </summary>
|
||||
<pre><code>
|
||||
*************************** 1.row ***************************
|
||||
_block_dist: Total_Blocks=[5] Total_Size=[93.65 KB] Average_size=[18.73 KB] Compression_Ratio=[23.98 %]
|
||||
|
||||
Total_Blocks : Table `d0` contains total 5 blocks
|
||||
|
||||
Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB
|
||||
|
||||
Average_size: The average size of each block is 18.73 KB
|
||||
|
||||
Compression_Ratio: The data compression rate is 23.98%
|
||||
|
||||
*************************** 2.row ***************************
|
||||
_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]
|
||||
|
||||
Total_Rows: Table `d0` contains 20,000 rows
|
||||
|
||||
Inmem_Rows: The number of rows still in memory, i.e. not yet committed to disk, is 0, meaning there are no such rows
|
||||
|
||||
MinRows: The minimum number of rows in a block is 3,616
|
||||
|
||||
MaxRows: The maximum number of rows in a block is 4,096
|
||||
|
||||
Average_Rows: The average number of rows in a block is 4,000
|
||||
|
||||
*************************** 3.row ***************************
|
||||
_block_dist: Total_Tables=[1] Total_Files=[2]
|
||||
|
||||
Total_Tables: The number of child tables, 1 in this example
|
||||
|
||||
Total_Files: The number of files storing the table's data, 2 in this example
|
||||
|
||||
*************************** 4.row ***************************
|
||||
|
||||
_block_dist: --------------------------------------------------------------------------------
|
||||
|
||||
*************************** 5.row ***************************
|
||||
|
||||
_block_dist: 0100 |
|
||||
|
||||
*************************** 6.row ***************************
|
||||
|
||||
_block_dist: 0299 |
|
||||
|
||||
*************************** 7.row ***************************
|
||||
|
||||
_block_dist: 0498 |
|
||||
|
||||
*************************** 8.row ***************************
|
||||
|
||||
_block_dist: 0697 |
|
||||
|
||||
*************************** 9.row ***************************
|
||||
|
||||
_block_dist: 0896 |
|
||||
|
||||
*************************** 10.row ***************************
|
||||
|
||||
_block_dist: 1095 |
|
||||
|
||||
*************************** 11.row ***************************
|
||||
|
||||
_block_dist: 1294 |
|
||||
|
||||
*************************** 12.row ***************************
|
||||
|
||||
_block_dist: 1493 |
|
||||
|
||||
*************************** 13.row ***************************
|
||||
|
||||
_block_dist: 1692 |
|
||||
|
||||
*************************** 14.row ***************************
|
||||
|
||||
_block_dist: 1891 |
|
||||
|
||||
*************************** 15.row ***************************
|
||||
|
||||
_block_dist: 2090 |
|
||||
|
||||
*************************** 16.row ***************************
|
||||
|
||||
_block_dist: 2289 |
|
||||
|
||||
*************************** 17.row ***************************
|
||||
|
||||
_block_dist: 2488 |
|
||||
|
||||
*************************** 18.row ***************************
|
||||
|
||||
_block_dist: 2687 |
|
||||
|
||||
*************************** 19.row ***************************
|
||||
|
||||
_block_dist: 2886 |
|
||||
|
||||
*************************** 20.row ***************************
|
||||
|
||||
_block_dist: 3085 |
|
||||
|
||||
*************************** 21.row ***************************
|
||||
|
||||
_block_dist: 3284 |
|
||||
|
||||
*************************** 22.row ***************************
|
||||
|
||||
_block_dist: 3483 ||||||||||||||||| 1 (20.00%)
|
||||
|
||||
*************************** 23.row ***************************
|
||||
|
||||
_block_dist: 3682 |
|
||||
|
||||
*************************** 24.row ***************************
|
||||
|
||||
_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)
|
||||
|
||||
Query OK, 24 row(s) in set (0.002444s)
|
||||
|
||||
</code></pre>
|
||||
</details>
|
||||
|
||||
The above shows the block distribution percentage according to the number of rows in each block. In the above example, we can get the following information:
|
||||
- `_block_dist: 3483 ||||||||||||||||| 1 (20.00%)` means there is one block whose row count is between 3,483 and 3,681.
|
||||
- `_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)` means there are 4 blocks whose row count is between 3,881 and 4,096.
- The number of blocks whose row count falls in any other range is zero.
|
||||
|
||||
Note that only the information about the data blocks in the data file will be displayed here, and the information about the data in the stt file will not be displayed.
|
||||
|
||||
## SHOW TAGS
|
||||
|
||||
```sql
|
||||
|
@ -225,12 +361,12 @@ Shows the working configuration of the parameters that must be the same on each
|
|||
SHOW [db_name.]VGROUPS;
|
||||
```
|
||||
|
||||
Shows information about all vgroups in the system or about the vgroups for a specified database.
|
||||
Shows information about all vgroups in the current database.
|
||||
|
||||
## SHOW VNODES
|
||||
|
||||
```sql
|
||||
SHOW VNODES [dnode_name];
|
||||
SHOW VNODES {dnode_id | dnode_endpoint};
|
||||
```
|
||||
|
||||
Shows information about all vnodes in the system or about the vnodes for a specified dnode.
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
sidebar_label: Access Control
|
||||
title: User and Access Control
|
||||
description: Manage user and user's permission
|
||||
sidebar_label: Access Control
|
||||
description: This document describes how to manage users and permissions in TDengine.
|
||||
---
|
||||
|
||||
This document describes how to manage permissions in TDengine.
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: User-Defined Functions
|
||||
title: User-Defined Functions (UDF)
|
||||
sidebar_label: User-Defined Functions
|
||||
description: This document describes the SQL statements related to user-defined functions (UDF) in TDengine.
|
||||
---
|
||||
|
||||
You can create user-defined functions and import them into TDengine.
|
||||
|
@ -40,7 +41,7 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [
|
|||
```sql
|
||||
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8;
|
||||
```
|
||||
For more information about user-defined functions, see [User-Defined Functions](../../develop/udf).
|
||||
For more information about user-defined functions, see [User-Defined Functions](/develop/udf).
|
||||
|
||||
## Manage UDF
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Index
|
||||
title: Using Indices
|
||||
title: Indexing
|
||||
sidebar_label: Indexing
|
||||
description: This document describes the SQL statements related to indexing in TDengine.
|
||||
---
|
||||
|
||||
TDengine supports SMA and FULLTEXT indexing.
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Error Recovery
|
||||
title: Error Recovery
|
||||
sidebar_label: Error Recovery
|
||||
description: This document describes the SQL statements related to error recovery in TDengine.
|
||||
---
|
||||
|
||||
In a complex environment, connections and query tasks may encounter errors or fail to return in a reasonable time. If this occurs, you can terminate the connection or task.
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
sidebar_label: Changes in TDengine 3.0
|
||||
title: Changes in TDengine 3.0
|
||||
description: "This document explains how TDengine SQL has changed in version 3.0."
|
||||
sidebar_label: Changes in TDengine 3.0
|
||||
description: This document describes how TDengine SQL has changed in version 3.0 compared with previous versions.
|
||||
---
|
||||
|
||||
## Basic SQL Elements
|
||||
|
@ -27,7 +27,7 @@ The following data types can be used in the schema for standard tables.
|
|||
| - | :------- | :-------- | :------- |
|
||||
| 1 | ALTER ACCOUNT | Deprecated| This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
|
||||
| 2 | ALTER ALL DNODES | Added | Modifies the configuration of all dnodes.
|
||||
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. STRICT is now used to specify strong or weak consistency. The STRICT parameter cannot be modified. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
|
||||
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. TDengine 3.0 provides strict consistency by default and doesn't allow changing to weak consistency. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
|
||||
| 4 | ALTER STABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a supertable. </li></ul>
|
||||
| 5 | ALTER TABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a standard table. </li><li>TTL: Specifies the time-to-live for a standard table. </li></ul>
|
||||
| 6 | ALTER USER | Modified | Deprecated<ul><li>PRIVILEGE: Specified user permissions. Replaced by GRANT and REVOKE. <br/>Added</li><li>ENABLE: Enables or disables a user. </li><li>SYSINFO: Specifies whether a user can query system information. </li></ul>
|
||||
|
@ -54,7 +54,6 @@ The following data types can be used in the schema for standard tables.
|
|||
| 27 | GRANT | Added | Grants permissions to a user.
|
||||
| 28 | KILL TRANSACTION | Added | Terminates an mnode transaction.
|
||||
| 29 | KILL STREAM | Deprecated | Terminated a continuous query. The continuous query feature has been replaced with the stream processing feature.
|
||||
| 30 | MERGE VGROUP | Added | Merges vgroups.
|
||||
| 31 | REVOKE | Added | Revokes permissions from a user.
|
||||
| 32 | SELECT | Modified | <ul><li>SELECT does not use the implicit results column. Output columns must be specified in the SELECT clause. </li><li>DISTINCT support is enhanced. In previous versions, DISTINCT only worked on the tag column and could not be used with JOIN or GROUP BY. </li><li>JOIN support is enhanced. The following are now supported after JOIN: a WHERE clause with OR, operations on multiple tables, and GROUP BY on multiple tables. </li><li>Subqueries after FROM are enhanced. Levels of nesting are no longer restricted. Subqueries can be used with UNION ALL. Other syntax restrictions are eliminated. </li><li>All scalar functions can be used after WHERE. </li><li>GROUP BY is enhanced. You can group by any scalar expression or combination thereof. </li><li>SESSION can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. </li><li>STATE_WINDOW can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. </li><li>ORDER BY is enhanced. It is no longer required to use ORDER BY and GROUP BY together. There is no longer a restriction on the number of order expressions. NULLS FIRST and NULLS LAST syntax has been added. Any expression that conforms to the ORDER BY semantics can be used. </li><li>Added PARTITION BY syntax. PARTITION BY replaces GROUP BY tags. </li></ul>
|
||||
| 33 | SHOW ACCOUNTS | Deprecated | This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
|
||||
|
@ -76,8 +75,9 @@ The following data types can be used in the schema for standard tables.
|
|||
| 49 | SHOW TRANSACTIONS | Added | Shows all running transactions in the system.
|
||||
| 50 | SHOW DNODE VARIABLES | Added | Shows the configuration of the specified dnode.
|
||||
| 51 | SHOW VNODES | Not supported | Shows information about vnodes in the system. Not supported.
|
||||
| 52 | SPLIT VGROUP | Added | Splits a vgroup into two vgroups.
|
||||
| 53 | TRIM DATABASE | Added | Deletes data that has expired and orders the remaining data in accordance with the storage configuration.
|
||||
| 52 | TRIM DATABASE | Added | Deletes data that has expired and orders the remaining data in accordance with the storage configuration.
|
||||
| 53 | REDISTRIBUTE VGROUP | Added | Adjust the distribution of VNODES in VGROUP.
|
||||
| 54 | BALANCE VGROUP | Added | Auto adjust the distribution of VNODES in VGROUP.
|
||||
|
||||
## SQL Functions
|
||||
|
||||
|
|
Binary file not shown.
After Width: | Height: | Size: 210 KiB |
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
title: TDengine SQL
|
||||
description: 'The syntax supported by TDengine SQL '
|
||||
description: This document describes the syntax and functions supported by TDengine SQL.
|
||||
---
|
||||
|
||||
This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. TDengine SQL has been enhanced in version 3.0, and the query engine has been rearchitected. For information about how TDengine SQL has changed, see [Changes in TDengine 3.0](../taos-sql/changes).
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
title: Install and Uninstall
|
||||
description: Install, Uninstall, Start, Stop and Upgrade
|
||||
description: This document describes how to install, upgrade, and uninstall TDengine.
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
|
@ -15,14 +15,14 @@ About details of installing TDenine, please refer to [Installation Guide](../../
|
|||
## Uninstall
|
||||
|
||||
<Tabs>
|
||||
<TabItem label="Uninstall apt-get" value="aptremove">
|
||||
<TabItem label="Uninstall by apt-get" value="aptremove">
|
||||
|
||||
Apt-get package of TDengine can be uninstalled as below:
|
||||
The TDengine package installed via apt-get can be uninstalled as below:
|
||||
|
||||
```bash
|
||||
$ sudo apt-get remove tdengine
|
||||
Reading package lists... Done
|
||||
Building dependency tree
|
||||
Building dependency tree
|
||||
Reading state information... Done
|
||||
The following packages will be REMOVED:
|
||||
tdengine
|
||||
|
@ -35,7 +35,7 @@ TDengine is removed successfully!
|
|||
|
||||
```
|
||||
|
||||
Apt-get package of taosTools can be uninstalled as below:
|
||||
If you have installed taos-tools, please uninstall it first before uninstalling TDengine. The uninstall command is as follows:
|
||||
|
||||
```
|
||||
$ sudo apt remove taostools
|
||||
|
@ -111,8 +111,20 @@ taos tools is uninstalled successfully!
|
|||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="Windows uninstall" value="windows">
|
||||
Run C:\TDengine\unins000.exe to uninstall TDengine on a Windows system.
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="Mac uninstall" value="mac">
|
||||
|
||||
TDengine can be uninstalled as below:
|
||||
|
||||
```
|
||||
$ rmtaos
|
||||
TDengine is removed successfully!
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
@ -150,13 +162,13 @@ There are two aspects in upgrade operation: upgrade installation package and upg
|
|||
|
||||
To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version.
|
||||
|
||||
Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 3 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
|
||||
Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 2 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
|
||||
- Stop inserting data
|
||||
- Make sure all data is persisted to disk
|
||||
- Make sure all data is persisted to disk, please use command `flush database`
|
||||
- Stop the cluster of TDengine
|
||||
- Uninstall old version and install new version
|
||||
- Start the cluster of TDengine
|
||||
- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
|
||||
- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
|
||||
- Run some simple data insertion statements to make sure the cluster works well
|
||||
- Restore business services
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: Resource Planning
|
||||
title: Resource Planning
|
||||
sidebar_label: Resource Planning
|
||||
description: This document describes how to plan compute and storage resources for your TDengine cluster.
|
||||
---
|
||||
|
||||
It is important to plan computing and storage resources if using TDengine to build an IoT, time-series or Big Data platform. How to plan the CPU, memory and disk resources required, will be described in this chapter.
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: Fault Tolerance and Disaster Recovery
|
||||
description: This document describes how TDengine provides fault tolerance and disaster recovery.
|
||||
---
|
||||
|
||||
## Fault Tolerance
|
||||
|
@ -17,14 +18,8 @@ To achieve absolutely no data loss, set wal_level to 2 and wal_fsync_period to 0
|
|||
|
||||
## Disaster Recovery
|
||||
|
||||
TDengine uses replication to provide high availability.
|
||||
TDengine provides disaster recovery by using taosX to replicate data between two TDengine clusters which are deployed in two distant data centers. Assume there are two TDengine clusters, A and B, A is the source and B is the target, and A takes the workload of writing and querying. You can deploy `taosX` in the data center where cluster A resides in, `taosX` consumes the data written into cluster A and writes into cluster B. If the data center of cluster A is disrupted because of disaster, you can switch to cluster B to take the workload of data writing and querying, and deploy a `taosX` in the data center of cluster B to replicate data from cluster B to cluster A if cluster A has been recovered, or another cluster C if cluster A has not been recovered.
|
||||
|
||||
A TDengine cluster is managed by mnodes. You can configure up to three mnodes to ensure high availability. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency.
|
||||
You can use the data replication feature of `taosX` to build more complicated disaster recovery solution.
|
||||
|
||||
The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, the parameter `replica` is used to specify the number of replicas. To achieve high availability, set `replica` to 3.
|
||||
|
||||
The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create a table.
|
||||
|
||||
As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is higher than 1, high availability can be achieved without any other assistance. For disaster recovery, dnodes of a TDengine cluster should be deployed in geographically different data centers.
|
||||
|
||||
Alternatively, you can use taosX to synchronize the data from one TDengine cluster to another cluster in a remote location. However, taosX is only available in TDengine enterprise version, for more information please contact tdengine.com.
|
||||
taosX is only provided in TDengine enterprise edition, for more details please contact business@tdengine.com.
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: Data Import
|
||||
description: This document describes how to import data into TDengine.
|
||||
---
|
||||
|
||||
There are multiple ways of importing data provided by TDengine: import with script, import from data file, import using `taosdump`.
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: Data Export
|
||||
description: This document describes how to export data from TDengine.
|
||||
---
|
||||
|
||||
There are two ways of exporting data from a TDengine cluster:
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: TDengine Monitoring
|
||||
description: This document describes how to monitor your TDengine cluster.
|
||||
---
|
||||
|
||||
After TDengine is started, it automatically writes monitoring data including CPU, memory and disk usage, bandwidth, number of requests, disk I/O speed, slow queries, into a designated database at a predefined interval through taosKeeper. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in `log` database from TDengine CLI or from a web console.
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: Problem Diagnostics
|
||||
description: This document describes how to diagnose issues with your TDengine cluster.
|
||||
---
|
||||
|
||||
## Network Connection Diagnostics
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: Administration
|
||||
description: This document describes how to perform management operations on your TDengine cluster from an administrator's perspective.
|
||||
---
|
||||
|
||||
This chapter is mainly written for system administrators. It covers download, install/uninstall, data import/export, system monitoring, user management, connection management, capacity planning and system optimization.
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: REST API
|
||||
description: This document describes the TDengine REST API.
|
||||
---
|
||||
|
||||
To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles; namely REST API. To minimize the learning cost, unlike REST APIs for other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request, to operate the database.
|
||||
|
@ -67,7 +68,7 @@ The following return value results indicate that the verification passed.
|
|||
## HTTP request URL format
|
||||
|
||||
```text
|
||||
http://<fqdn>:<port>/rest/sql/[db_name][?tz=timezone]
|
||||
http://<fqdn>:<port>/rest/sql/[db_name][?tz=timezone[&req_id=req_id]]
|
||||
```
|
||||
|
||||
Parameter Description:
|
||||
|
@ -76,6 +77,7 @@ Parameter Description:
|
|||
- port: httpPort configuration item in the configuration file, default is 6041.
|
||||
- db_name: Optional parameter that specifies the default database name for the executed SQL command.
|
||||
- tz: Optional parameter that specifies the timezone of the returned time, following the IANA Time Zone rules, e.g. `America/New_York`.
|
||||
- req_id: Optional parameter that specifies the request id for tracing.
|
||||
|
||||
For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:6041` and sets the default database name to `test`.
|
||||
|
||||
|
@ -98,13 +100,13 @@ The HTTP request's BODY is a complete SQL command, and the data table in the SQL
|
|||
Use `curl` to initiate an HTTP request with a custom authentication method, with the following syntax.
|
||||
|
||||
```bash
|
||||
curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone]
|
||||
curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone[&req_id=req_id]]
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```bash
|
||||
curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone]
|
||||
curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone[&req_id=req_id]]
|
||||
```
|
||||
|
||||
where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.g. `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`.
|
||||
|
@ -113,14 +115,41 @@ where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.
|
|||
|
||||
### HTTP Response Code
|
||||
|
||||
| **Response Code** | **Description** |
|
||||
|-------------------|----------------|
|
||||
| 200 | Success. (Also used for C interface errors.) |
|
||||
| 400 | Parameter error |
|
||||
| 401 | Authentication failure |
|
||||
| 404 | Interface not found |
|
||||
| 500 | Internal error |
|
||||
| 503 | Insufficient system resources |
|
||||
Starting from `TDengine 3.0.3.0`, `taosAdapter` provides a configuration parameter `httpCodeServerError` to set whether to return a non-200 http status code when the C interface returns an error.
|
||||
|
||||
| **Description** | **httpCodeServerError false** | **httpCodeServerError true** |
|
||||
|--------------------|-----------------------------------|---------------------------------------|
|
||||
| taos_errno() returns 0 | 200 | 200 |
|
||||
| taos_errno() returns non-0 | 200 (except authentication error) | 500 (except authentication error and 400/502 error) |
|
||||
| Parameter error | 400 (only handle HTTP request URL parameter error) | 400 (handle HTTP request URL parameter error and taosd return error) |
|
||||
| Authentication error | 401 | 401 |
|
||||
| Interface does not exist | 404 | 404 |
|
||||
| Cluster unavailable error | 502 | 502 |
|
||||
| Insufficient system resources | 503 | 503 |
|
||||
|
||||
The C error codes that return http code 400 are:
|
||||
|
||||
- TSDB_CODE_TSC_SQL_SYNTAX_ERROR ( 0x0216 )
|
||||
- TSDB_CODE_TSC_LINE_SYNTAX_ERROR (0x021B)
|
||||
- TSDB_CODE_PAR_SYNTAX_ERROR (0x2600)
|
||||
- TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE (0x060B)
|
||||
- TSDB_CODE_TSC_VALUE_OUT_OF_RANGE (0x0224)
|
||||
- TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE (0x263B)
|
||||
|
||||
The error code that returns http code 401 are:
|
||||
|
||||
- TSDB_CODE_MND_USER_ALREADY_EXIST (0x0350)
|
||||
- TSDB_CODE_MND_USER_NOT_EXIST (0x0351)
|
||||
- TSDB_CODE_MND_INVALID_USER_FORMAT (0x0352)
|
||||
- TSDB_CODE_MND_INVALID_PASS_FORMAT (0x0353)
|
||||
- TSDB_CODE_MND_NO_USER_FROM_CONN (0x0354)
|
||||
- TSDB_CODE_MND_TOO_MANY_USERS (0x0355)
|
||||
- TSDB_CODE_MND_INVALID_ALTER_OPER (0x0356)
|
||||
- TSDB_CODE_MND_AUTH_FAILURE (0x0357)
|
||||
|
||||
The error code that returns http code 403 are:
|
||||
|
||||
- TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED (0x0020)
|
||||
|
||||
### HTTP body structure
|
||||
|
||||
|
@ -268,7 +297,6 @@ Response body:
|
|||
|
||||
```json
|
||||
{
|
||||
"status": "succ",
|
||||
"code": 0,
|
||||
"desc": "/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"
|
||||
}
|
||||
|
@ -354,6 +382,130 @@ Response body:
|
|||
}
|
||||
```
|
||||
|
||||
## REST API between TDengine 2.x and 3.0
|
||||
|
||||
### URI
|
||||
|
||||
| URI | TDengine 2.x | TDengine 3.0 |
|
||||
| :--------------------| :------------------: | :--------------------------------------------------: |
|
||||
| /rest/sql | Supported | Supported (with different response code and body) |
|
||||
| /rest/sqlt | Supported | No more supported |
|
||||
| /rest/sqlutc | Supported | No more supported |
|
||||
|
||||
### HTTP code
|
||||
|
||||
| HTTP code | TDengine 2.x | TDengine 3.0 | note |
|
||||
| :--------------------| :------------------: | :----------: | :-----------------------------------: |
|
||||
| 200 | Supported | Supported | Success or taosc return error |
|
||||
| 400 | Not supported | Supported | Parameter error |
|
||||
| 401 | Not supported | Supported | Authentication failure |
|
||||
| 404 | Supported | Supported | URI not exist |
|
||||
| 500 | Not supported | Supported | Internal error |
|
||||
| 503 | Supported | Supported | Insufficient system resources |
|
||||
|
||||
### Response body
|
||||
|
||||
#### REST response body return from TDengine 2.x
|
||||
|
||||
```JSON
|
||||
{
|
||||
"status": "succ",
|
||||
"head": [
|
||||
"name",
|
||||
"created_time",
|
||||
"ntables",
|
||||
"vgroups",
|
||||
"replica",
|
||||
"quorum",
|
||||
"days",
|
||||
"keep1,keep2,keep(D)",
|
||||
"cache(MB)",
|
||||
"blocks",
|
||||
"minrows",
|
||||
"maxrows",
|
||||
"wallevel",
|
||||
"fsync",
|
||||
"comp",
|
||||
"precision",
|
||||
"status"
|
||||
],
|
||||
"data": [
|
||||
[
|
||||
"log",
|
||||
"2020-09-02 17:23:00.039",
|
||||
4,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
"30,30,30",
|
||||
1,
|
||||
3,
|
||||
100,
|
||||
4096,
|
||||
1,
|
||||
3000,
|
||||
2,
|
||||
"us",
|
||||
"ready"
|
||||
]
|
||||
],
|
||||
"rows": 1
|
||||
}
|
||||
```
|
||||
```
|
||||
"data": [
|
||||
[
|
||||
"information_schema",
|
||||
16,
|
||||
"ready"
|
||||
],
|
||||
[
|
||||
"performance_schema",
|
||||
9,
|
||||
"ready"
|
||||
]
|
||||
],
|
||||
```
|
||||
|
||||
#### REST response body return from TDengine 3.0
|
||||
|
||||
```JSON
|
||||
{
|
||||
"code": 0,
|
||||
"column_meta": [
|
||||
[
|
||||
"name",
|
||||
"VARCHAR",
|
||||
64
|
||||
],
|
||||
[
|
||||
"ntables",
|
||||
"BIGINT",
|
||||
8
|
||||
],
|
||||
[
|
||||
"status",
|
||||
"VARCHAR",
|
||||
10
|
||||
]
|
||||
],
|
||||
"data": [
|
||||
[
|
||||
"information_schema",
|
||||
16,
|
||||
"ready"
|
||||
],
|
||||
[
|
||||
"performance_schema",
|
||||
9,
|
||||
"ready"
|
||||
]
|
||||
],
|
||||
"rows": 2
|
||||
}
|
||||
```
|
||||
|
||||
## Reference
|
||||
|
||||
[taosAdapter](/reference/taosadapter/)
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: C/C++
|
||||
title: C/C++ Connector
|
||||
sidebar_label: C/C++
|
||||
description: This document describes the TDengine C/C++ connector.
|
||||
---
|
||||
|
||||
C/C++ developers can use TDengine's client driver and the C/C++ connector, to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use the C/C++ connector you must include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs. The application also needs to link to the corresponding dynamic libraries on the platform where it is located.
|
||||
|
@ -175,6 +176,14 @@ The base API is used to do things like create database connections and provide a
|
|||
|
||||
Set the current default database to `db`.
|
||||
|
||||
- `int taos_get_current_db(TAOS *taos, char *database, int len, int *required)`
|
||||
|
||||
- The variables `database` and `len` are allocated by the caller before the call; the current database name and its length will be assigned to `database` and `len`.
|
||||
- If the db name cannot be assigned to `database` normally (including truncation), an error is returned with a return value of -1, and the user can then use taos_errstr(NULL) to get the error message.
|
||||
- If database==NULL or len<=0, returns an error, the space required to store the db (including the last '\0') in the variable required
|
||||
- If len is less than the space required to store the db (including the last '\0'), an error is returned. The truncated data assigned in the database ends with '\0'.
|
||||
- If len is greater than or equal to the space required to store the db (including the last '\0'), return normal 0, and assign the db name ending with '\0' in the database.
|
||||
|
||||
- `void taos_close(TAOS *taos)`
|
||||
|
||||
Closes the connection, where `taos` is the handle returned by `taos_connect()`.
|
||||
|
@ -403,5 +412,17 @@ In addition to writing data using the SQL method or the parameter binding API, w
|
|||
Note that the timestamp resolution parameter only takes effect when the protocol type is `SML_LINE_PROTOCOL`.
|
||||
For OpenTSDB's text protocol, timestamp resolution follows its official resolution rules - time precision is confirmed by the number of characters contained in the timestamp.
|
||||
|
||||
**Supported Versions**
|
||||
This feature interface is supported from version 2.3.0.0.
|
||||
Other schemaless related interfaces
|
||||
- `TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int64_t reqid)`
|
||||
- `TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision)`
|
||||
- `TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int64_t reqid)`
|
||||
- `TAOS_RES *taos_schemaless_insert_ttl(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int32_t ttl)`
|
||||
- `TAOS_RES *taos_schemaless_insert_ttl_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int32_t ttl, int64_t reqid)`
|
||||
- `TAOS_RES *taos_schemaless_insert_raw_ttl(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int32_t ttl)`
|
||||
- `TAOS_RES *taos_schemaless_insert_raw_ttl_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int32_t ttl, int64_t reqid)`
|
||||
|
||||
**Description**
|
||||
- The above seven interfaces are extension interfaces, which are mainly used to pass ttl and reqid parameters, and can be used as needed.
|
||||
- The `_raw` interfaces represent data through the passed parameters `lines` and `len`, in order to solve the problem that data containing '\0' is truncated by the original interface. The `totalRows` pointer returns the number of parsed data rows.
|
||||
- The `_ttl` interfaces can pass the ttl parameter to control the ttl expiration time of the table.
|
||||
- The `_reqid` interfaces can track the entire call chain by passing the reqid parameter.
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
---
|
||||
toc_max_heading_level: 4
|
||||
sidebar_label: Java
|
||||
title: TDengine Java Connector
|
||||
description: The TDengine Java Connector is implemented on the standard JDBC API and provides native and REST connectors.
|
||||
sidebar_label: Java
|
||||
description: This document describes the TDengine Java Connector.
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
import Tabs from '@theme/Tabs';
|
||||
|
@ -300,7 +300,7 @@ stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int
|
|||
|
||||
> **Note**: If you do not use `use db` to specify the database, all subsequent operations on the table need to add the database name as a prefix, such as db.tb.
|
||||
|
||||
### 插入数据
|
||||
### Insert data
|
||||
|
||||
```java
|
||||
// insert data
|
||||
|
@ -696,6 +696,9 @@ TaosConsumer consumer = new TaosConsumer<>(config);
|
|||
- enable.auto.commit: Specifies whether to commit automatically.
|
||||
- group.id: consumer: Specifies the group that the consumer is in.
|
||||
- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set.
|
||||
- td.connect.type: Specifies the type connect with TDengine, `jni` or `WebSocket`. default is `jni`
|
||||
- httpConnectTimeout:WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
|
||||
- messageWaitTimeout:socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
|
||||
- For more information, see [Consumer Parameters](../../../develop/tmq).
|
||||
|
||||
#### Subscribe to consume data
|
||||
|
@ -722,7 +725,12 @@ consumer.close()
|
|||
|
||||
For more information, see [Data Subscription](../../../develop/tmq).
|
||||
|
||||
### Usage examples
|
||||
#### Full Sample Code
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
In addition to the native connection, the Java Connector also supports subscribing via websocket.
|
||||
|
||||
```java
|
||||
public abstract class ConsumerLoop {
|
||||
|
@ -795,6 +803,87 @@ public abstract class ConsumerLoop {
|
|||
}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="ws" label="WebSocket connection">
|
||||
|
||||
```java
|
||||
public abstract class ConsumerLoop {
|
||||
private final TaosConsumer<ResultBean> consumer;
|
||||
private final List<String> topics;
|
||||
private final AtomicBoolean shutdown;
|
||||
private final CountDownLatch shutdownLatch;
|
||||
|
||||
public ConsumerLoop() throws SQLException {
|
||||
Properties config = new Properties();
|
||||
config.setProperty("bootstrap.servers", "localhost:6041");
|
||||
config.setProperty("td.connect.type", "ws");
|
||||
config.setProperty("msg.with.table.name", "true");
|
||||
config.setProperty("enable.auto.commit", "true");
|
||||
config.setProperty("group.id", "group2");
|
||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
||||
|
||||
this.consumer = new TaosConsumer<>(config);
|
||||
this.topics = Collections.singletonList("topic_speed");
|
||||
this.shutdown = new AtomicBoolean(false);
|
||||
this.shutdownLatch = new CountDownLatch(1);
|
||||
}
|
||||
|
||||
public abstract void process(ResultBean result);
|
||||
|
||||
public void pollData() throws SQLException {
|
||||
try {
|
||||
consumer.subscribe(topics);
|
||||
|
||||
while (!shutdown.get()) {
|
||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
||||
for (ResultBean record : records) {
|
||||
process(record);
|
||||
}
|
||||
}
|
||||
consumer.unsubscribe();
|
||||
} finally {
|
||||
consumer.close();
|
||||
shutdownLatch.countDown();
|
||||
}
|
||||
}
|
||||
|
||||
public void shutdown() throws InterruptedException {
|
||||
shutdown.set(true);
|
||||
shutdownLatch.await();
|
||||
}
|
||||
|
||||
public static class ResultDeserializer extends ReferenceDeserializer<ResultBean> {
|
||||
|
||||
}
|
||||
|
||||
public static class ResultBean {
|
||||
private Timestamp ts;
|
||||
private int speed;
|
||||
|
||||
public Timestamp getTs() {
|
||||
return ts;
|
||||
}
|
||||
|
||||
public void setTs(Timestamp ts) {
|
||||
this.ts = ts;
|
||||
}
|
||||
|
||||
public int getSpeed() {
|
||||
return speed;
|
||||
}
|
||||
|
||||
public void setSpeed(int speed) {
|
||||
this.speed = speed;
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
> **Note**: The value of value.deserializer should be adjusted based on the package path of the test environment.
|
||||
|
||||
### Use with connection pool
|
||||
|
||||
#### HikariCP
|
||||
|
@ -878,8 +967,10 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
|
|||
|
||||
| taos-jdbcdriver version | major changes |
|
||||
| :---------------------: | :--------------------------------------------: |
|
||||
| 3.0.1 - 3.0.2 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use 3.0.2 in the JDK 8 environment |
|
||||
| 3.1.0 | JDBC REST connection supports subscription over WebSocket |
|
||||
| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use other version in the JDK 8 environment |
|
||||
| 3.0.0 | Support for TDengine 3.0 |
|
||||
| 2.0.42 | fix wasNull interface return value in WebSocket connection |
|
||||
| 2.0.41 | fix decode method of username and password in REST connection |
|
||||
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
|
||||
| 2.0.38 | JDBC REST connections add bulk pull function |
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
---
|
||||
toc_max_heading_level: 4
|
||||
sidebar_label: Go
|
||||
title: TDengine Go Connector
|
||||
sidebar_label: Go
|
||||
description: This document describes the TDengine Go connector.
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
import Tabs from '@theme/Tabs';
|
||||
|
@ -119,7 +120,7 @@ _taosSql_ implements Go's `database/sql/driver` interface via cgo. You can use t
|
|||
|
||||
Use `taosSql` as `driverName` and use a correct [DSN](#DSN) as `dataSourceName`, DSN supports the following parameters.
|
||||
|
||||
* configPath specifies the `taos.cfg` directory
|
||||
* cfg specifies the `taos.cfg` directory
|
||||
|
||||
For example:
|
||||
|
||||
|
@ -355,26 +356,29 @@ The `af` package encapsulates TDengine advanced functions such as connection man
|
|||
|
||||
#### Subscribe
|
||||
|
||||
* `func NewConsumer(conf *Config) (*Consumer, error)`
|
||||
* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)`
|
||||
|
||||
Creates consumer group.
|
||||
|
||||
* `func (c *Consumer) Subscribe(topics []string) error`
|
||||
* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error`
|
||||
Note: `rebalanceCb` is reserved for compatibility purpose
|
||||
|
||||
Subscribes a topic.
|
||||
|
||||
* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error`
|
||||
Note: `rebalanceCb` is reserved for compatibility purpose
|
||||
|
||||
Subscribes to topics.
|
||||
|
||||
* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
|
||||
* `func (c *Consumer) Poll(timeoutMs int) tmq.Event`
|
||||
|
||||
Polling information.
|
||||
|
||||
* `func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error`
|
||||
* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)`
|
||||
Note: `tmq.TopicPartition` is reserved for compatibility purpose
|
||||
|
||||
Commit information.
|
||||
|
||||
* `func (c *Consumer) FreeMessage(message unsafe.Pointer)`
|
||||
|
||||
Free information.
|
||||
|
||||
* `func (c *Consumer) Unsubscribe() error`
|
||||
|
||||
Unsubscribe.
|
||||
|
@ -441,25 +445,36 @@ Close consumer.
|
|||
|
||||
### Subscribe via WebSocket
|
||||
|
||||
* `func NewConsumer(config *Config) (*Consumer, error)`
|
||||
* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)`
|
||||
|
||||
Creates consumer group.
|
||||
Creates consumer group.
|
||||
|
||||
* `func (c *Consumer) Subscribe(topic []string) error`
|
||||
* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error`
|
||||
Note: `rebalanceCb` is reserved for compatibility purpose
|
||||
|
||||
Subscribes to topics.
|
||||
Subscribes a topic.
|
||||
|
||||
* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)`
|
||||
* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error`
|
||||
Note: `rebalanceCb` is reserved for compatibility purpose
|
||||
|
||||
Polling information.
|
||||
Subscribes to topics.
|
||||
|
||||
* `func (c *Consumer) Commit(messageID uint64) error`
|
||||
* `func (c *Consumer) Poll(timeoutMs int) tmq.Event`
|
||||
|
||||
Commit information.
|
||||
Polling information.
|
||||
|
||||
* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)`
|
||||
Note: `tmq.TopicPartition` is reserved for compatibility purpose
|
||||
|
||||
Commit information.
|
||||
|
||||
* `func (c *Consumer) Unsubscribe() error`
|
||||
|
||||
Unsubscribe.
|
||||
|
||||
* `func (c *Consumer) Close() error`
|
||||
|
||||
Close consumer.
|
||||
Close consumer.
|
||||
|
||||
For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
|
||||
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
---
|
||||
toc_max_heading_level: 4
|
||||
sidebar_label: Rust
|
||||
title: TDengine Rust Connector
|
||||
sidebar_label: Rust
|
||||
description: This document describes the TDengine Rust connector.
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
import Tabs from '@theme/Tabs';
|
||||
|
@ -38,7 +39,7 @@ The Rust Connector is still under rapid development and is not guaranteed to be
|
|||
* Install the Rust development toolchain
|
||||
* If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver)
|
||||
|
||||
# Add taos dependency
|
||||
### Add taos dependency
|
||||
|
||||
Depending on the connection method, add the [taos][taos] dependency in your Rust project as follows:
|
||||
|
||||
|
@ -281,7 +282,7 @@ In the application code, use `pool.get()? ` to get a connection object [Taos].
|
|||
let taos = pool.get()?;
|
||||
```
|
||||
|
||||
# Connectors
|
||||
### Connectors
|
||||
|
||||
The [Taos][struct.Taos] object provides an API to perform operations on multiple databases.
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
sidebar_label: Python
|
||||
title: TDengine Python Connector
|
||||
description: "taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. tasopy wraps both the native and REST interfaces of TDengine, corresponding to the two submodules of tasopy: taos and taosrest. In addition to wrapping the native and REST interfaces, taospy also provides a programming interface that conforms to the Python Data Access Specification (PEP 249), making it easy to integrate taospy with many third-party tools, such as SQLAlchemy and pandas."
|
||||
sidebar_label: Python
|
||||
description: This document describes taospy, the TDengine Python connector.
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
|
@ -10,10 +10,11 @@ import TabItem from "@theme/TabItem";
|
|||
`taospy` is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
|
||||
In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
|
||||
|
||||
The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection".
|
||||
`taos-ws-py` is an optional package to enable using WebSocket to connect TDengine.
|
||||
|
||||
The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST or WebSocket interface provided by taosAdapter is referred to hereinafter as a "REST connection" or "WebSocket connection".
|
||||
|
||||
The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python).
|
||||
|
||||
## Supported platforms
|
||||
|
||||
- The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client.
|
||||
|
@ -32,7 +33,7 @@ We recommend using the latest version of `taospy`, regardless of the version of
|
|||
|
||||
### Preparation
|
||||
|
||||
1. Install Python. Python >= 3.7 is recommended. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
|
||||
1. Install Python. The recent taospy package requires Python 3.6.2+. The earlier versions of taospy require Python 3.7+. The taos-ws-py package requires Python 3.7+. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
|
||||
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.
|
||||
If you use a native connection, you will also need to [Install Client Driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.
|
||||
|
||||
|
@ -78,6 +79,22 @@ pip3 install git+https://github.com/taosdata/taos-connector-python.git
|
|||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
#### Install `taos-ws-py` (Optional)
|
||||
|
||||
The taos-ws-py package provides the way to access TDengine via WebSocket.
|
||||
|
||||
##### Install taos-ws-py with taospy
|
||||
|
||||
```bash
|
||||
pip3 install taospy[ws]
|
||||
```
|
||||
|
||||
##### Install taos-ws-py only
|
||||
|
||||
```bash
|
||||
pip3 install taos-ws-py
|
||||
```
|
||||
|
||||
### Verify
|
||||
|
||||
<Tabs defaultValue="rest">
|
||||
|
@ -98,6 +115,15 @@ For REST connections, verifying that the `taosrest` module can be imported succe
|
|||
import taosrest
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="ws" label="WebSocket connection">
|
||||
|
||||
For WebSocket connection, verifying that the `taosws` module can be imported successfully can be done in the Python Interactive Shell by typing.
|
||||
|
||||
```python
|
||||
import taosws
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
@ -166,6 +192,28 @@ If the test is successful, it will output the server version information, e.g.
|
|||
}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="ws" label="WebSocket connection" groupId="connect">
|
||||
|
||||
For WebSocket connection, make sure the cluster and the taosAdapter component are running. This can be tested using the following `curl` command.
|
||||
|
||||
```
|
||||
curl -i -N -d "show databases" -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -H "Connection: Upgrade" -H "Upgrade: websocket" -H "Host: <FQDN>:<PORT>" -H "Origin: http://<FQDN>:<PORT>" http://<FQDN>:<PORT>/rest/sql
|
||||
```
|
||||
|
||||
The FQDN above is the FQDN of the machine running taosAdapter, PORT is the port taosAdapter listening, default is `6041`.
|
||||
|
||||
If the test is successful, it will output the server version information, e.g.
|
||||
|
||||
```json
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json; charset=utf-8
|
||||
Date: Tue, 21 Mar 2023 09:29:17 GMT
|
||||
Transfer-Encoding: chunked
|
||||
|
||||
{"status":"succ","head":["server_version()"],"column_meta":[["server_version()",8,8]],"data":[["2.6.0.27"]],"rows":1}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
@ -212,6 +260,16 @@ All arguments to the `connect()` function are optional keyword arguments. The fo
|
|||
- `password`: TDengine user password. The default is `taosdata`.
|
||||
- `timeout`: HTTP request timeout. Enter a value in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed.
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="websocket" label="WebSocket connection">
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/connect_websocket_examples.py:connect}}
|
||||
```
|
||||
|
||||
The parameter of `connect()` is the url of TDengine, and the protocol is `taosws` or `ws`.
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
@ -282,7 +340,95 @@ The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-ap
|
|||
|
||||
For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="websocket" label="WebSocket connection">
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/connect_websocket_examples.py:basic}}
|
||||
```
|
||||
|
||||
- `conn.execute`: can use to execute arbitrary SQL statements, and return the number of rows affected.
|
||||
- `conn.query`: can use to execute query SQL statements, and return the query results.
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Usage with req_id
|
||||
|
||||
By using the optional req_id parameter, you can specify a request ID that can be used for tracing.
|
||||
|
||||
<Tabs defaultValue="rest">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
##### TaosConnection class
|
||||
|
||||
The `TaosConnection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods).
|
||||
|
||||
```python title="execute method"
|
||||
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:insert}}
|
||||
```
|
||||
|
||||
```python title="query method"
|
||||
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:query}}
|
||||
```
|
||||
|
||||
:::tip
|
||||
The queried results can only be fetched once. For example, only one of `fetch_all()` and `fetch_all_into_dict()` can be used in the example above. Repeated fetches will result in an empty list.
|
||||
:::
|
||||
|
||||
##### Use of TaosResult class
|
||||
|
||||
In the above example of using the `TaosConnection` class, we have shown two ways to get the result of a query: `fetch_all()` and `fetch_all_into_dict()`. In addition, `TaosResult` also provides methods to iterate through the result set by rows (`rows_iter`) or by data blocks (`blocks_iter`). Using these two methods will be more efficient in scenarios where the query has a large amount of data.
|
||||
|
||||
```python title="blocks_iter method"
|
||||
{{#include docs/examples/python/result_set_with_req_id_examples.py}}
|
||||
```
|
||||
##### Use of the TaosCursor class
|
||||
|
||||
The `TaosConnection` class and the `TaosResult` class already implement all the functionality of the native interface. If you are familiar with the interfaces in the PEP249 specification, you can also use the methods provided by the `TaosCursor` class.
|
||||
|
||||
```python title="Use of TaosCursor"
|
||||
{{#include docs/examples/python/cursor_usage_native_reference_with_req_id.py}}
|
||||
```
|
||||
|
||||
:::note
|
||||
The TaosCursor class uses native connections for write and query operations. In a client-side multi-threaded scenario, this cursor instance must remain thread exclusive and cannot be shared across threads for use, otherwise, it will result in errors in the returned results.
|
||||
|
||||
:::
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="rest" label="REST connection">
|
||||
|
||||
##### Use of TaosRestCursor class
|
||||
|
||||
The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.
|
||||
|
||||
```python title="Use of TaosRestCursor"
|
||||
{{#include docs/examples/python/connect_rest_with_req_id_examples.py:basic}}
|
||||
```
|
||||
- `cursor.execute`: Used to execute arbitrary SQL statements.
|
||||
- `cursor.rowcount` : For write operations, returns the number of successful rows written. For query operations, returns the number of rows in the result set.
|
||||
- `cursor.description` : Returns the description of the field. Please refer to [TaosRestCursor](https://docs.taosdata.com/api/taospy/taosrest/cursor.html) for the specific format of the description information.
|
||||
|
||||
##### Use of the RestClient class
|
||||
|
||||
The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
|
||||
|
||||
```python title="Use of RestClient"
|
||||
{{#include docs/examples/python/rest_client_with_req_id_example.py}}
|
||||
```
|
||||
|
||||
For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="websocket" label="WebSocket connection">
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/connect_websocket_with_req_id_examples.py:basic}}
|
||||
```
|
||||
|
||||
- `conn.execute`: can use to execute arbitrary SQL statements, and return the number of rows affected.
|
||||
- `conn.query`: can use to execute query SQL statements, and return the query results.
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
@ -303,6 +449,13 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
|
|||
{{#include docs/examples/python/conn_rest_pandas.py}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="websocket" label="WebSocket connection">
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/conn_websocket_pandas.py}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
---
|
||||
toc_max_heading_level: 4
|
||||
sidebar_label: Node.js
|
||||
title: TDengine Node.js Connector
|
||||
sidebar_label: Node.js
|
||||
description: This document describes the TDengine Node.js connector.
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
|
@ -31,7 +32,9 @@ Please refer to [version support list](/reference/connector#version-support)
|
|||
|
||||
## Supported features
|
||||
|
||||
### Native connectors
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="Native connector">
|
||||
|
||||
1. Connection Management
|
||||
2. General Query
|
||||
|
@ -40,12 +43,16 @@ Please refer to [version support list](/reference/connector#version-support)
|
|||
5. Subscription
|
||||
6. Schemaless
|
||||
|
||||
### REST Connector
|
||||
</TabItem>
|
||||
<TabItem value="rest" label="REST connector">
|
||||
|
||||
1. Connection Management
|
||||
2. General Query
|
||||
3. Continuous Query
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
## Installation Steps
|
||||
|
||||
### Pre-installation preparation
|
||||
|
@ -59,9 +66,19 @@ Please refer to [version support list](/reference/connector#version-support)
|
|||
- `python` (recommended for `v2.7` , `v3.x.x` currently not supported)
|
||||
- `@tdengine/client` 3.0.0 supports Node.js LTS v10.9.0 or later and Node.js LTS v12.8.0 or later. Older versions may be incompatible.
|
||||
- `make`
|
||||
- C compiler, [GCC](https://gcc.gnu.org) v4.8.5 or higher
|
||||
- C compiler, [GCC](https://gcc.gnu.org) v4.8.5 or later.
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="macOS" label="macOS installation dependencies">
|
||||
|
||||
- `python` (recommended for `v2.7` , `v3.x.x` currently not supported)
|
||||
- `@tdengine/client` 3.0.0 currently supports Node.js from v12.22.12, but only later versions of v12. Other versions may be incompatible.
|
||||
- `make`
|
||||
- C compiler, [GCC](https://gcc.gnu.org) v4.8.5 or later.
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="Windows" label="Windows system installation dependencies">
|
||||
|
||||
- Installation method 1
|
||||
|
@ -104,6 +121,9 @@ npm install @tdengine/rest
|
|||
|
||||
### Verify
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="Native connector">
|
||||
|
||||
After installing the TDengine client, use the `nodejsChecker.js` program to verify that the current environment supports Node.js access to TDengine.
|
||||
|
||||
Verification in detail:
|
||||
|
@ -120,6 +140,28 @@ node nodejsChecker.js host=localhost
|
|||
|
||||
- After executing the above steps, the command-line will output the result of `nodejsChecker.js` connecting to the TDengine instance and performing a simple insert and query.
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="rest" label="REST connector">
|
||||
|
||||
After installing the TDengine client, use the `restChecker.js` program to verify that the current environment supports Node.js access to TDengine.
|
||||
|
||||
Verification in detail:
|
||||
|
||||
- Create an installation test folder such as `~/tdengine-test`. Download the [restChecker.js source code](https://github.com/taosdata/TDengine/tree/3.0/docs/examples/node/restexample/restChecker.js) to your local.
|
||||
|
||||
- Execute the following command from the command-line.
|
||||
|
||||
```bash
|
||||
npm init -y
|
||||
npm install @tdengine/rest
|
||||
node restChecker.js
|
||||
```
|
||||
|
||||
- After executing the above steps, the command-line will output the result of `restChecker.js` connecting to the TDengine instance and performing a simple insert and query.
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
## Establishing a connection
|
||||
|
||||
Please choose to use one of the connectors.
|
||||
|
@ -171,24 +213,69 @@ let cursor = conn.cursor();
|
|||
|
||||
#### SQL Write
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
<NodeInsert />
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="rest" label="REST connection">
|
||||
|
||||
```js
|
||||
{{#include docs/examples/node/restexample/insert_example.js}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
#### InfluxDB line protocol write
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
<NodeInfluxLine />
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
#### OpenTSDB Telnet line protocol write
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
<NodeOpenTSDBTelnet />
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
#### OpenTSDB JSON line protocol write
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
<NodeOpenTSDBJson />
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Querying data
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
<NodeQuery />
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="rest" label="REST connection">
|
||||
|
||||
```js
|
||||
{{#include docs/examples/node/restexample/query_example.js}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
||||
## More sample programs
|
||||
|
||||
|
@ -249,4 +336,4 @@ let cursor = conn.cursor();
|
|||
|
||||
## API Reference
|
||||
|
||||
[API Reference](https://docs.taosdata.com/api/td2.0-connector/)
|
||||
[API Reference](https://docs.taosdata.com/api/td2.0-connector/)
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
---
|
||||
toc_max_heading_level: 4
|
||||
sidebar_label: C#
|
||||
title: C# Connector
|
||||
sidebar_label: C#
|
||||
description: This document describes the TDengine C# connector.
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
import Tabs from '@theme/Tabs';
|
||||
|
@ -17,7 +18,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx"
|
|||
|
||||
`TDengine.Connector` is a C# language connector provided by TDengine that allows C# developers to develop C# applications that access TDengine cluster data.
|
||||
|
||||
The `TDengine.Connector` connector supports connect to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc.The `TDengine.Connector` also supports WebSocket and developers can build connection through DSN, which supports data writing, querying, and parameter binding, etc.
|
||||
The `TDengine.Connector` connector supports connecting to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc. The `TDengine.Connector` also supports WebSocket from v3.0.1, and developers can build a connection through DSN, which supports data writing, querying, and parameter binding, etc.
|
||||
|
||||
This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to TDengine clusters via `TDengine.Connector` to perform basic operations such as data writing and querying.
|
||||
|
||||
|
@ -66,31 +67,43 @@ Please refer to [version support list](/reference/connector#version-support)
|
|||
* [Nuget Client](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (optional installation)
|
||||
* Install TDengine client driver, please refer to [Install client driver](/reference/connector/#install-client-driver) for details
|
||||
|
||||
### Install via dotnet CLI
|
||||
### Install `TDengine.Connector`
|
||||
|
||||
<Tabs defaultValue="CLI">
|
||||
<TabItem value="CLI" label="Get C# driver using dotnet CLI">
|
||||
<TabItem value="CLI" label="Native Connection">
|
||||
|
||||
You can reference the `TDengine.Connector` published in Nuget to the current project via the `dotnet` command under the path of the existing .NET project.
|
||||
You can reference the `TDengine.Connector` published in Nuget to the current project via the `dotnet` CLI under the path of the existing .NET project.
|
||||
|
||||
``` bash
|
||||
dotnet add package TDengine.Connector
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="source" label="Use source code to get C# driver">
|
||||
You may also modify the current .NET project file. You can include the following 'ItemGroup' in your project file (.csproj).
|
||||
|
||||
You can [download the source code](https://github.com/taosdata/taos-connector-dotnet/tree/3.0) and directly reference the latest version of the TDengine.Connector library.
|
||||
|
||||
```bash
|
||||
git clone -b 3.0 https://github.com/taosdata/taos-connector-dotnet.git
|
||||
cd taos-connector-dotnet
|
||||
cp -r src/ myProject
|
||||
|
||||
cd myProject
|
||||
dotnet add example.csproj reference src/TDengine.csproj
|
||||
``` XML
|
||||
<ItemGroup>
|
||||
<PackageReference Include="TDengine.Connector" Version="3.0.*" />
|
||||
</ItemGroup>
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="source" label="WebSocket Connection">
|
||||
|
||||
In this scenario, modifying your project file is required in order to copy the WebSocket dependency dynamic library from the nuget package into your project.
|
||||
```XML
|
||||
<ItemGroup>
|
||||
<PackageReference Include="TDengine.Connector" Version="3.0.*" GeneratePathProperty="true" />
|
||||
</ItemGroup>
|
||||
<Target Name="copyDLLDependency" BeforeTargets="BeforeBuild">
|
||||
<ItemGroup>
|
||||
<DepDLLFiles Include="$(PkgTDengine_Connector)\runtimes\**\*.*" />
|
||||
</ItemGroup>
|
||||
<Copy SourceFiles="@(DepDLLFiles)" DestinationFolder="$(OutDir)" />
|
||||
</Target>
|
||||
```
|
||||
|
||||
Note: only `TDengine.Connector` version >= 3.0.2 includes the dynamic library for WebSocket.
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
@ -252,19 +265,20 @@ ws://localhost:6041/test
|
|||
|
||||
|Sample program |Sample program description |
|
||||
|--------------------------------------------------------------------------------------------------------------------|--------------------------------------------|
|
||||
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
|
||||
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
|
||||
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | Parameter binding with TDengine Connector |
|
||||
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | Schemaless writes with TDengine Connector |
|
||||
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
|
||||
| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector |
|
||||
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector |
|
||||
| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | WebSocket parameter binding example |
|
||||
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
|
||||
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
|
||||
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/NET6Examples/Stmt) | Parameter binding with TDengine Connector |
|
||||
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/schemaless) | Schemaless writes with TDengine Connector |
|
||||
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
|
||||
| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector |
|
||||
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector |
|
||||
| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSTMT.cs) | WebSocket parameter binding example |
|
||||
|
||||
## Important update records
|
||||
|
||||
| TDengine.Connector | Description |
|
||||
|--------------------|--------------------------------|
|
||||
| 3.0.2 | Support .NET Framework 4.5 and above. Support .Net standard 2.0. Nuget package includes dynamic library for WebSocket.|
|
||||
| 3.0.1 | Support WebSocket and Cloud, with query, insert, and parameter binding functions.|
|
||||
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
|
||||
| 1.0.7 | Fixed TDengine.Query() memory leak. |
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
sidebar_label: PHP
|
||||
title: PHP Connector
|
||||
sidebar_label: PHP
|
||||
description: This document describes the TDengine PHP connector.
|
||||
---
|
||||
|
||||
`php-tdengine` is the TDengine PHP connector provided by TDengine community. In particular, it supports Swoole coroutine.
|
||||
|
@ -86,7 +87,7 @@ In this section a few sample programs which use TDengine PHP connector to access
|
|||
|
||||
> Any error would throw exception: `TDengine\Exception\TDengineException`
|
||||
|
||||
### Establish Conection
|
||||
### Establish Connection
|
||||
|
||||
<details>
|
||||
<summary>Establish Connection</summary>
|
||||
|
|
|
@ -1 +1 @@
|
|||
label: "connector"
|
||||
label: "Connector"
|
||||
|
|
|
@ -14,7 +14,7 @@ import PkgListV3 from "/components/PkgListV3";
|
|||
|
||||
Once the package is unzipped, you will see the following files in the directory:
|
||||
- _ install_client.sh_: install script
|
||||
- _ taos.tar.gz_: client driver package
|
||||
- _ package.tar.gz_: client driver package
|
||||
- _ driver_: TDengine client driver
|
||||
- _examples_: some example programs of different programming languages (C/C#/go/JDBC/MATLAB/python/R)
|
||||
You can run `install_client.sh` to install it.
|
||||
|
|
|
@ -11,7 +11,7 @@ import PkgListV3 from "/components/PkgListV3";
|
|||
The default installation path is C:\TDengine, including the following files (directories).
|
||||
|
||||
- _taos.exe_: TDengine CLI command-line program
|
||||
- _taosadapter.exe_: server-side executable that provides RESTful services and accepts writing requests from a variety of other softwares
|
||||
- _taosadapter.exe_: server-side executable that provides RESTful services and accepts writing requests from a variety of other software
|
||||
- _taosBenchmark.exe_: TDengine testing tool
|
||||
- _cfg_: configuration file directory
|
||||
- _driver_: client driver dynamic link library
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: Connector
|
||||
description: This document describes the connectors that TDengine provides to interface with various programming languages.
|
||||
---
|
||||
|
||||
TDengine provides a rich set of APIs (application development interface). To facilitate users to develop their applications quickly, TDengine supports connectors for multiple programming languages, including official connectors for C/C++, Java, Python, Go, Node.js, C#, and Rust. These connectors support connecting to TDengine clusters using both native interfaces (taosc) and REST interfaces (not supported in a few languages yet). Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.
|
||||
|
@ -59,11 +60,11 @@ The different database framework specifications for various programming language
|
|||
| -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- |
|
||||
| **Connection Management** | Support | Support | Support | Support | Support | Support |
|
||||
| **Regular Query** | Support | Support | Support | Support | Support | Support |
|
||||
| **Parameter Binding** | Not supported | Not supported | support | Support | Not supported | Support |
|
||||
| **Subscription (TMQ) ** | Not supported | Not supported | support | Not supported | Not supported | Support |
|
||||
| **Schemaless** | Not supported | Not supported | Not supported | Not supported | Not supported | Not supported |
|
||||
| **Bulk Pulling (based on WebSocket) ** | Support | Support | Support | support | Support | Support |
|
||||
| **DataFrame** | Not supported | Support | Not supported | Not supported | Not supported | Not supported |
|
||||
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
|
||||
| **Subscription (TMQ) ** | Supported | Support | Support | Not Supported | Not Supported | Support |
|
||||
| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||
| **Bulk Pulling (based on WebSocket) ** | Support | Support | Support | Support | Support | Support |
|
||||
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||
|
||||
:::warning
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
title: "taosAdapter"
|
||||
description: "taosAdapter is a TDengine companion tool that acts as a bridge and adapter between TDengine clusters and applications. It provides an easy-to-use and efficient way to ingest data directly from data collection agent software such as Telegraf, StatsD, collectd, etc. It also provides an InfluxDB/OpenTSDB compatible data ingestion interface, allowing InfluxDB/OpenTSDB applications to be seamlessly ported to TDengine."
|
||||
sidebar_label: "taosAdapter"
|
||||
title: taosAdapter
|
||||
sidebar_label: taosAdapter
|
||||
description: This document describes how to use taosAdapter, a TDengine companion tool that acts as a bridge and adapter between TDengine clusters and applications.
|
||||
---
|
||||
|
||||
import Prometheus from "./_prometheus.mdx"
|
||||
|
@ -21,6 +21,7 @@ taosAdapter provides the following features.
|
|||
- Seamless connection to collectd
|
||||
- Seamless connection to StatsD
|
||||
- Supports Prometheus remote_read and remote_write
|
||||
- Get table's VGroup ID
|
||||
|
||||
## taosAdapter architecture diagram
|
||||
|
||||
|
@ -57,6 +58,7 @@ Usage of taosAdapter:
|
|||
--collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true)
|
||||
--collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata")
|
||||
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
|
||||
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL"
|
||||
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
|
||||
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
|
||||
-c, --config string config path default /etc/taos/taosadapter.toml
|
||||
|
@ -66,8 +68,9 @@ Usage of taosAdapter:
|
|||
--cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS"
|
||||
--cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets"
|
||||
--cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers"
|
||||
--debug enable debug mode. Env "TAOS_ADAPTER_DEBUG"
|
||||
--debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" (default true)
|
||||
--help Print this help message and exit
|
||||
--httpCodeServerError Use a non-200 http status code when taosd returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR"
|
||||
--influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true)
|
||||
--log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL"
|
||||
--log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
|
||||
|
@ -78,14 +81,17 @@ Usage of taosAdapter:
|
|||
--log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB")
|
||||
--log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s)
|
||||
--logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info")
|
||||
--monitor.collectDuration duration Set monitor duration. Env "TAOS_MONITOR_COLLECT_DURATION" (default 3s)
|
||||
--monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_MONITOR_IDENTITY"
|
||||
--monitor.incgroup Whether running in cgroup. Env "TAOS_MONITOR_INCGROUP"
|
||||
--monitor.password string TDengine password. Env "TAOS_MONITOR_PASSWORD" (default "taosdata") --monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80)
|
||||
--monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70)
|
||||
--monitor.user string TDengine user. Env "TAOS_MONITOR_USER" (default "root")
|
||||
--monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_MONITOR_WRITE_INTERVAL" (default 30s)
|
||||
--monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_MONITOR_WRITE_TO_TD"
|
||||
--monitor.collectDuration duration Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s)
|
||||
--monitor.disable Whether to disable monitoring. Env "TAOS_ADAPTER_MONITOR_DISABLE"
|
||||
--monitor.disableCollectClientIP Whether to disable collecting clientIP. Env "TAOS_ADAPTER_MONITOR_DISABLE_COLLECT_CLIENT_IP"
|
||||
--monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY"
|
||||
--monitor.incgroup Whether running in cgroup. Env "TAOS_ADAPTER_MONITOR_INCGROUP"
|
||||
--monitor.password string TDengine password. Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata")
|
||||
--monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80)
|
||||
--monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70)
|
||||
--monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root")
|
||||
--monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s)
|
||||
--monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD"
|
||||
--node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE"
|
||||
--node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE"
|
||||
--node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter")
|
||||
|
@ -98,6 +104,7 @@ Usage of taosAdapter:
|
|||
--node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE"
|
||||
--node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata")
|
||||
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
|
||||
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"
|
||||
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
|
||||
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
|
||||
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
|
||||
|
@ -109,10 +116,11 @@ Usage of taosAdapter:
|
|||
--opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata")
|
||||
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
|
||||
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
|
||||
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"
|
||||
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
|
||||
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" (default 1h0m0s)
|
||||
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" (default 4000)
|
||||
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" (default 4000)
|
||||
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT"
|
||||
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT"
|
||||
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE"
|
||||
-P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041)
|
||||
--prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true)
|
||||
--restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1)
|
||||
|
@ -129,6 +137,7 @@ Usage of taosAdapter:
|
|||
--statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044)
|
||||
--statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp")
|
||||
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
|
||||
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL"
|
||||
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
|
||||
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
|
||||
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
|
||||
|
@ -174,6 +183,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl
|
|||
node_export is an exporter for machine metrics. Please visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
|
||||
- Support for Prometheus remote_read and remote_write
|
||||
remote_read and remote_write are interfaces for Prometheus data read and write from/to other data storage solution. Please visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
|
||||
- Get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
|
||||
|
||||
## Interfaces
|
||||
|
||||
|
@ -195,6 +205,7 @@ Support InfluxDB query parameters as follows.
|
|||
- `precision` The time precision used by TDengine
|
||||
- `u` TDengine user name
|
||||
- `p` TDengine password
|
||||
- `ttl` The time to live of automatically created sub-table. This value cannot be updated. TDengine will use the ttl value of the first data of sub-table to create sub-table. For more information, please refer [Create Table](/taos-sql/table/#create-table)
|
||||
|
||||
Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
|
||||
Example: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
|
||||
|
@ -236,6 +247,10 @@ node_export is an exporter of hardware and OS metrics exposed by the \*NIX kerne
|
|||
|
||||
<Prometheus />
|
||||
|
||||
### Get table's VGroup ID
|
||||
|
||||
You can call `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` to get table's VGroup ID. For more information about VGroup, please refer to [primary-logic-unit](/tdinternal/arch/#primary-logic-unit).
|
||||
|
||||
## Memory usage optimization methods
|
||||
|
||||
taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values are integers between 1 to 100, and represent a percentage of the system's physical memory.
|
||||
|
@ -313,6 +328,10 @@ This parameter controls the number of results returned by the following interfac
|
|||
- `http://<fqdn>:6041/rest/sql`
|
||||
- `http://<fqdn>:6041/prometheus/v1/remote_read/:db`
|
||||
|
||||
## Configure http return code
|
||||
|
||||
taosAdapter uses the parameter `httpCodeServerError` to set whether to return a non-200 HTTP status code when the C interface returns an error. When set to true, different HTTP status codes will be returned according to the error code returned by C. For details, see [RESTful API](https://docs.tdengine.com/reference/rest-api/) HTTP Response Code chapter.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
You can check the taosAdapter running status with the `systemctl status taosadapter` command.
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
---
|
||||
title: taosBenchmark
|
||||
sidebar_label: taosBenchmark
|
||||
description: This document describes how to use taosBenchmark, a tool for testing the performance of TDengine.
|
||||
toc_max_heading_level: 4
|
||||
description: "taosBenchmark (once called taosdemo ) is a tool for testing the performance of TDengine."
|
||||
---
|
||||
|
||||
# Introduction
|
||||
|
@ -92,7 +92,7 @@ taosBenchmark -f <json file>
|
|||
|
||||
</details>
|
||||
|
||||
## Command-line argument in detailed
|
||||
## Command-line argument in detail
|
||||
|
||||
- **-f/--file <json file\>** :
|
||||
specify the configuration file to use. This file includes All parameters. Users should not use this parameter with other parameters on the command-line. There is no default value.
|
||||
|
@ -198,19 +198,28 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
|
|||
- **-R/--disorder-range <timeRange\>** :
|
||||
Specify the timestamp range for the disordered data. It leads the resulting disorder timestamp as the ordered timestamp minus a random value in this range. Valid only if the percentage of disordered data specified by `-O/--disorder` is greater than 0.
|
||||
|
||||
- **-F/--prepare_rand <Num\>** :
|
||||
- **-F/--prepared_rand <Num\>** :
|
||||
Specify the number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
|
||||
|
||||
- **-a/--replica <replicaNum\>** :
|
||||
Specify the number of replicas when creating the database. The default value is 1.
|
||||
|
||||
- **-k/--keep-trying <NUMBER\>** :
|
||||
Keep trying if failed to insert, default is no. Available with v3.0.9+.
|
||||
|
||||
- **-z/--trying-interval <NUMBER\>** :
|
||||
Specify interval between keep trying insert. Valid value is a positive number. Only valid when keep trying be enabled. Available with v3.0.9+.
|
||||
|
||||
- **-v/--vgroups <NUMBER\>** :
|
||||
Specify vgroups number for creating a database, only valid with daemon version 3.0+
|
||||
|
||||
- **-V/--version** :
|
||||
Show version information only. Users should not use it with other parameters.
|
||||
|
||||
- **-? /--help** :
|
||||
Show help information and exit. Users should not use it with other parameters.
|
||||
|
||||
## Configuration file parameters in detailed
|
||||
## Configuration file parameters in detail
|
||||
|
||||
### General configuration parameters
|
||||
|
||||
|
@ -231,6 +240,18 @@ The parameters listed in this section apply to all function modes.
|
|||
|
||||
`filetype` must be set to `insert` in the insertion scenario. See [General Configuration Parameters](#General Configuration Parameters)
|
||||
|
||||
- ** keep_trying ** : Keep trying if failed to insert, default is no. Available with v3.0.9+.
|
||||
|
||||
- ** trying_interval ** : Specify interval between keep trying insert. Valid value is a positive number. Only valid when keep trying be enabled. Available with v3.0.9+.
|
||||
|
||||
- ** childtable_from and childtable_to ** : specify the child table range to create. The range is [childtable_from, childtable_to).
|
||||
|
||||
- ** continue_if_fail ** : allow the user to specify the reaction if the insertion failed.
|
||||
|
||||
- "continue_if_fail" : "no" // means taosBenchmark will exit if it fails to insert as default reaction behavior.
|
||||
- "continue_if_fail" : "yes" // means taosBenchmark will warn the user if it fails to insert but continue to insert the next record.
|
||||
- "continue_if_fail": "smart" // means taosBenchmark will try to create the non-existent child table if it fails to insert.
|
||||
|
||||
#### Database related configuration parameters
|
||||
|
||||
The parameters related to database creation are configured in `dbinfo` in the json configuration file, as follows. The other parameters correspond to the database parameters specified when `create database` in [../../taos-sql/database].
|
||||
|
@ -342,7 +363,7 @@ The configuration parameters for specifying super table tag columns and data col
|
|||
|
||||
- **min**: The minimum value of the column/label of the data type. The generated value will equal or large than the minimum value.
|
||||
|
||||
- **max**: The maximum value of the column/label of the data type. The generated value will less than the maxium value.
|
||||
- **max**: The maximum value of the column/label of the data type. The generated value will less than the maximum value.
|
||||
|
||||
- **values**: The value field of the nchar/binary column/label, which will be chosen randomly from the values.
|
||||
|
||||
|
@ -370,7 +391,7 @@ The configuration parameters for specifying super table tag columns and data col
|
|||
- **num_of_records_per_req** :
|
||||
Writing the number of rows of records per request to TDengine, the default value is 30000. When it is set too large, the TDengine client driver will return the corresponding error message, so you need to lower the setting of this parameter to meet the writing requirements.
|
||||
|
||||
- **prepare_rand**: The number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
|
||||
- **prepared_rand**: The number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
|
||||
|
||||
### Query scenario configuration parameters
|
||||
|
||||
|
@ -382,11 +403,11 @@ See [General Configuration Parameters](#General Configuration Parameters) for de
|
|||
|
||||
#### Configuration parameters for executing the specified query statement
|
||||
|
||||
The configuration parameters for querying the sub-tables or the normal tables are set in `specified_table_query`.
|
||||
The configuration parameters for querying the specified table (it can be a super table, a sub-table or a normal table) are set in `specified_table_query`.
|
||||
|
||||
- **query_interval** : The query interval in seconds, the default value is 0.
|
||||
|
||||
- **threads**: The number of threads to execute the query SQL, the default value is 1.
|
||||
- **threads/concurrent**: The number of threads to execute the query SQL, the default value is 1.
|
||||
|
||||
- **sqls**.
|
||||
- **sql**: the SQL command to be executed.
|
||||
|
@ -413,9 +434,9 @@ The configuration parameters of the super table query are set in `super_table_qu
|
|||
|
||||
#### Configuration parameters for executing the specified subscription statement
|
||||
|
||||
The configuration parameters for subscribing to a sub-table or a generic table are set in `specified_table_query`.
|
||||
The configuration parameters for subscribing to a specified table (it can be a super table, a sub-table or a generic table) are set in `specified_table_query`.
|
||||
|
||||
- **threads**: The number of threads to execute SQL, default is 1.
|
||||
- **threads/concurrent**: The number of threads to execute SQL, default is 1.
|
||||
|
||||
- **interval**: The time interval to execute the subscription, in seconds, default is 0.
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
title: taosdump
|
||||
description: "taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed up data to the same, or another running TDengine cluster."
|
||||
description: This document describes how to use taosdump, a tool for backing up and restoring the data in a TDengine cluster.
|
||||
---
|
||||
|
||||
## Introduction
|
||||
|
@ -19,7 +19,7 @@ Users should not use taosdump to back up raw data, environment settings, hardwar
|
|||
|
||||
There are two ways to install taosdump:
|
||||
|
||||
- Install the taosTools official installer. Please find taosTools from [All download links](https://www.tdengine.com/all-downloads) page and download and install it.
|
||||
- Install the taosTools official installer. Please find taosTools from [Release History](https://docs.taosdata.com/releases/tools/) page and download and install it.
|
||||
|
||||
- Compile taos-tools separately and install it. Please refer to the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
|
||||
|
||||
|
|
|
@ -1590,7 +1590,7 @@
|
|||
},
|
||||
{
|
||||
"datasource": "${DS_TDENGINE}",
|
||||
"description": "taosd max memery last 10 minutes",
|
||||
"description": "taosd max memory last 10 minutes",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
|
@ -1919,7 +1919,7 @@
|
|||
},
|
||||
{
|
||||
"datasource": "${DS_TDENGINE}",
|
||||
"description": "taosd max memery last 10 minutes",
|
||||
"description": "taosd max memory last 10 minutes",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
|
@ -1977,7 +1977,7 @@
|
|||
},
|
||||
{
|
||||
"datasource": "${DS_TDENGINE}",
|
||||
"description": "taosd max memery last 10 minutes",
|
||||
"description": "taosd max memory last 10 minutes",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
|
@ -2825,7 +2825,7 @@
|
|||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Requets Count per Minutes $fqdn",
|
||||
"title": "Requests Count per Minutes $fqdn",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
|
|
Binary file not shown.
Before Width: | Height: | Size: 10 KiB After Width: | Height: | Size: 78 KiB |
|
@ -1566,7 +1566,7 @@
|
|||
},
|
||||
{
|
||||
"datasource": "${ds}",
|
||||
"description": "taosd max memery last 10 minutes",
|
||||
"description": "taosd max memory last 10 minutes",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
|
@ -1933,7 +1933,7 @@
|
|||
},
|
||||
{
|
||||
"datasource": "${ds}",
|
||||
"description": "taosd max memery last 10 minutes",
|
||||
"description": "taosd max memory last 10 minutes",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
|
@ -2000,7 +2000,7 @@
|
|||
},
|
||||
{
|
||||
"datasource": "${ds}",
|
||||
"description": "taosd max memery last 10 minutes",
|
||||
"description": "taosd max memory last 10 minutes",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
|
@ -2961,7 +2961,7 @@
|
|||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Requets Count per Minutes $fqdn",
|
||||
"title": "Requests Count per Minutes $fqdn",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
|
@ -3355,4 +3355,4 @@
|
|||
"title": "TDengine",
|
||||
"uid": "tdengine",
|
||||
"version": 8
|
||||
}
|
||||
}
|
||||
|
|
|
@ -186,7 +186,7 @@
|
|||
},
|
||||
{
|
||||
"datasource": "TDengine",
|
||||
"description": "taosd max memery last 10 minutes",
|
||||
"description": "taosd max memory last 10 minutes",
|
||||
"gridPos": {
|
||||
"h": 6,
|
||||
"w": 8,
|
||||
|
@ -253,7 +253,7 @@
|
|||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "taosd memery",
|
||||
"title": "taosd memory",
|
||||
"type": "gauge"
|
||||
},
|
||||
{
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue