fix: offset error in tmq & add test cases
@@ -16,7 +16,6 @@ debug/
 release/
 target/
 debs/
-deps/
 rpms/
 mac/
 *.pyc
@@ -0,0 +1,28 @@
+repos:
+-   repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v2.3.0
+    hooks:
+    -   id: check-yaml
+    -   id: check-json
+    -   id: end-of-file-fixer
+    -   id: trailing-whitespace
+
+repos:
+-   repo: https://github.com/psf/black
+    rev: stable
+    hooks:
+    -   id: black
+
+repos:
+-   repo: https://github.com/pocc/pre-commit-hooks
+    rev: master
+    hooks:
+    -   id: cppcheck
+        args: ["--error-exitcode=0"]
+
+repos:
+-   repo: https://github.com/crate-ci/typos
+    rev: v1.15.7
+    hooks:
+    -   id: typos
@@ -15,11 +15,15 @@ SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
 set(TD_SUPPORT_DIR "${TD_SOURCE_DIR}/cmake")
 set(TD_CONTRIB_DIR "${TD_SOURCE_DIR}/contrib")

 include(${TD_SUPPORT_DIR}/cmake.platform)
 include(${TD_SUPPORT_DIR}/cmake.define)
 include(${TD_SUPPORT_DIR}/cmake.options)
 include(${TD_SUPPORT_DIR}/cmake.version)

 # contrib
 add_subdirectory(contrib)
@@ -18,7 +18,7 @@
 Note: name documentation-only branches with a `docs/` prefix to avoid triggering unnecessary tests.
 4. Create a pull request to merge your branch into the development branch `3.0`; our development team will review it as soon as possible.

-If you run into any problems, please add the official WeChat account TDengineECO and our team will help you resolve them.
+If you run into any problems, please add the official WeChat account tdengine1 and our team will help you resolve them.

 ## Gifts for Contributors
@@ -48,4 +48,4 @@ The TDengine community is committed to helping more developers understand and use it.

 ## Contact Us

-If you have any problems to solve or questions to ask, you can add our WeChat account: TDengineECO
+If you have any problems to solve or questions to ask, you can add our WeChat account: tdengine1.
@@ -314,7 +314,7 @@ def pre_test_build_win() {
 cd %WIN_CONNECTOR_ROOT%
 python.exe -m pip install --upgrade pip
 python -m pip uninstall taospy -y
-python -m pip install taospy==2.7.6
+python -m pip install taospy==2.7.10
 xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
 '''
 return 1
README-CN.md

@@ -15,7 +15,7 @@
 [](https://coveralls.io/github/taosdata/TDengine?branch=develop)
 [](https://bestpractices.coreinfrastructure.org/projects/4201)

-Simplified Chinese | [English](README.md) | Many positions are open for hiring; see [here](https://www.taosdata.com/cn/careers/)
+Simplified Chinese | [English](README.md) | [TDengine Cloud](https://cloud.taosdata.com/?utm_medium=cn&utm_source=github) | Many positions are open for hiring; see [here](https://www.taosdata.com/cn/careers/)

 # Introduction to TDengine

@@ -39,7 +39,7 @@ TDengine is an open-source, high-performance, cloud-native time-series database

 # Building

-TDengine can currently be installed and run on Linux, Windows, macOS, and other platforms. Applications on any OS can also connect to the server taosd through taosAdapter's RESTful interface. TDengine supports X64/ARM64 CPUs, and support for MIPS64, Alpha64, ARM32, RISC-V, and other CPU architectures will be added later.
+TDengine can currently be installed and run on Linux, Windows, macOS, and other platforms. Applications on any OS can also connect to the server taosd through taosAdapter's RESTful interface. TDengine supports X64/ARM64 CPUs, and support for MIPS64, Alpha64, ARM32, RISC-V, and other CPU architectures will be added later. Building with a cross-compiler is not currently supported.

 You can install TDengine from source code, from a [container](https://docs.taosdata.com/get-started/docker/), from an [installation package](https://docs.taosdata.com/get-started/package/), or with [Kubernetes](https://docs.taosdata.com/deployment/k8s/). This quick guide covers installing from source only.

@@ -68,14 +68,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
 ```bash
 sudo yum install epel-release
 sudo yum update
-sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
+sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel
 sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
 ```

-### CentOS 8 & Fedora
+### CentOS 8/Fedora/Rocky Linux

 ```bash
-sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
+sudo dnf install -y gcc gcc-c++ gflags make cmake epel-release git openssl-devel
 ```

 #### Install dependencies for building taosTools on CentOS

@@ -88,7 +88,7 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
 sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
 ```

-#### CentOS 8/Rocky Linux
+#### CentOS 8/Fedora/Rocky Linux

 ```
 sudo yum install -y epel-release

@@ -101,7 +101,7 @@ sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson

 If installing powertools fails, try the following instead:
 ```
-sudo yum config-manager --set-enabled Powertools
+sudo yum config-manager --set-enabled powertools
 ```

 #### CentOS + devtoolset

@@ -117,7 +117,7 @@ scl enable devtoolset-9 -- bash
 ### macOS

 ```
-brew install argp-standalone pkgconfig
+brew install argp-standalone gflags pkgconfig
 ```

 ### Set up the Golang development environment

@@ -175,7 +175,7 @@ cd TDengine
 ```bash
 mkdir debug
 cd debug
-cmake .. -DBUILD_TOOLS=true
+cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true
 make
 ```

@@ -352,4 +352,4 @@ TDengine provides rich application development interfaces, including C/C++, Java

 # Join the Technical Discussion Group

-TDengine's official community, the "IoT Big Data Group", is open to everyone. Search for the WeChat ID "tdengine1", add Little T as a friend, and you will be invited into the group.
+TDengine's official community, the "IoT Big Data Group", is open to everyone. Search for the WeChat ID "tdengine", add Little T as a friend, and you will be invited into the group.
README.md

@@ -47,7 +47,7 @@ For user manual, system design and architecture, please refer to [TDengine Documen

 # Building

-At the moment, TDengine server supports running on Linux/Windows/macOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service. TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future.
+At the moment, TDengine server supports running on Linux/Windows/macOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service. TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support building in a cross-compiling environment.

 You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to installing from source.

@@ -76,14 +76,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
 ```bash
 sudo yum install epel-release
 sudo yum update
-sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
+sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel
 sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
 ```

-### CentOS 8 & Fedora
+### CentOS 8/Fedora/Rocky Linux

 ```bash
-sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
+sudo dnf install -y gcc gcc-c++ make cmake epel-release gflags git openssl-devel
 ```

 #### Install build dependencies for taosTools on CentOS

@@ -94,7 +94,7 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
 sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
 ```

-#### CentOS 8/Rocky Linux
+#### CentOS 8/Fedora/Rocky Linux

 ```
 sudo yum install -y epel-release

@@ -124,7 +124,7 @@ scl enable devtoolset-9 -- bash
 ### macOS

 ```
-brew install argp-standalone pkgconfig
+brew install argp-standalone gflags pkgconfig
 ```

 ### Setup golang environment

@@ -183,7 +183,7 @@ It is equivalent to executing the following commands:
 ```bash
 mkdir debug
 cd debug
-cmake .. -DBUILD_TOOLS=true
+cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true
 make
 ```
build.sh

@@ -4,5 +4,5 @@ if [ ! -d debug ]; then
     mkdir debug || echo -e "failed to make directory for build"
 fi

-cd debug && cmake .. -DBUILD_TOOLS=true && make
+cd debug && cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true && make
@@ -1,6 +1,6 @@
 cmake_minimum_required(VERSION 3.0)

-set(CMAKE_VERBOSE_MAKEFILE OFF)
+set(CMAKE_VERBOSE_MAKEFILE ON)
 set(TD_BUILD_TAOSA_INTERNAL FALSE)

 #set output directory

@@ -115,15 +115,6 @@ ELSE ()
     SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}")
   ENDIF ()

-  IF (${BUILD_SANITIZER})
-    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
-    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
-    MESSAGE(STATUS "Compile with Address Sanitizer!")
-  ELSE ()
-    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
-    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
-  ENDIF ()

   # disable all assert
   IF ((${DISABLE_ASSERT} MATCHES "true") OR (${DISABLE_ASSERTS} MATCHES "true"))
     ADD_DEFINITIONS(-DDISABLE_ASSERT)
@@ -165,4 +156,20 @@ ELSE ()
     MESSAGE(STATUS "SIMD instructions (FMA/AVX/AVX2) is ACTIVATED")
   ENDIF()

+  # build mode
+  SET(CMAKE_C_FLAGS_REL "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
+  SET(CMAKE_CXX_FLAGS_REL "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
+
+  IF (${BUILD_SANITIZER})
+    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
+    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
+    MESSAGE(STATUS "Compile with Address Sanitizer!")
+  ELSEIF (${BUILD_RELEASE})
+    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
+    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
+  ELSE ()
+    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
+    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
+  ENDIF ()

 ENDIF ()
@@ -64,12 +64,25 @@ IF(${TD_WINDOWS})
         ON
     )

+    MESSAGE("build geos Win32")
+    option(
+        BUILD_GEOS
+        "If build geos on Windows"
+        ON
+    )
+
 ELSEIF (TD_DARWIN_64)
     IF(${BUILD_TEST})
         add_definitions(-DCOMPILER_SUPPORTS_CXX13)
     ENDIF ()
 ENDIF ()

+option(
+    BUILD_GEOS
+    "If build geos on Windows"
+    ON
+)
+
 option(
     BUILD_SHARED_LIBS
     ""

@@ -171,3 +184,14 @@ option(
     ON
 )

+option(
+    BUILD_RELEASE
+    "If build release version"
+    OFF
+)
+
+option(
+    BUILD_CONTRIB
+    "If build thirdpart from source"
+    OFF
+)
@@ -121,6 +121,12 @@ IF ("${CPUTYPE}" STREQUAL "")
         SET(TD_LOONGARCH_64 TRUE)
         ADD_DEFINITIONS("-D_TD_LOONGARCH_")
         ADD_DEFINITIONS("-D_TD_LOONGARCH_64")
+    ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
+        SET(PLATFORM_ARCH_STR "mips")
+        MESSAGE(STATUS "input cpuType: mips64")
+        SET(TD_MIPS_64 TRUE)
+        ADD_DEFINITIONS("-D_TD_MIPS_")
+        ADD_DEFINITIONS("-D_TD_MIPS_64")
     ENDIF ()
 ELSE ()
     # if generate ARM version:

@@ -172,5 +178,17 @@ ENDIF()

 MESSAGE(STATUS "Platform arch:" ${PLATFORM_ARCH_STR})

+set(TD_DEPS_DIR "x86")
+if (TD_LINUX)
+    IF (TD_ARM_64 OR TD_ARM_32)
+        set(TD_DEPS_DIR "arm")
+    ELSEIF (TD_MIPS_64)
+        set(TD_DEPS_DIR "mips")
+    ELSE()
+        set(TD_DEPS_DIR "x86")
+    ENDIF()
+endif()
+MESSAGE(STATUS "DEPS_DIR: " ${TD_DEPS_DIR})
+
 MESSAGE("C Compiler: ${CMAKE_C_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_C_COMPILER_VERSION})")
 MESSAGE("CXX Compiler: ${CMAKE_CXX_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_CXX_COMPILER_VERSION})")
@@ -2,7 +2,7 @@
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "3.0.4.1")
+  SET(TD_VER_NUMBER "3.1.1.0.alpha")
 ENDIF ()

 IF (DEFINED VERCOMPATIBLE)
@@ -0,0 +1,12 @@
+
+# geos
+ExternalProject_Add(geos
+    GIT_REPOSITORY https://github.com/libgeos/geos.git
+    GIT_TAG 3.12.0
+    SOURCE_DIR "${TD_CONTRIB_DIR}/geos"
+    BINARY_DIR ""
+    CONFIGURE_COMMAND ""
+    BUILD_COMMAND ""
+    INSTALL_COMMAND ""
+    TEST_COMMAND ""
+)
@@ -1,11 +1,29 @@

 # rocksdb
-ExternalProject_Add(rocksdb
-    GIT_REPOSITORY https://github.com/taosdata-contrib/rocksdb.git
-    GIT_TAG v6.23.3
+if (${BUILD_CONTRIB})
+  ExternalProject_Add(rocksdb
+    URL https://github.com/facebook/rocksdb/archive/refs/tags/v8.1.1.tar.gz
+    URL_HASH MD5=3b4c97ee45df9c8a5517308d31ab008b
+    DOWNLOAD_NO_PROGRESS 1
+    DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
     SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
     CONFIGURE_COMMAND ""
     BUILD_COMMAND ""
     INSTALL_COMMAND ""
     TEST_COMMAND ""
-)
+  )
+else()
+  if (NOT ${TD_LINUX})
+    ExternalProject_Add(rocksdb
+      URL https://github.com/facebook/rocksdb/archive/refs/tags/v8.1.1.tar.gz
+      URL_HASH MD5=3b4c97ee45df9c8a5517308d31ab008b
+      DOWNLOAD_NO_PROGRESS 1
+      DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
+      SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
+      CONFIGURE_COMMAND ""
+      BUILD_COMMAND ""
+      INSTALL_COMMAND ""
+      TEST_COMMAND ""
+    )
+  endif()
+endif()
@@ -2,6 +2,7 @@
 # stub
 ExternalProject_Add(stub
     GIT_REPOSITORY https://github.com/coolxv/cpp-stub.git
+    GIT_TAG 5e903b8e
     GIT_SUBMODULES "src"
     SOURCE_DIR "${TD_CONTRIB_DIR}/cpp-stub"
     BINARY_DIR "${TD_CONTRIB_DIR}/cpp-stub/src"
@@ -2,7 +2,7 @@
 # taosadapter
 ExternalProject_Add(taosadapter
     GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
-    GIT_TAG 565ca21
+    GIT_TAG main
     SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
     BINARY_DIR ""
     #BUILD_IN_SOURCE TRUE
@@ -2,7 +2,7 @@
 # taos-tools
 ExternalProject_Add(taos-tools
     GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
-    GIT_TAG 4378702
+    GIT_TAG main
     SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
     BINARY_DIR ""
     #BUILD_IN_SOURCE TRUE
@@ -77,11 +77,23 @@ if(${BUILD_WITH_LEVELDB})
     cat("${TD_SUPPORT_DIR}/leveldb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
 endif(${BUILD_WITH_LEVELDB})

-# rocksdb
-if(${BUILD_WITH_ROCKSDB})
-    cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
-    add_definitions(-DUSE_ROCKSDB)
-endif(${BUILD_WITH_ROCKSDB})
+if (${BUILD_CONTRIB})
+    if(${BUILD_WITH_ROCKSDB})
+        cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+        add_definitions(-DUSE_ROCKSDB)
+    endif()
+else()
+    if (NOT ${TD_LINUX})
+        if(${BUILD_WITH_ROCKSDB})
+            cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+            add_definitions(-DUSE_ROCKSDB)
+        endif(${BUILD_WITH_ROCKSDB})
+    else()
+        if(${BUILD_WITH_ROCKSDB})
+            add_definitions(-DUSE_ROCKSDB)
+        endif(${BUILD_WITH_ROCKSDB})
+    endif()
+endif()

 # canonical-raft
 if(${BUILD_WITH_CRAFT})

@@ -134,6 +146,11 @@ if(${BUILD_ADDR2LINE})
     endif(NOT ${TD_WINDOWS})
 endif(${BUILD_ADDR2LINE})

+# geos
+if(${BUILD_GEOS})
+    cat("${TD_SUPPORT_DIR}/geos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+endif()
+
 # download dependencies
 configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
 execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .

@@ -222,17 +239,39 @@ endif(${BUILD_WITH_LEVELDB})

 # rocksdb
 # To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
-if(${BUILD_WITH_ROCKSDB})
+if (${BUILD_WITH_UV})
     if(${TD_LINUX})
-        SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
+        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
+        IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
+            SET(CMAKE_BUILD_TYPE Release)
+        endif()
     endif(${TD_LINUX})
+endif (${BUILD_WITH_UV})
+
+if (${BUILD_WITH_ROCKSDB})
+    if (${BUILD_CONTRIB})
+        if(${TD_LINUX})
+            SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
+            if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
+                SET(CMAKE_BUILD_TYPE Release)
+            endif()
+        endif(${TD_LINUX})
+        MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})

     if(${TD_DARWIN})
         SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
     endif(${TD_DARWIN})

+    if (${TD_DARWIN_ARM64})
+        set(HAS_ARMV8_CRC true)
+    endif(${TD_DARWIN_ARM64})
+
     if (${TD_WINDOWS})
         SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
+        option(WITH_JNI "" OFF)
+        option(WITH_MD_LIBRARY "build with MD" OFF)
+        set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
     endif(${TD_WINDOWS})

@@ -242,16 +281,6 @@ if(${BUILD_WITH_ROCKSDB})
     option(WITH_PERF_CONTEXT "" OFF)
     endif(${TD_DARWIN})

-    if(${TD_WINDOWS})
-        option(WITH_JNI "" ON)
-    endif(${TD_WINDOWS})
-
-    if(${TD_WINDOWS})
-        option(WITH_MD_LIBRARY "build with MD" OFF)
-        set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
-    endif(${TD_WINDOWS})
-
     option(WITH_FALLOCATE "" OFF)
     option(WITH_JEMALLOC "" OFF)
     option(WITH_GFLAGS "" OFF)

@@ -270,7 +299,53 @@ if(${BUILD_WITH_ROCKSDB})
         rocksdb
         PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
     )
-endif(${BUILD_WITH_ROCKSDB})
+    else()
+        if (NOT ${TD_LINUX})
+            MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
+            if(${TD_DARWIN})
+                SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
+            endif(${TD_DARWIN})
+
+            if (${TD_DARWIN_ARM64})
+                set(HAS_ARMV8_CRC true)
+            endif(${TD_DARWIN_ARM64})
+
+            if (${TD_WINDOWS})
+                SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
+                option(WITH_JNI "" OFF)
+                option(WITH_MD_LIBRARY "build with MD" OFF)
+                set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
+            endif(${TD_WINDOWS})
+
+            if(${TD_DARWIN})
+                option(HAVE_THREAD_LOCAL "" OFF)
+                option(WITH_IOSTATS_CONTEXT "" OFF)
+                option(WITH_PERF_CONTEXT "" OFF)
+            endif(${TD_DARWIN})
+
+            option(WITH_FALLOCATE "" OFF)
+            option(WITH_JEMALLOC "" OFF)
+            option(WITH_GFLAGS "" OFF)
+            option(PORTABLE "" ON)
+            option(WITH_LIBURING "" OFF)
+            option(FAIL_ON_WARNINGS OFF)
+
+            option(WITH_TESTS "" OFF)
+            option(WITH_BENCHMARK_TOOLS "" OFF)
+            option(WITH_TOOLS "" OFF)
+            option(WITH_LIBURING "" OFF)
+
+            option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
+            add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
+            target_include_directories(
+                rocksdb
+                PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
+            )
+        endif()
+
+    endif()
+endif()

 # lucene
 # To support build on ubuntu: sudo apt-get install libboost-all-dev

@@ -470,6 +545,23 @@ if(${BUILD_ADDR2LINE})
     endif(NOT ${TD_WINDOWS})
 endif(${BUILD_ADDR2LINE})

+# geos
+if(${BUILD_GEOS})
+    if(${TD_LINUX})
+        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
+        IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
+            SET(CMAKE_BUILD_TYPE Release)
+        endif()
+    endif(${TD_LINUX})
+    option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
+    add_subdirectory(geos EXCLUDE_FROM_ALL)
+    unset(CMAKE_CXX_STANDARD CACHE) # undo libgeos's setting of global CMAKE_CXX_STANDARD
+    target_include_directories(
+        geos_c
+        PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/geos/include>
+    )
+endif(${BUILD_GEOS})
+
 # ================================================================================================
 # Build test
[binary files: two images added, 13 KiB and 14 KiB]
@@ -4,7 +4,7 @@ if(${BUILD_DOCS})
     find_package(Doxygen)
     if (DOXYGEN_FOUND)
         # Build the doc
-        set(DOXYGEN_IN ${TD_SOURCE_DIR}/docs/Doxyfile.in)
+        set(DOXYGEN_IN ${TD_SOURCE_DIR}/docs/doxgen/Doxyfile.in)
         set(DOXYGEN_OUT ${CMAKE_BINARY_DIR}/Doxyfile)

         configure_file(${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY)
@@ -32,6 +32,20 @@ docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043

 Note that TDengine Server 3.0 uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed.

+If you need to persist data to a specific directory on your local machine, please run the following command:
+
+```shell
+docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \
+  -v ~/data/taos/dnode/log:/var/log/taos \
+  -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
+```
+
+:::note
+
+- /var/lib/taos: TDengine's default data file directory. The location can be changed via the configuration file. You can also change ~/data/taos/dnode/data to any empty local data directory.
+- /var/log/taos: TDengine's default log file directory. The location can be changed via the configuration file. You can also change ~/data/taos/dnode/log to any empty local log directory.
+
+:::
+
 Run the following command to ensure that your container is running:

 ```shell

@@ -113,4 +127,4 @@ In the query above you are selecting the first timestamp (ts) in the interval, a

 ## Additional Information

-For more information about deploying TDengine in a Docker environment, see [Using TDengine in Docker](../../reference/docker).
+For more information about deploying TDengine in a Docker environment, see [Deploying TDengine with Docker](../../deployment/docker).
@@ -18,7 +18,20 @@ The full package of TDengine includes the TDengine Server (`taosd`), TDengine Cl

 The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download the Lite package that includes only `taosd` and the C/C++ connector.

-The TDengine Community Edition is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.gz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on x64 Windows and x64/m1 macOS.
+TDengine OSS is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.gz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on x64 Windows and x64/m1 macOS.
+
+## Operating environment requirements
+
+On Linux, the minimum requirements for the operating environment are as follows:
+
+Linux kernel version: 3.10.0-1160.83.1.el7.x86_64
+
+glibc version: 2.17
+
+If you compile and install from cloned source code, the following are also required:
+
+cmake version: 3.26.4 or above
+
+gcc version: 9.3.1 or above

 ## Installation

@@ -188,7 +201,7 @@ You can use the TDengine CLI to monitor your TDengine deployment and execute ad

 <TabItem label="Windows" value="windows">

-After the installation is complete, please run `sc start taosd` or run `C:\TDengine\taosd.exe` with administrator privileges to start TDengine Server.
+After the installation is complete, please run `sc start taosd` or run `C:\TDengine\taosd.exe` with administrator privileges to start TDengine Server. Then run `sc start taosadapter` or run `C:\TDengine\taosadapter.exe` with administrator privileges to start taosAdapter and provide the http/REST service.

 ## Command Line Interface (CLI)
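The new requirements above name exact tool versions, so a quick, hedged way to sanity-check a build host is a few lines of Python (assuming `cmake` and `gcc` are expected on `PATH`):

```python
import subprocess

# Print the first version line for each build prerequisite listed above.
for tool in ("cmake", "gcc"):
    try:
        out = subprocess.run([tool, "--version"], capture_output=True, text=True, check=True)
        print(out.stdout.splitlines()[0])
    except (FileNotFoundError, subprocess.CalledProcessError):
        print(f"{tool}: not found")
```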
@@ -21,17 +21,6 @@ import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
 <DocCardList items={useCurrentSidebarCategory().items}/>
 ```

-## Study TDengine Knowledge Map
-
-The TDengine Knowledge Map covers the various knowledge points of TDengine, revealing the invocation relationships and data flow between various conceptual entities. Learning and understanding the TDengine Knowledge Map will help you quickly master the TDengine knowledge system.
-
-<figure>
-<center>
-<a href="pathname:///img/tdengine-map.svg" target="_blank"><img src="/img/tdengine-map.svg" width="80%" /></a>
-<figcaption>Diagram 1. TDengine Knowledge Map</figcaption>
-</center>
-</figure>
-
 ## Join TDengine Community

 <table width="100%">
@@ -83,7 +83,7 @@ If `maven` is used to manage the projects, what needs to be done is only adding
 <dependency>
     <groupId>com.taosdata.jdbc</groupId>
     <artifactId>taos-jdbcdriver</artifactId>
-    <version>3.0.0</version>
+    <version>3.2.4</version>
 </dependency>
 ```
@@ -33,7 +33,7 @@ The below SQL statement is used to insert one row into table "d1001".
 INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31);
 ```

-`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1` is a Unix timestamp; only timestamps later than the current time minus the KEEP configuration are allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).

 ### Insert Multiple Rows

@@ -43,7 +43,7 @@ Multiple rows can be inserted in a single SQL statement. The example below inser
 INSERT INTO d1001 VALUES (ts2, 10.2, 220, 0.23) (ts2, 10.3, 218, 0.25);
 ```

-`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1` and `ts2` are Unix timestamps; only timestamps later than the current time minus the KEEP configuration are allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).

 ### Insert into Multiple Tables

@@ -53,7 +53,7 @@ Data can be inserted into multiple tables in the same SQL statement. The example
 INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31) (ts2, 12.6, 218, 0.33) d1002 VALUES (ts3, 12.3, 221, 0.31);
 ```

-`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1`, `ts2`, and `ts3` are Unix timestamps; only timestamps later than the current time minus the KEEP configuration are allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).

 For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).
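The KEEP rule above is easier to see with a connector sketch. The following minimal Python example assumes taospy is installed, a local server is running, and the `d1001`/`d1002` tables from the surrounding examples already exist in a `power` database:

```python
import time

import taos  # taospy, the Python connector

# With the default millisecond precision, any ts greater than
# (now - KEEP) is accepted; using "now" is always safe.
ts1 = int(time.time() * 1000)
ts2 = ts1 + 1
ts3 = ts1 + 2

conn = taos.connect(host="localhost", user="root", password="taosdata", database="power")
conn.execute(f"INSERT INTO d1001 VALUES ({ts1}, 10.3, 219, 0.31)")
conn.execute(
    f"INSERT INTO d1001 VALUES ({ts1}, 10.3, 219, 0.31) ({ts2}, 12.6, 218, 0.33) "
    f"d1002 VALUES ({ts3}, 12.3, 221, 0.31)"
)
conn.close()
```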
@@ -81,10 +81,6 @@ Set<String> subscription() throws SQLException;

 ConsumerRecords<V> poll(Duration timeout) throws SQLException;

-void commitAsync();
-
-void commitAsync(OffsetCommitCallback callback);
-
 void commitSync() throws SQLException;

 void close() throws SQLException;

@@ -105,6 +101,12 @@ class Consumer:
     def poll(self, timeout: float = 1.0):
         pass

+    def assignment(self):
+        pass
+
+    def poll(self, timeout: float = 1.0):
+        pass
+
     def close(self):
         pass
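Since this pull request fixes offset handling in TMQ, a sketch of how the newly documented `assignment` interface might be used from Python may help. It is a hedged example: it assumes taospy 2.7.10 (the version pinned in the Jenkinsfile above), an existing topic named `topic_name`, and that the returned partition objects expose `topic`, `partition`, and `offset` attributes:

```python
from taos.tmq import Consumer

consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
consumer.subscribe(["topic_name"])

# Inspect the consumer's current partition assignment and offsets.
for tp in consumer.assignment():
    print(tp.topic, tp.partition, tp.offset)  # attribute names are assumptions

consumer.close()
```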
@@ -238,6 +240,8 @@ The following SQL statement creates a topic in TDengine:
 CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1;
 ```

+- There is an upper limit on the number of topics that can be created, controlled by the parameter tmqMaxTopicNum, with a default of 20.
+
 Multiple subscription types are supported.

 #### Subscribe to a Column

@@ -259,14 +263,15 @@ You can subscribe to a topic through a SELECT statement. Statements that specify
 Syntax:

 ```sql
-CREATE TOPIC topic_name AS STABLE stb_name
+CREATE TOPIC topic_name [with meta] AS STABLE stb_name [where_condition]
 ```

 Creating a topic in this manner differs from a `SELECT * from stbName` statement as follows:

 - The table schema can be modified.
 - Unstructured data is returned. The format of the data returned changes based on the supertable schema.
-- A different table schema may exist for every data block to be processed.
+- The `with meta` parameter is optional. When specified, statements that create supertables and subtables are returned as well; this is mainly used by taosX for supertable migration.
+- The `where_condition` parameter is optional. When specified, only subtables that meet the condition are subscribed to. The where condition cannot contain ordinary columns, only tags or tbname. Functions can be used on the tags in the where condition, but not aggregate functions, because subtable tag values cannot be aggregated. It can also be a constant expression, such as 2 > 1 (subscribe to all subtables) or false (subscribe to no subtables).
 - The data returned does not include tags.

 ### Subscribe to a Database
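As an illustration of the new `with meta` and `where_condition` options, the following hedged Python sketch creates such a topic through the connector. The topic name, the `power.meters` supertable, and its `location` tag are placeholders borrowed from other TDengine examples:

```python
import taos  # taospy; assumes a running server

conn = taos.connect(host="localhost", user="root", password="taosdata")
# Subscribe only to subtables whose `location` tag matches; WITH META also
# delivers the supertable/subtable creation statements, as described above.
conn.execute(
    "CREATE TOPIC IF NOT EXISTS topic_meters_sf "
    "WITH META AS STABLE power.meters WHERE location = 'California.SanFrancisco'"
)
conn.close()
```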
@@ -274,10 +279,12 @@ Creating a topic in this manner differs from a `SELECT * from stbName` statement
 Syntax:

 ```sql
-CREATE TOPIC topic_name [WITH META] AS DATABASE db_name;
+CREATE TOPIC topic_name [with meta] AS DATABASE db_name;
 ```

-This SQL statement creates a subscription to all tables in the database. You can add the `WITH META` parameter to include schema changes in the subscription, including creating and deleting supertables; adding, deleting, and modifying columns; and creating, deleting, and modifying the tags of subtables. Consumers can determine the message type from the API. Note that this differs from Kafka.
+This SQL statement creates a subscription to all tables in the database.
+
+- The `with meta` parameter is optional. When specified, statements that create all supertables and subtables in the database are returned as well; this is mainly used by taosX for database migration.

 ## Create a Consumer
@@ -285,13 +292,13 @@ You configure the following parameters when creating a consumer:

 | Parameter | Type | Description | Remarks |
 | :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
-| `td.connect.ip` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
-| `td.connect.user` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
-| `td.connect.pass` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
-| `td.connect.port` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
-| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. |
+| `td.connect.ip` | string | IP address of the server side | |
+| `td.connect.user` | string | User name | |
+| `td.connect.pass` | string | Password | |
+| `td.connect.port` | string | Port of the server side | |
+| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. Each topic can have up to 100 consumer groups. |
 | `client.id` | string | Client ID | Maximum length: 192. |
-| `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
+| `auto.offset.reset` | enum | Initial offset for the consumer group | `earliest`: subscribe from the earliest data, the default behavior; `latest`: subscribe from the latest data; `none`: cannot subscribe without a committed offset |
 | `enable.auto.commit` | boolean | Commit automatically; true: user application doesn't need to explicitly commit; false: user application needs to handle commits by itself | Default value is true |
 | `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds | |
 | `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages | Default value: false |

@@ -325,6 +332,7 @@ Java programs use the following parameters:

 | Parameter | Type | Description | Remarks |
 | ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
+| `td.connect.type` | string | Connection type: "jni" means native connection, "ws" means websocket connection. The default is "jni" |
 | `bootstrap.servers` | string | Connection address, such as `localhost:6030` |
 | `value.deserializer` | string | Value deserializer; to use this method, implement the `com.taosdata.jdbc.tmq.Deserializer` interface or inherit the `com.taosdata.jdbc.tmq.ReferenceDeserializer` type |
 | `value.deserializer.encoding` | string | Specify the encoding for string deserialization | |

@@ -399,22 +407,6 @@ from taos.tmq import Consumer
 consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
 ```

-Python programs use the following parameters:
-
-| Parameter | Type | Description | Remarks |
-|:---------:|:----:|:-----------:|:-------:|
-| `td.connect.ip` | string | Used in establishing a connection | |
-| `td.connect.user` | string | Used in establishing a connection | |
-| `td.connect.pass` | string | Used in establishing a connection | |
-| `td.connect.port` | string | Used in establishing a connection | |
-| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192 |
-| `client.id` | string | Client ID | Maximum length: 192 |
-| `msg.with.table.name` | string | Specify whether to deserialize table names from messages | Specify `true` or `false` |
-| `enable.auto.commit` | string | Commit automatically | Specify `true` or `false` |
-| `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | |
-| `auto.offset.reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
-| `enable.heartbeat.background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false` |
-
 </TabItem>

 <TabItem label="Node.JS" value="Node.JS">
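Wiring the parameters from the table above into a working consumer looks roughly like the following Python sketch; the host, topic name, and manual-commit call are assumptions to adapt:

```python
from taos.tmq import Consumer

consumer = Consumer({
    "td.connect.ip": "127.0.0.1",
    "td.connect.user": "root",
    "td.connect.pass": "taosdata",
    "group.id": "grp1",               # required
    "client.id": "demo-consumer",
    "auto.offset.reset": "earliest",  # start from the earliest data
    "enable.auto.commit": "false",    # commit manually below
    "msg.with.table.name": "true",
})
consumer.subscribe(["topic_name"])
try:
    while True:
        res = consumer.poll(1.0)
        if res is None:
            continue
        err = res.error()
        if err is not None:
            raise err
        for block in res.value():
            print(block.fetchall())
        consumer.commit(res)  # manual commit, since enable.auto.commit=false
finally:
    consumer.close()
```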
@@ -10,10 +10,10 @@ TDengine uses various kinds of caching techniques to efficiently write and query

 TDengine uses an insert-driven cache management policy, known as first in, first out (FIFO). This policy differs from read-driven "least recently used (LRU)" cache management. A FIFO policy stores the latest data in cache and flushes the oldest data from cache to disk when the cache usage reaches a threshold. In IoT use cases, the most recent data or the current state is most important. The cache policy in TDengine, like much of the design and architecture of TDengine, is based on the nature of IoT data.

-When you create a database, you can configure the size of the write cache on each vnode. The **vgroups** parameter determines the number of vgroups that process data in the database, and the **buffer** parameter determines the size of the write cache for each vnode.
+When you create a database, you can configure the size of the write cache on each vnode. The **vgroups** parameter determines the number of vgroups that process data in the database, and the **buffer** parameter determines the size of the write cache for each vnode. The unit of buffer is MB.

 ```sql
-create database db0 vgroups 100 buffer 16MB
+create database db0 vgroups 100 buffer 16
 ```

 In theory, larger cache sizes are always better. However, at a certain point, it becomes impossible to improve performance by increasing cache size. In most scenarios, you can retain the default cache settings.

@@ -28,10 +28,10 @@ When you create a database, you can configure whether the latest data from every

 ## Metadata Cache

-To improve query and write performance, each vnode caches the metadata that it receives. When you create a database, you can configure the size of the metadata cache through the *pages* and *pagesize* parameters.
+To improve query and write performance, each vnode caches the metadata that it receives. When you create a database, you can configure the size of the metadata cache through the *pages* and *pagesize* parameters. The unit of pagesize is KB.

 ```sql
-create database db0 pages 128 pagesize 16kb
+create database db0 pages 128 pagesize 16
 ```

 The preceding SQL statement creates 128 pages on each vnode in the `db0` database. Each page has a 16 KB metadata cache.
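To make the units concrete, here is a back-of-the-envelope calculation with the numbers from the two statements above (replication ignored):

```python
# Per-database write cache: one `buffer` allocation per vnode (one vnode per
# vgroup and replica), so `vgroups 100 buffer 16` reserves roughly 100 * 16 MB.
vgroups = 100
buffer_mb = 16        # per-vnode write cache, in MB

# Per-vnode metadata cache: `pages 128 pagesize 16` gives 128 * 16 KB per vnode.
pages = 128
pagesize_kb = 16      # per-page metadata cache, in KB

print(f"write cache: {vgroups * buffer_mb} MB total")
print(f"metadata cache: {pages * pagesize_kb} KB per vnode")
```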
@ -17,7 +17,7 @@ When you create a user-defined function, you must implement standard interface f
|
||||||
- For aggregate functions, implement the `aggfn_start`, `aggfn`, and `aggfn_finish` interface functions.
|
- For aggregate functions, implement the `aggfn_start`, `aggfn`, and `aggfn_finish` interface functions.
|
||||||
- To initialize your function, implement the `udf_init` function. To terminate your function, implement the `udf_destroy` function.
|
- To initialize your function, implement the `udf_init` function. To terminate your function, implement the `udf_destroy` function.
|
||||||
|
|
||||||
There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name\>_start, <udf-name\>_finish, <udf-name\>_init, and <udf-name\>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
|
There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be `_start`, `_finish`, `_init`, and `_destroy`, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
|
||||||
|
|
||||||
### Implementing a Scalar Function in C
|
### Implementing a Scalar Function in C
|
||||||
The implementation of a scalar function is described as follows:
|
The implementation of a scalar function is described as follows:
|
||||||
|
@ -270,15 +270,81 @@ select max_vol(vol1,vol2,vol3,deviceid) from battery;
|
||||||
|
|
||||||
## Implement a UDF in Python
|
## Implement a UDF in Python
|
||||||
|
|
||||||
|
### Prepare Environment
|
||||||
|
|
||||||
|
1. Prepare Python Environment
|
||||||
|
|
||||||
|
Please follow standard procedure of python environment preparation.
|
||||||
|
|
||||||
|
2. Install Python package `taospyudf`
|
||||||
|
|
||||||
|
```shell
|
||||||
|
pip3 install taospyudf
|
||||||
|
```
|
||||||
|
|
||||||
|
During this process, some C++ code needs to be compiled. So it's required to have `cmake` and `gcc` on your system. The compiled `libtaospyudf.so` will be automatically copied to `/usr/local/lib` path. If you are not root user, please use `sudo`. After installation is done, please check using the command below.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
root@slave11 ~/udf $ ls -l /usr/local/lib/libtaos*
|
||||||
|
-rw-r--r-- 1 root root 671344 May 24 22:54 /usr/local/lib/libtaospyudf.so
|
||||||
|
```
|
||||||
|
|
||||||
|
Then execute the command below.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
ldconfig
|
||||||
|
```
|
||||||
|
|
||||||
|
3. If you want to utilize some 3rd party python packages in your Python UDF, please set configuration parameter `UdfdLdLibPath` to the value of `PYTHONPATH` before starting `taosd`.
|
||||||
|
|
||||||
|
4. Launch `taosd` service
|
||||||
|
|
||||||
|
Please refer to [Get Started](../../get-started)
|
||||||
|
|
||||||
|
### Interface definition
|
||||||
|
|
||||||
|
#### Introduction to Interface
|
||||||
|
|
||||||
Implement the specified interface functions when implementing a UDF in Python.
|
Implement the specified interface functions when implementing a UDF in Python.
|
||||||
- implement `process` function for the scalar UDF.
|
- implement `process` function for the scalar UDF.
|
||||||
- implement `start`, `reduce`, `finish` for the aggregate UDF.
|
- implement `start`, `reduce`, `finish` for the aggregate UDF.
|
||||||
- implement `init` for initialization and `destroy` for termination.
|
- implement `init` for initialization and `destroy` for termination.
|
||||||
|
|
||||||
### Implement a Scalar UDF in Python
|
#### Scalar UDF Interface
|
||||||
|
|
||||||
The implementation of a scalar UDF is described as follows:
|
The implementation of a scalar UDF is described as follows:
|
||||||
|
|
||||||
|
```Python
|
||||||
|
def process(input: datablock) -> tuple[output_type]:
|
||||||
|
```
|
||||||
|
|
||||||
|
Description: this function processes the input data block; you can use `datablock.data(row, col)` to access the Python object at position (row, col). The output is a tuple of objects of the output type, one per input row.
|
||||||
|
|
||||||
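As a minimal illustration, here is a sketch of a scalar `process()` that doubles every value in its first input column; `shape()` is the data block method used throughout the samples later in this document, and the example assumes numeric input:

```python
def init():
    pass

def destroy():
    pass

def process(block):
    rows, _ = block.shape()  # number of input rows and columns
    # return one output value per input row
    return [block.data(i, 0) * 2 for i in range(rows)]
```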
|
#### Aggregate UDF Interface
|
||||||
|
|
||||||
|
The implementation of an aggregate function is described as follows:
|
||||||
|
|
||||||
|
```Python
|
||||||
|
def start() -> bytes:
|
||||||
|
def reduce(inputs: datablock, buf: bytes) -> bytes
|
||||||
|
def finish(buf: bytes) -> output_type:
|
||||||
|
```
|
||||||
|
|
||||||
|
Description: first, start() is invoked to generate the initial intermediate result buffer; then the input data is divided into multiple row blocks, and reduce() is invoked for each block `inputs` together with the current intermediate result `buf`; finally, finish() is invoked to generate the final result from the intermediate result `buf`. The final result can contain only 0 or 1 row.
|
||||||
|
|
||||||
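As a minimal illustration, an aggregate UDF that counts its input rows can serialize the running count into the intermediate buffer; a sketch using `pickle`, as the full samples later in this document do:

```python
import pickle

def start() -> bytes:
    # initial intermediate result: a count of 0
    return pickle.dumps(0)

def reduce(inputs, buf: bytes) -> bytes:
    count = pickle.loads(buf)          # deserialize the current state
    rows, _ = inputs.shape()
    return pickle.dumps(count + rows)  # fold this block into the state

def finish(buf: bytes) -> int:
    return pickle.loads(buf)           # the single final value
```

init() and destroy() are omitted here for brevity; as described below, they must still be defined in a real UDF file.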
|
#### Initialization and Cleanup Interface
|
||||||
|
|
||||||
|
```python
|
||||||
|
def init()
|
||||||
|
def destroy()
|
||||||
|
```
|
||||||
|
|
||||||
|
Description: init() does the initialization work before any data is processed; destroy() does the cleanup work after all data has been processed.
|
||||||
|
|
||||||
|
### Python UDF Template
|
||||||
|
|
||||||
|
#### Scalar Template
|
||||||
|
|
||||||
```Python
|
```Python
|
||||||
def init():
|
def init():
|
||||||
# initialization
|
# initialization
|
||||||
|
@ -290,9 +356,9 @@ def process(input: datablock) -> tuple[output_type]:
|
||||||
# return tuple object consisted of object of type outputtype
|
# return a tuple consisting of objects of type outputtype
|
||||||
```
|
```
|
||||||
|
|
||||||
### Implement an Aggregate UDF in Python
|
Note: process() must be implemented; init() and destroy() must also be defined, but they can be empty.
|
||||||
|
|
||||||
The implementation of an aggregate function is described as follows:
|
#### Aggregate Template
|
||||||
|
|
||||||
```Python
|
```Python
|
||||||
def init():
|
def init():
|
||||||
|
@ -304,40 +370,14 @@ def start() -> bytes:
|
||||||
def reduce(inputs: datablock, buf: bytes) -> bytes
|
def reduce(inputs: datablock, buf: bytes) -> bytes
|
||||||
# deserialize buf to state
|
# deserialize buf to state
|
||||||
# reduce the inputs and state into new_state.
|
# reduce the inputs and state into new_state.
|
||||||
# use inputs.data(i,j) to access python ojbect of location(i,j)
|
# use inputs.data(i,j) to access python object of location(i,j)
|
||||||
# serialize new_state into new_state_bytes
|
# serialize new_state into new_state_bytes
|
||||||
return new_state_bytes
|
return new_state_bytes
|
||||||
def finish(buf: bytes) -> output_type:
|
def finish(buf: bytes) -> output_type:
|
||||||
#return obj of type outputtype
|
# return an object of type outputtype
|
||||||
```
|
```
|
||||||
|
|
||||||
### Python UDF Interface Definition
|
Note: an aggregate UDF requires init(), destroy(), start(), reduce(), and finish() to be implemented. start() generates the initial result buffer; the input data is then divided into multiple row blocks, and reduce() is invoked for each data block `inputs` with the intermediate result `buf`; finally, finish() is invoked to generate the final result from the intermediate result `buf`.
|
||||||
|
|
||||||
#### Scalar interface
|
|
||||||
```Python
|
|
||||||
def process(input: datablock) -> tuple[output_type]:
|
|
||||||
```
|
|
||||||
- `input` is a data block two-dimension matrix-like object, of which method `data(row, col)` returns the Python object located at location (`row`, `col`)
|
|
||||||
- return a Python tuple object, of which each item is a Python object of type `output_type`
|
|
||||||
|
|
||||||
#### Aggregate Interface
|
|
||||||
```Python
|
|
||||||
def start() -> bytes:
|
|
||||||
def reduce(input: datablock, buf: bytes) -> bytes
|
|
||||||
def finish(buf: bytes) -> output_type:
|
|
||||||
```
|
|
||||||
|
|
||||||
- first `start()` is called to return the initial result in type `bytes`
|
|
||||||
- then the input data are divided into multiple data blocks and for each block `input`, `reduce` is called with the data block `input` and the current result `buf` bytes and generates a new intermediate result buffer.
|
|
||||||
- finally, the `finish` function is called on the intermediate result `buf` and outputs 0 or 1 data of type `output_type`
|
|
||||||
|
|
||||||
|
|
||||||
#### Initialization and Cleanup Interface
|
|
||||||
```Python
|
|
||||||
def init()
|
|
||||||
def destroy()
|
|
||||||
```
|
|
||||||
Implement `init` for initialization and `destroy` for termination.
|
|
||||||
|
|
||||||
### Data Mapping between TDengine SQL and Python UDF
|
### Data Mapping between TDengine SQL and Python UDF
|
||||||
|
|
||||||
|
@ -353,15 +393,463 @@ The following table describes the mapping between TDengine SQL data type and Pyt
|
||||||
|TIMESTAMP | int |
|
|TIMESTAMP | int |
|
||||||
|JSON and other types | Not Supported |
|
|JSON and other types | Not Supported |
|
||||||
|
|
||||||
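For instance, because TIMESTAMP arrives as a Python int (epoch time in the database's precision, milliseconds by default), a scalar UDF can manipulate it with plain integer arithmetic; a sketch (init()/destroy() omitted):

```python
def process(block):
    rows, _ = block.shape()
    # shift each timestamp forward by one second (1000 ms),
    # passing NULL inputs through as None
    return [None if block.data(i, 0) is None else block.data(i, 0) + 1000
            for i in range(rows)]
```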
### Installing Python UDF
|
### Development Guide
|
||||||
1. Install Python package `taospyudf` that executes Python UDF
|
|
||||||
```bash
|
In this section we demonstrate five examples of developing UDFs in Python, progressing from simple to complex. The examples cover:
|
||||||
sudo pip install taospyudf
|
1. A scalar function that accepts a single integer as input and outputs ln(n^2 + 1).
|
||||||
ldconfig
|
2. A scalar function that accepts n integers, like (x1, x2, ..., xn), and outputs the sum of each input multiplied by its ordinal position, i.e. 1 * x1 + 2 * x2 + ... + n * xn.
|
||||||
```
|
3. A scalar function that accepts a timestamp and outputs the next Sunday after that timestamp. This case demonstrates how to use the third-party library `moment`.
|
||||||
2. If PYTHONPATH is needed to find Python packages when the Python UDF executes, include the PYTHONPATH contents into the udfdLdLibPath variable of the taos.cfg configuration file
|
4. An aggregate function that calculates the difference between the maximum and the minimum of a column, i.e. the same functionality as the built-in spread() function.
|
||||||
|
|
||||||
|
The guide also explains some techniques for debugging Python UDFs.
|
||||||
|
|
||||||
|
We assume you are using a Linux system with TDengine 3.0.4.0+ and Python 3.7+.
|
||||||
|
|
||||||
|
Note: **You can't use the print() function to output logs inside a UDF; write logs to a specific file or use the Python logging module instead.**
|
||||||
|
|
||||||
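For example, a minimal sketch of logging from a UDF with the standard `logging` module; the log file path is illustrative, so pick a location writable by the user that runs `taosd`/`udfd`:

```python
import logging

def init():
    # print() output is not visible from a UDF; log to a file instead
    logging.basicConfig(filename="/var/log/taos/myudf.log",
                        level=logging.INFO,
                        format="%(asctime)s %(levelname)s %(message)s")
    logging.info("UDF initialized")

def destroy():
    logging.info("UDF destroyed")

def process(block):
    rows, _ = block.shape()
    logging.debug("processing %d rows", rows)  # suppressed at INFO level
    return [block.data(i, 0) for i in range(rows)]
```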
|
#### Sample 1: Simplest UDF
|
||||||
|
|
||||||
|
This scalar UDF accepts an integer as input and outputs ln(n^2 + 1).
|
||||||
|
|
||||||
|
First, compose a Python source file on your system and save it, e.g. as `/root/udf/myfun.py`, with the code below.
|
||||||
|
|
||||||
|
```python
|
||||||
|
from math import log
|
||||||
|
|
||||||
|
def init():
|
||||||
|
pass
|
||||||
|
|
||||||
|
def destroy():
|
||||||
|
pass
|
||||||
|
|
||||||
|
def process(block):
|
||||||
|
rows, _ = block.shape()
|
||||||
|
return [log(block.data(i, 0) ** 2 + 1) for i in range(rows)]
|
||||||
|
```
|
||||||
|
|
||||||
|
This program consists of three functions. init() and destroy() do nothing, but they must be defined even when empty because they are required parts of a Python UDF. The most important function is process(), which accepts a data block; the data block object has two methods:
|
||||||
|
1. shape() returns the number of rows and the number of columns of the data block
|
||||||
|
2. data(i, j) returns the value at (i,j) in the block
|
||||||
|
|
||||||
|
The process() function of a scalar UDF returns exactly as many values as there are input rows. Here we ignore the number of columns because we only compute on the first column.
|
||||||
|
|
||||||
|
Then, we create the UDF using the SQL command below.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
create function myfun as '/root/udf/myfun.py' outputtype double language 'Python'
|
||||||
|
```
|
||||||
|
|
||||||
|
Here is a sample output; it may vary slightly depending on the version you are using.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
taos> create function myfun as '/root/udf/myfun.py' outputtype double language 'Python';
|
||||||
|
Create OK, 0 row(s) affected (0.005202s)
|
||||||
|
```
|
||||||
|
|
||||||
|
Then, use the `show functions` command to verify that the UDF was created successfully.
|
||||||
|
|
||||||
|
```text
|
||||||
|
taos> show functions;
|
||||||
|
name |
|
||||||
|
=================================
|
||||||
|
myfun |
|
||||||
|
Query OK, 1 row(s) in set (0.005767s)
|
||||||
|
```
|
||||||
|
|
||||||
|
Next, we can test the function. Before executing the UDF, prepare some data with the commands below in the TDengine CLI.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
create database test;
|
||||||
|
create table t(ts timestamp, v1 int, v2 int, v3 int);
|
||||||
|
insert into t values('2023-05-01 12:13:14', 1, 2, 3);
|
||||||
|
insert into t values('2023-05-03 08:09:10', 2, 3, 4);
|
||||||
|
insert into t values('2023-05-10 07:06:05', 3, 4, 5);
|
||||||
|
```
|
||||||
|
|
||||||
|
Execute the UDF to test it:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
taos> select myfun(v1, v2) from t;
|
||||||
|
|
||||||
|
DB error: udf function execution failure (0.011088s)
|
||||||
|
```
|
||||||
|
|
||||||
|
Unfortunately, the UDF execution failed. We need to check the log of the `udfd` daemon to find out why.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
tail -10 /var/log/taos/udfd.log
|
||||||
|
```
|
||||||
|
|
||||||
|
Below is the output.
|
||||||
|
|
||||||
|
```text
|
||||||
|
05/24 22:46:28.733545 01665799 UDF ERROR can not load library libtaospyudf.so. error: operation not permitted
|
||||||
|
05/24 22:46:28.733561 01665799 UDF ERROR can not load python plugin. lib path libtaospyudf.so
|
||||||
|
```
|
||||||
|
|
||||||
|
From the error message we can see that `libtaospyudf.so` was not loaded successfully. Please refer to the [Prepare Environment] section above.
|
||||||
|
|
||||||
|
After correcting the environment issues, execute the UDF again:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
taos> select myfun(v1) from t;
|
||||||
|
myfun(v1) |
|
||||||
|
============================
|
||||||
|
0.693147181 |
|
||||||
|
1.609437912 |
|
||||||
|
2.302585093 |
|
||||||
|
```
|
||||||
|
|
||||||
|
Now we have finished our first UDF in Python and learned some basic debugging skills.
|
||||||
|
|
||||||
|
#### Sample 2: Abnormal Processing
|
||||||
|
|
||||||
|
The `myfun` UDF in sample 1 passed the test, but it has two drawbacks.
|
||||||
|
|
||||||
|
1. The program accepts only one column of data as input, but it doesn't throw an exception if you pass multiple columns:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
taos> select myfun(v1, v2) from t;
|
||||||
|
myfun(v1, v2) |
|
||||||
|
============================
|
||||||
|
0.693147181 |
|
||||||
|
1.609437912 |
|
||||||
|
2.302585093 |
|
||||||
|
```
|
||||||
|
|
||||||
|
2. `null` values are not handled. We expect the function to return `null` when the input is `null`.
|
||||||
|
|
||||||
|
So we optimize the process() function as below.
|
||||||
|
|
||||||
|
```python
|
||||||
|
def process(block):
|
||||||
|
rows, cols = block.shape()
|
||||||
|
if cols > 1:
|
||||||
|
raise Exception(f"require 1 parameter but given {cols}")
|
||||||
|
return [ None if block.data(i, 0) is None else log(block.data(i, 0) ** 2 + 1) for i in range(rows)]
|
||||||
|
```
|
||||||
|
|
||||||
|
Then update the UDF with the command below.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
create or replace function myfun as '/root/udf/myfun.py' outputtype double language 'Python';
|
||||||
|
```
|
||||||
|
|
||||||
|
Now, if we pass two arguments to `myfun`, the execution fails:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
taos> select myfun(v1, v2) from t;
|
||||||
|
|
||||||
|
DB error: udf function execution failure (0.014643s)
|
||||||
|
```
|
||||||
|
|
||||||
|
However, the exception is not shown to the end user; it is written to the log file `/var/log/taos/taospyudf.log`:
|
||||||
|
|
||||||
|
```text
|
||||||
|
2023-05-24 23:21:06.790 ERROR [1666188] [doPyUdfScalarProc@507] call pyUdfScalar proc function. context 0x7faade26d180. error: Exception: require 1 parameter but given 2
|
||||||
|
|
||||||
|
At:
|
||||||
|
/var/lib/taos//.udf/myfun_3_1884e1281d9.py(12): process
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
Now we have learned how to update a UDF and check its logs.
|
||||||
|
|
||||||
|
Note: Prior to TDengine 3.0.5.0, updating a UDF required restarting the `taosd` service. In 3.0.5.0 and later, restarting is no longer required.
|
||||||
|
|
||||||
|
#### Sample 3: UDF with n arguments
|
||||||
|
|
||||||
|
A UDF that accepts n integers, like (x1, x2, ..., xn), and outputs the sum of each value multiplied by its ordinal position: 1 * x1 + 2 * x2 + ... + n * xn. If the input contains `null`, the result is `null`. Unlike sample 1, this function accepts any number of columns and processes all of them. Assume the program is saved as /root/udf/nsum.py:
|
||||||
|
|
||||||
|
```python
|
||||||
|
def init():
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def destroy():
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def process(block):
|
||||||
|
rows, cols = block.shape()
|
||||||
|
result = []
|
||||||
|
for i in range(rows):
|
||||||
|
total = 0
|
||||||
|
for j in range(cols):
|
||||||
|
v = block.data(i, j)
|
||||||
|
if v is None:
|
||||||
|
total = None
|
||||||
|
break
|
||||||
|
total += (j + 1) * block.data(i, j)
|
||||||
|
result.append(total)
|
||||||
|
return result
|
||||||
|
```
|
||||||
|
|
||||||
|
Create and test the UDF:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
create function nsum as '/root/udf/nsum.py' outputtype double language 'Python';
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
taos> insert into t values('2023-05-25 09:09:15', 6, null, 8);
|
||||||
|
Insert OK, 1 row(s) affected (0.003675s)
|
||||||
|
|
||||||
|
taos> select ts, v1, v2, v3, nsum(v1, v2, v3) from t;
|
||||||
|
ts | v1 | v2 | v3 | nsum(v1, v2, v3) |
|
||||||
|
================================================================================================
|
||||||
|
2023-05-01 12:13:14.000 | 1 | 2 | 3 | 14.000000000 |
|
||||||
|
2023-05-03 08:09:10.000 | 2 | 3 | 4 | 20.000000000 |
|
||||||
|
2023-05-10 07:06:05.000 | 3 | 4 | 5 | 26.000000000 |
|
||||||
|
2023-05-25 09:09:15.000 | 6 | NULL | 8 | NULL |
|
||||||
|
Query OK, 4 row(s) in set (0.010653s)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Sample 4: Utilize 3rd party package
|
||||||
|
|
||||||
|
A UDF that accepts a timestamp and outputs the next Sunday after it. This sample requires the third-party package `moment`; install it first.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
pip3 install moment
|
||||||
|
```
|
||||||
|
|
||||||
|
Then compose the Python code in /root/udf/nextsunday.py
|
||||||
|
|
||||||
|
```python
|
||||||
|
import moment
|
||||||
|
|
||||||
|
|
||||||
|
def init():
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def destroy():
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def process(block):
|
||||||
|
rows, cols = block.shape()
|
||||||
|
if cols > 1:
|
||||||
|
raise Exception("require only 1 parameter")
|
||||||
|
if not type(block.data(0, 0)) is int:
|
||||||
|
raise Exception("type error")
|
||||||
|
return [moment.unix(block.data(i, 0)).replace(weekday=7).format('YYYY-MM-DD')
|
||||||
|
for i in range(rows)]
|
||||||
|
```
|
||||||
|
|
||||||
|
The UDF framework maps the TDengine TIMESTAMP type to a Python int, so this function accepts only an integer representing milliseconds. process() first validates the parameters, then uses `moment` to move each timestamp to the following Sunday and formats the result.
|
||||||
|
|
||||||
|
Create and test the UDF.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
create function nextsunday as '/root/udf/nextsunday.py' outputtype binary(10) language 'Python';
|
||||||
|
```
|
||||||
|
|
||||||
|
If your `taosd` is started with `systemd`, you may encounter the error below. Next, we show how to debug it.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
taos> select ts, nextsunday(ts) from t;
|
||||||
|
|
||||||
|
DB error: udf function execution failure (1.123615s)
|
||||||
|
```
|
||||||
|
|
||||||
|
```shell
|
||||||
|
tail -20 taospyudf.log
|
||||||
|
2023-05-25 11:42:34.541 ERROR [1679419] [PyUdf::PyUdf@217] py udf load module failure. error ModuleNotFoundError: No module named 'moment'
|
||||||
|
```
|
||||||
|
|
||||||
|
This is because `moment` is not in the default library search path of the Python UDF runtime. To confirm, check the search path recorded in the log file `taospyudf.log`:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
grep 'sys path' taospyudf.log | tail -1
|
||||||
|
```
|
||||||
|
|
||||||
|
```text
|
||||||
|
2023-05-25 10:58:48.554 INFO [1679419] [doPyOpen@592] python sys path: ['', '/lib/python38.zip', '/lib/python3.8', '/lib/python3.8/lib-dynload', '/lib/python3/dist-packages', '/var/lib/taos//.udf']
|
||||||
|
```
|
||||||
|
|
||||||
|
You may find that the default library search path is `/lib/python3/dist-packages`, while `moment` is installed to `/usr/local/lib/python3.8/dist-packages` (both paths may differ on your system). Now we change the library search path of the Python UDF runtime.
|
||||||
|
|
||||||
|
Check `sys.path`, which must include the packages you previously installed with pip3, as shown below:
|
||||||
|
|
||||||
|
```python
|
||||||
|
>>> import sys
|
||||||
|
>>> ":".join(sys.path)
|
||||||
|
'/usr/lib/python3.8:/usr/lib/python3.8/lib-dynload:/usr/local/lib/python3.8/dist-packages:/usr/lib/python3/dist-packages'
|
||||||
|
```
|
||||||
|
|
||||||
|
Copy the output and edit /etc/taos/taos.cfg to add the configuration parameter below.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
UdfdLdLibPath /usr/lib/python3.8:/usr/lib/python3.8/lib-dynload:/usr/local/lib/python3.8/dist-packages:/usr/lib/python3/dist-packages
|
||||||
|
```
|
||||||
|
|
||||||
|
Save the file, restart `taosd` with `systemctl restart taosd`, and test again; this time it will succeed.
|
||||||
|
|
||||||
|
Note: If your cluster consists of multiple `taosd` instances, you have to repeat the same process on each of them.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
taos> select ts, nextsunday(ts) from t;
|
||||||
|
ts | nextsunday(ts) |
|
||||||
|
===========================================
|
||||||
|
2023-05-01 12:13:14.000 | 2023-05-07 |
|
||||||
|
2023-05-03 08:09:10.000 | 2023-05-07 |
|
||||||
|
2023-05-10 07:06:05.000 | 2023-05-14 |
|
||||||
|
2023-05-25 09:09:15.000 | 2023-05-28 |
|
||||||
|
Query OK, 4 row(s) in set (1.011474s)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Sample 5: Aggregate Function
|
||||||
|
|
||||||
|
An aggregate function that calculates the difference between the maximum and the minimum in a column. An aggregate function takes multiple rows as input and outputs a single value. The execution of an aggregate UDF resembles map-reduce: the framework divides the input into multiple blocks, each mapper processes one block, and the reducer aggregates the mappers' results. In a Python UDF, reduce() plays both roles: it takes two arguments, the data block to process and the intermediate result produced by other invocations of reduce(). Assume the code is saved as `/root/udf/myspread.py`.
|
||||||
|
|
||||||
|
```python
|
||||||
|
import io
|
||||||
|
import math
|
||||||
|
import pickle
|
||||||
|
|
||||||
|
LOG_FILE: io.TextIOBase = None
|
||||||
|
|
||||||
|
|
||||||
|
def init():
|
||||||
|
global LOG_FILE
|
||||||
|
LOG_FILE = open("/var/log/taos/spread.log", "wt")
|
||||||
|
log("init function myspead success")
|
||||||
|
|
||||||
|
|
||||||
|
def log(o):
|
||||||
|
LOG_FILE.write(str(o) + '\n')
|
||||||
|
|
||||||
|
|
||||||
|
def destroy():
|
||||||
|
log("close log file: spread.log")
|
||||||
|
LOG_FILE.close()
|
||||||
|
|
||||||
|
|
||||||
|
def start():
|
||||||
|
return pickle.dumps((-math.inf, math.inf))
|
||||||
|
|
||||||
|
|
||||||
|
def reduce(block, buf):
|
||||||
|
max_number, min_number = pickle.loads(buf)
|
||||||
|
log(f"initial max_number={max_number}, min_number={min_number}")
|
||||||
|
rows, _ = block.shape()
|
||||||
|
for i in range(rows):
|
||||||
|
v = block.data(i, 0)
|
||||||
|
if v > max_number:
|
||||||
|
log(f"max_number={v}")
|
||||||
|
max_number = v
|
||||||
|
if v < min_number:
|
||||||
|
log(f"min_number={v}")
|
||||||
|
min_number = v
|
||||||
|
return pickle.dumps((max_number, min_number))
|
||||||
|
|
||||||
|
|
||||||
|
def finish(buf):
|
||||||
|
max_number, min_number = pickle.loads(buf)
|
||||||
|
return max_number - min_number
|
||||||
|
```
|
||||||
|
|
||||||
|
In this example, we implement an aggregate function and add some logging:
|
||||||
|
1. init() opens a file for logging
|
||||||
|
2. log() is the logging helper; it converts its input to a string and writes it followed by a newline
|
||||||
|
3. destroy() closes the log file
|
||||||
|
4. start() returns the initial buffer for storing the intermediate result
|
||||||
|
5. reduce() processes each data block and aggregates the result
|
||||||
|
6. finish() converts the final buffer into the final result
|
||||||
|
|
||||||
|
Create the UDF.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
create or replace aggregate function myspread as '/root/udf/myspread.py' outputtype double bufsize 128 language 'Python';
|
||||||
|
```
|
||||||
|
|
||||||
|
This SQL command differs from the one that creates a scalar UDF in two important ways:
|
||||||
|
1. the keyword `aggregate` is used
|
||||||
|
2. the keyword `bufsize` specifies the memory size for storing the intermediate result. In this example, the intermediate result occupies 32 bytes, but we specified 128 bytes for `bufsize`. You can use the `python` CLI to check the actual size:
|
||||||
|
|
||||||
|
```python
|
||||||
|
>>> len(pickle.dumps((12345.6789, 23456789.9877)))
|
||||||
|
32
|
||||||
|
```
|
||||||
|
|
||||||
|
Test the function; the result is the same as that of the built-in spread() function.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
taos> select myspread(v1) from t;
|
||||||
|
myspread(v1) |
|
||||||
|
============================
|
||||||
|
5.000000000 |
|
||||||
|
Query OK, 1 row(s) in set (0.013486s)
|
||||||
|
|
||||||
|
taos> select spread(v1) from t;
|
||||||
|
spread(v1) |
|
||||||
|
============================
|
||||||
|
5.000000000 |
|
||||||
|
Query OK, 1 row(s) in set (0.005501s)
|
||||||
|
```
|
||||||
|
|
||||||
|
Finally, check the log file: the reduce() function was executed 3 times, the max value was updated 4 times, and the min value was updated only once.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
root@slave11 /var/log/taos $ cat spread.log
|
||||||
|
init function myspread success
|
||||||
|
initial max_number=-inf, min_number=inf
|
||||||
|
max_number=1
|
||||||
|
min_number=1
|
||||||
|
initial max_number=1, min_number=1
|
||||||
|
max_number=2
|
||||||
|
max_number=3
|
||||||
|
initial max_number=3, min_number=1
|
||||||
|
max_number=6
|
||||||
|
close log file: spread.log
|
||||||
|
```
|
||||||
|
|
||||||
|
### SQL Commands
|
||||||
|
|
||||||
|
1. Create Scalar UDF
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Create Aggregate UDF
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Update Scalar UDF
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE OR REPLACE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Update Aggregate UDF
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE OR REPLACE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type BUFSIZE buf_size LANGUAGE 'Python';
|
||||||
|
```
|
||||||
|
|
||||||
|
Note: If the keyword `AGGREGATE` is used, the UDF is treated as an aggregate UDF regardless of what it was before; similarly, without the keyword, the UDF is treated as a scalar function regardless of what it was before.
|
||||||
|
|
||||||
|
5. Show the UDF
|
||||||
|
|
||||||
|
The version of a UDF is increased by one every time it's updated.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
select * from ins_functions \G;
|
||||||
|
```
|
||||||
|
|
||||||
|
6. Show and Drop existing UDF
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW functions;
|
||||||
|
DROP FUNCTION function_name;
|
||||||
|
```
|
||||||
|
|
||||||
|
### More Python UDF Samples
|
||||||
|
|
||||||
### Python UDF Sample Code
|
|
||||||
#### Scalar Function [pybitand](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pybitand.py)
|
#### Scalar Function [pybitand](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pybitand.py)
|
||||||
|
|
||||||
The `pybitand` function implements bitwise addition for multiple columns. If there is only one column, the column is returned. The `pybitand` function ignores null values.
|
The `pybitand` function implements the bitwise AND operation across multiple columns. If there is only one column, that column is returned. The `pybitand` function ignores null values.
|
||||||
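The linked file contains the actual implementation; the core idea can be sketched as a `process()` like the following (details may differ from the sample source, and init()/destroy() are omitted):

```python
def process(block):
    rows, cols = block.shape()
    result = []
    for i in range(rows):
        acc = None  # running bitwise AND; null values are ignored
        for j in range(cols):
            v = block.data(i, j)
            if v is not None:
                acc = v if acc is None else acc & v
        result.append(acc)
    return result
```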
|
@ -377,7 +865,7 @@ The `pybitand` function implements bitwise addition for multiple columns. If the
|
||||||
|
|
||||||
#### Aggregate Function [pyl2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pyl2norm.py)
|
#### Aggregate Function [pyl2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pyl2norm.py)
|
||||||
|
|
||||||
The `pyl2norm` function finds the second-order norm for all data in the input column. This squares the values, takes a cumulative sum, and finds the square root.
|
The `pyl2norm` function finds the second-order norm for all data in the input columns. This squares the values, takes a cumulative sum, and finds the square root.
|
||||||
<details>
|
<details>
|
||||||
<summary>pyl2norm.py</summary>
|
<summary>pyl2norm.py</summary>
|
||||||
|
|
||||||
|
@ -387,5 +875,16 @@ The `pyl2norm` function finds the second-order norm for all data in the input co
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
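The linked pyl2norm.py is authoritative; the computation it describes can be sketched as follows, with a running sum of squares carried in the intermediate buffer and square-rooted at the end (init()/destroy() omitted):

```python
import math
import pickle

def start() -> bytes:
    return pickle.dumps(0.0)  # running sum of squares

def reduce(block, buf: bytes) -> bytes:
    total = pickle.loads(buf)
    rows, cols = block.shape()
    for i in range(rows):
        for j in range(cols):
            v = block.data(i, j)
            if v is not None:
                total += v * v
    return pickle.dumps(total)

def finish(buf: bytes) -> float:
    return math.sqrt(pickle.loads(buf))
```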
|
#### Aggregate Function [pycumsum](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pycumsum.py)
|
||||||
|
|
||||||
|
The `pycumsum` function finds the cumulative sum for all data in the input columns.
|
||||||
|
<details>
|
||||||
|
<summary>pycumsum.py</summary>
|
||||||
|
|
||||||
|
```python
|
||||||
|
{{#include tests/script/sh/pycumsum.py}}
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
## Manage and Use UDF
|
## Manage and Use UDF
|
||||||
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../12-taos-sql/26-udf.md).
|
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../12-taos-sql/26-udf.md).
|
||||||
|
|
|
@ -63,11 +63,12 @@ serverPort 6030
|
||||||
For all the dnodes in a TDengine cluster, the below parameters must be configured exactly the same, any node whose configuration is different from dnodes already in the cluster can't join the cluster.
|
For all the dnodes in a TDengine cluster, the following parameters must be configured identically; any node whose configuration differs from the dnodes already in the cluster cannot join it.
|
||||||
|
|
||||||
| **#** | **Parameter** | **Definition** |
|
| **#** | **Parameter** | **Definition** |
|
||||||
| ----- | ------------------ | ------------------------------------------- |
|
| ----- | ---------------- | ----------------------------------------------------------------------------- |
|
||||||
| 1 | statusInterval | The interval by which dnode reports its status to mnode |
|
| 1 | statusInterval | The interval by which dnode reports its status to mnode |
|
||||||
| 2 | timezone | Timezone |
|
| 2 | timezone | Timezone |
|
||||||
| 3 | locale | System region and encoding |
|
| 3 | locale | System region and encoding |
|
||||||
| 4 | charset | Character set |
|
| 4 | charset | Character set |
|
||||||
|
| 5 | ttlChangeOnWrite | Whether the TTL expiration time changes with table modification operations |
|
||||||
|
|
||||||
## Start Cluster
|
## Start Cluster
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
---
|
---
|
||||||
title: Deploying TDengine with Docker
|
title: Deploying TDengine with Docker
|
||||||
|
sidebar_label: Docker
|
||||||
description: This chapter describes how to start and access TDengine in a Docker container.
|
description: This chapter describes how to start and access TDengine in a Docker container.
|
||||||
---
|
---
|
||||||
|
|
||||||
|
@ -10,8 +11,17 @@ This chapter describes how to start the TDengine service in a container and acce
|
||||||
The TDengine image starts with the HTTP service activated by default, using the following command:
|
The TDengine image starts with the HTTP service activated by default, using the following command:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
docker run -d --name tdengine -p 6041:6041 tdengine/tdengine
|
docker run -d --name tdengine \
|
||||||
|
-v ~/data/taos/dnode/data:/var/lib/taos \
|
||||||
|
-v ~/data/taos/dnode/log:/var/log/taos \
|
||||||
|
-p 6041:6041 tdengine/tdengine
|
||||||
```
|
```
|
||||||
|
:::note
|
||||||
|
|
||||||
|
* /var/lib/taos: TDengine's default data directory. The location can be changed via the configuration file. You can also change ~/data/taos/dnode/data to any other empty local data directory
|
||||||
|
* /var/log/taos: TDengine's default log directory. The location can be changed via the configuration file. You can also change ~/data/taos/dnode/log to any other empty local log directory
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command.
|
The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command.
|
||||||
|
|
||||||
|
@ -283,39 +293,38 @@ services:
|
||||||
environment:
|
environment:
|
||||||
TAOS_FQDN: "td-1"
|
TAOS_FQDN: "td-1"
|
||||||
TAOS_FIRST_EP: "td-1"
|
TAOS_FIRST_EP: "td-1"
|
||||||
|
ports:
|
||||||
|
- 6041:6041
|
||||||
|
- 6030:6030
|
||||||
volumes:
|
volumes:
|
||||||
- taosdata-td1:/var/lib/taos/
|
# /var/lib/taos: TDengine's default data directory. The location can be changed via the configuration file. You can modify ~/data/taos/dnode1/data to your own data directory
|
||||||
- taoslog-td1:/var/log/taos/
|
- ~/data/taos/dnode1/data:/var/lib/taos
|
||||||
|
# /var/log/taos: TDengine's default log directory. The location can be changed via the configuration file. You can modify ~/data/taos/dnode1/log to your own log directory
|
||||||
|
- ~/data/taos/dnode1/log:/var/log/taos
|
||||||
td-2:
|
td-2:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
environment:
|
environment:
|
||||||
TAOS_FQDN: "td-2"
|
TAOS_FQDN: "td-2"
|
||||||
TAOS_FIRST_EP: "td-1"
|
TAOS_FIRST_EP: "td-1"
|
||||||
volumes:
|
volumes:
|
||||||
- taosdata-td2:/var/lib/taos/
|
- ~/data/taos/dnode2/data:/var/lib/taos
|
||||||
- taoslog-td2:/var/log/taos/
|
- ~/data/taos/dnode2/log:/var/log/taos
|
||||||
td-3:
|
td-3:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
environment:
|
environment:
|
||||||
TAOS_FQDN: "td-3"
|
TAOS_FQDN: "td-3"
|
||||||
TAOS_FIRST_EP: "td-1"
|
TAOS_FIRST_EP: "td-1"
|
||||||
volumes:
|
volumes:
|
||||||
- taosdata-td3:/var/lib/taos/
|
- ~/data/taos/dnode3/data:/var/lib/taos
|
||||||
- taoslog-td3:/var/log/taos/
|
- ~/data/taos/dnode3/log:/var/log/taos
|
||||||
volumes:
|
|
||||||
taosdata-td1:
|
|
||||||
taoslog-td1:
|
|
||||||
taosdata-td2:
|
|
||||||
taoslog-td2:
|
|
||||||
taosdata-td3:
|
|
||||||
taoslog-td3:
|
|
||||||
```
|
```
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
|
|
||||||
- The `VERSION` environment variable is used to set the tdengine image tag
|
- The `VERSION` environment variable is used to set the tdengine image tag
|
||||||
- `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time
|
- `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time
|
||||||
:::
|
|
||||||
|
:::
|
||||||
|
|
||||||
2. Start the cluster
|
2. Start the cluster
|
||||||
|
|
||||||
|
@ -382,24 +391,22 @@ networks:
|
||||||
services:
|
services:
|
||||||
td-1:
|
td-1:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
networks:
|
|
||||||
- inter
|
|
||||||
environment:
|
environment:
|
||||||
TAOS_FQDN: "td-1"
|
TAOS_FQDN: "td-1"
|
||||||
TAOS_FIRST_EP: "td-1"
|
TAOS_FIRST_EP: "td-1"
|
||||||
volumes:
|
volumes:
|
||||||
- taosdata-td1:/var/lib/taos/
|
# /var/lib/taos: TDengine's default data directory. The location can be changed via the configuration file. You can modify ~/data/taos/dnode1/data to your own data directory
|
||||||
- taoslog-td1:/var/log/taos/
|
- ~/data/taos/dnode1/data:/var/lib/taos
|
||||||
|
# /var/log/taos: TDengine's default log directory. The location can be changed via the configuration file. You can modify ~/data/taos/dnode1/log to your own log directory
|
||||||
|
- ~/data/taos/dnode1/log:/var/log/taos
|
||||||
td-2:
|
td-2:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
networks:
|
|
||||||
- inter
|
|
||||||
environment:
|
environment:
|
||||||
TAOS_FQDN: "td-2"
|
TAOS_FQDN: "td-2"
|
||||||
TAOS_FIRST_EP: "td-1"
|
TAOS_FIRST_EP: "td-1"
|
||||||
volumes:
|
volumes:
|
||||||
- taosdata-td2:/var/lib/taos/
|
- ~/data/taos/dnode2/data:/var/lib/taos
|
||||||
- taoslog-td2:/var/log/taos/
|
- ~/data/taos/dnode2/log:/var/log/taos
|
||||||
adapter:
|
adapter:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
entrypoint: "taosadapter"
|
entrypoint: "taosadapter"
|
||||||
|
@ -431,11 +438,6 @@ services:
|
||||||
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
|
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
|
||||||
nginx -g 'daemon off;'",
|
nginx -g 'daemon off;'",
|
||||||
]
|
]
|
||||||
volumes:
|
|
||||||
taosdata-td1:
|
|
||||||
taoslog-td1:
|
|
||||||
taosdata-td2:
|
|
||||||
taoslog-td2:
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Deploy with docker swarm
|
## Deploy with docker swarm
|
|
@ -4,23 +4,31 @@ sidebar_label: Kubernetes
|
||||||
description: This document describes how to deploy TDengine on Kubernetes.
|
description: This document describes how to deploy TDengine on Kubernetes.
|
||||||
---
|
---
|
||||||
|
|
||||||
TDengine is a cloud-native time-series database that can be deployed on Kubernetes. This document gives a step-by-step description of how you can use YAML files to create a TDengine cluster and introduces common operations for TDengine in a Kubernetes environment.
|
## Overview
|
||||||
|
|
||||||
|
As a time-series database designed for cloud-native architectures, TDengine supports Kubernetes deployment. Here we introduce, step by step, how to use YAML files to create a highly available TDengine cluster from scratch for production use, and highlight common operations on TDengine in a Kubernetes environment.
|
||||||
|
|
||||||
|
To meet [high availability](https://docs.taosdata.com/tdinternal/high-availability/) requirements, the cluster needs to satisfy the following:
|
||||||
|
|
||||||
|
- 3 or more dnodes: TDengine does not allow multiple vnodes of the same vgroup to reside on one dnode, so if you create a database with 3 replicas, the number of dnodes must be greater than or equal to 3
|
||||||
|
- 3 mnodes: the mnode is responsible for managing the entire TDengine cluster. By default, a TDengine cluster has only one mnode; if the dnode hosting that mnode is dropped, the entire cluster becomes unavailable.
|
||||||
|
- A database with 3 replicas: TDengine replication is configured per database, so a database with 3 replicas requires at least 3 dnodes in the cluster. If any one dnode goes offline, the cluster can still be used normally. **If 2 dnodes are offline, the cluster becomes unavailable, because the RAFT-based election can no longer complete.** (Enterprise edition: in disaster recovery scenarios, if the data files on any node are damaged, the node can be restored by restarting its dnode.)
|
||||||
|
|
||||||
## Prerequisites
|
## Prerequisites
|
||||||
|
|
||||||
Before deploying TDengine on Kubernetes, perform the following:
|
Before deploying TDengine on Kubernetes, perform the following:
|
||||||
|
|
||||||
* Current steps are compatible with Kubernetes v1.5 and later version.
|
- This document applies to Kubernetes v1.19 and above
|
||||||
* Install and configure minikube, kubectl, and helm.
|
- This document uses the **kubectl** tool for installation and deployment; install it in advance
|
||||||
* Install and deploy Kubernetes and ensure that it can be accessed and used normally. Update any container registries or other services as necessary.
|
- Kubernetes has been installed and deployed, and the necessary container registries or other services can be accessed or updated
|
||||||
|
|
||||||
You can download the configuration files in this document from [GitHub](https://github.com/taosdata/TDengine-Operator/tree/3.0/src/tdengine).
|
You can download the configuration files in this document from [GitHub](https://github.com/taosdata/TDengine-Operator/tree/3.0/src/tdengine).
|
||||||
|
|
||||||
## Configure the service
|
## Configure the service
|
||||||
|
|
||||||
Create a service configuration file named `taosd-service.yaml`. Record the value of `metadata.name` (in this example, `taos`) for use in the next step. Add the ports required by TDengine:
|
Create a service configuration file named `taosd-service.yaml`. Record the value of `metadata.name` (in this example, `taos`) for use in the next step. Then add the ports required by TDengine and record the value of the selector label "app" (in this example, `tdengine`) as well:
|
||||||
|
|
||||||
```yaml
|
```YAML
|
||||||
---
|
---
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: Service
|
kind: Service
|
||||||
|
@ -31,10 +39,10 @@ metadata:
|
||||||
spec:
|
spec:
|
||||||
ports:
|
ports:
|
||||||
- name: tcp6030
|
- name: tcp6030
|
||||||
- protocol: "TCP"
|
protocol: "TCP"
|
||||||
port: 6030
|
port: 6030
|
||||||
- name: tcp6041
|
- name: tcp6041
|
||||||
- protocol: "TCP"
|
protocol: "TCP"
|
||||||
port: 6041
|
port: 6041
|
||||||
selector:
|
selector:
|
||||||
app: "tdengine"
|
app: "tdengine"
|
||||||
|
@ -42,10 +50,11 @@ spec:
|
||||||
|
|
||||||
## Configure the service as StatefulSet
|
## Configure the service as StatefulSet
|
||||||
|
|
||||||
Configure the TDengine service as a StatefulSet.
|
Following the Kubernetes guidance on deployment resource types, we use a StatefulSet as the deployment resource for TDengine. Create the file `tdengine.yaml`, in which `replicas` sets the number of cluster nodes to 3. The node time zone is set to Asia/Shanghai, and each node is allocated 5 GB of standard storage (refer to the [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) documentation for storage class configuration). You can adjust these values to fit your actual situation.
|
||||||
Create the `tdengine.yaml` file and set `replicas` to 3. In this example, the region is set to Asia/Shanghai and 10 GB of standard storage are allocated per node. You can change the configuration based on your environment and business requirements.
|
|
||||||
|
|
||||||
```yaml
|
Please pay special attention to the startupProbe configuration. If a dnode's Pod goes down for a while and then restarts, the newly launched Pod is temporarily unavailable. If the startupProbe window is too small, Kubernetes will consider the Pod abnormal and keep restarting it, so the dnode's Pod restarts frequently and never returns to a normal state. Refer to [Configure Liveness, Readiness and Startup Probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/).
|
||||||
|
|
||||||
|
```YAML
|
||||||
---
|
---
|
||||||
apiVersion: apps/v1
|
apiVersion: apps/v1
|
||||||
kind: StatefulSet
|
kind: StatefulSet
|
||||||
|
@ -69,14 +78,14 @@ spec:
|
||||||
spec:
|
spec:
|
||||||
containers:
|
containers:
|
||||||
- name: "tdengine"
|
- name: "tdengine"
|
||||||
image: "tdengine/tdengine:3.0.0.0"
|
image: "tdengine/tdengine:3.0.7.1"
|
||||||
imagePullPolicy: "IfNotPresent"
|
imagePullPolicy: "IfNotPresent"
|
||||||
ports:
|
ports:
|
||||||
- name: tcp6030
|
- name: tcp6030
|
||||||
- protocol: "TCP"
|
protocol: "TCP"
|
||||||
containerPort: 6030
|
containerPort: 6030
|
||||||
- name: tcp6041
|
- name: tcp6041
|
||||||
- protocol: "TCP"
|
protocol: "TCP"
|
||||||
containerPort: 6041
|
containerPort: 6041
|
||||||
env:
|
env:
|
||||||
# POD_NAME for FQDN config
|
# POD_NAME for FQDN config
|
||||||
|
@ -102,12 +111,18 @@ spec:
|
||||||
# Must set if you want a cluster.
|
# Must set if you want a cluster.
|
||||||
- name: TAOS_FIRST_EP
|
- name: TAOS_FIRST_EP
|
||||||
value: "$(STS_NAME)-0.$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local:$(TAOS_SERVER_PORT)"
|
value: "$(STS_NAME)-0.$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local:$(TAOS_SERVER_PORT)"
|
||||||
# TAOS_FQDN should always be set in k8s env.
|
# TAOS_FQDN should always be set in k8s env.
|
||||||
- name: TAOS_FQDN
|
- name: TAOS_FQDN
|
||||||
value: "$(POD_NAME).$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local"
|
value: "$(POD_NAME).$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local"
|
||||||
volumeMounts:
|
volumeMounts:
|
||||||
- name: taosdata
|
- name: taosdata
|
||||||
mountPath: /var/lib/taos
|
mountPath: /var/lib/taos
|
||||||
|
startupProbe:
|
||||||
|
exec:
|
||||||
|
command:
|
||||||
|
- taos-check
|
||||||
|
failureThreshold: 360
|
||||||
|
periodSeconds: 10
|
||||||
readinessProbe:
|
readinessProbe:
|
||||||
exec:
|
exec:
|
||||||
command:
|
command:
|
||||||
|
@ -129,266 +144,401 @@ spec:
|
||||||
storageClassName: "standard"
|
storageClassName: "standard"
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
storage: "10Gi"
|
storage: "5Gi"
|
||||||
```
|
```
|
||||||
|
|
||||||
## Use kubectl to deploy TDengine
|
## Use kubectl to deploy TDengine
|
||||||
|
|
||||||
Run the following commands:
|
First create the corresponding namespace, and then execute the following commands in sequence:
|
||||||
|
|
||||||
```bash
|
```Bash
|
||||||
kubectl apply -f taosd-service.yaml
|
kubectl apply -f taosd-service.yaml -n tdengine-test
|
||||||
kubectl apply -f tdengine.yaml
|
kubectl apply -f tdengine.yaml -n tdengine-test
|
||||||
```
|
```
|
||||||
|
|
||||||
The preceding configuration generates a TDengine cluster with three nodes in which dnodes are automatically configured. You can run the `show dnodes` command to query the nodes in the cluster:
|
The above configuration generates a three-node TDengine cluster with dnodes configured automatically. You can use the **show dnodes** command to view the nodes of the current cluster:
|
||||||
|
|
||||||
```bash
|
```Bash
|
||||||
kubectl exec -i -t tdengine-0 -- taos -s "show dnodes"
|
kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "show dnodes"
|
||||||
kubectl exec -i -t tdengine-1 -- taos -s "show dnodes"
|
kubectl exec -it tdengine-1 -n tdengine-test -- taos -s "show dnodes"
|
||||||
kubectl exec -i -t tdengine-2 -- taos -s "show dnodes"
|
kubectl exec -it tdengine-2 -n tdengine-test -- taos -s "show dnodes"
|
||||||
```
|
```
|
||||||
|
|
||||||
The output is as follows:
|
The output is as follows:
|
||||||
|
|
||||||
```
|
```Bash
|
||||||
taos> show dnodes
|
taos> show dnodes
|
||||||
id | endpoint | vnodes | support_vnodes | status | create_time | note |
|
id | endpoint | vnodes | support_vnodes | status | create_time | reboot_time | note | active_code | c_active_code |
|
||||||
============================================================================================================================================
|
=============================================================================================================================================================================================================================================
|
||||||
1 | tdengine-0.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:14:57.285 | |
|
1 | tdengine-0.ta... | 0 | 16 | ready | 2023-07-19 17:54:18.552 | 2023-07-19 17:54:18.469 | | | |
|
||||||
2 | tdengine-1.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:11.302 | |
|
2 | tdengine-1.ta... | 0 | 16 | ready | 2023-07-19 17:54:37.828 | 2023-07-19 17:54:38.698 | | | |
|
||||||
3 | tdengine-2.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:23.290 | |
|
3 | tdengine-2.ta... | 0 | 16 | ready | 2023-07-19 17:55:01.141 | 2023-07-19 17:55:02.039 | | | |
|
||||||
Query OK, 3 rows in database (0.003655s)
|
Query OK, 3 row(s) in set (0.001853s)
|
||||||
|
```
|
||||||
|
|
||||||
|
View the current mnode
|
||||||
|
|
||||||
|
```Bash
|
||||||
|
kubectl exec -it tdengine-1 -n tdengine-test -- taos -s "show mnodes\G"
|
||||||
|
taos> show mnodes\G
|
||||||
|
*************************** 1.row ***************************
|
||||||
|
id: 1
|
||||||
|
endpoint: tdengine-0.taosd.tdengine-test.svc.cluster.local:6030
|
||||||
|
role: leader
|
||||||
|
status: ready
|
||||||
|
create_time: 2023-07-19 17:54:18.559
|
||||||
|
reboot_time: 2023-07-19 17:54:19.520
|
||||||
|
Query OK, 1 row(s) in set (0.001282s)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Create mnode
|
||||||
|
|
||||||
|
```Bash
|
||||||
|
kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "create mnode on dnode 2"
|
||||||
|
kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "create mnode on dnode 3"
|
||||||
|
```
|
||||||
|
|
||||||
|
View mnode
|
||||||
|
|
||||||
|
```Bash
|
||||||
|
kubectl exec -it tdengine-1 -n tdengine-test -- taos -s "show mnodes\G"
|
||||||
|
|
||||||
|
taos> show mnodes\G
|
||||||
|
*************************** 1.row ***************************
|
||||||
|
id: 1
|
||||||
|
endpoint: tdengine-0.taosd.tdengine-test.svc.cluster.local:6030
|
||||||
|
role: leader
|
||||||
|
status: ready
|
||||||
|
create_time: 2023-07-19 17:54:18.559
|
||||||
|
reboot_time: 2023-07-20 09:19:36.060
|
||||||
|
*************************** 2.row ***************************
|
||||||
|
id: 2
|
||||||
|
endpoint: tdengine-1.taosd.tdengine-test.svc.cluster.local:6030
|
||||||
|
role: follower
|
||||||
|
status: ready
|
||||||
|
create_time: 2023-07-20 09:22:05.600
|
||||||
|
reboot_time: 2023-07-20 09:22:12.838
|
||||||
|
*************************** 3.row ***************************
|
||||||
|
id: 3
|
||||||
|
endpoint: tdengine-2.taosd.tdengine-test.svc.cluster.local:6030
|
||||||
|
role: follower
|
||||||
|
status: ready
|
||||||
|
create_time: 2023-07-20 09:22:20.042
|
||||||
|
reboot_time: 2023-07-20 09:22:23.271
|
||||||
|
Query OK, 3 row(s) in set (0.003108s)
|
||||||
```
|
```
|
||||||
|
|
||||||
## Enable port forwarding
|
## Enable port forwarding
|
||||||
|
|
||||||
The kubectl port forwarding feature allows applications to access the TDengine cluster running on Kubernetes.
|
Kubectl port forwarding enables applications to access TDengine clusters running in Kubernetes environments.
|
||||||
|
|
||||||
```
|
```bash
|
||||||
kubectl port-forward tdengine-0 6041:6041 &
|
kubectl port-forward -n tdengine-test tdengine-0 6041:6041 &
|
||||||
```
|
```
|
||||||
|
|
||||||
Use curl to verify that the TDengine REST API is working on port 6041:
|
Use **curl** to verify that the TDengine REST API is working on port 6041:
|
||||||
|
|
||||||
```
|
```bash
|
||||||
$ curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
|
curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
|
||||||
Handling connection for 6041
|
{"code":0,"column_meta":[["name","VARCHAR",64]],"data":[["information_schema"],["performance_schema"],["test"],["test1"]],"rows":4}
|
||||||
{"code":0,"column_meta":[["name","VARCHAR",64],["create_time","TIMESTAMP",8],["vgroups","SMALLINT",2],["ntables","BIGINT",8],["replica","TINYINT",1],["strict","VARCHAR",4],["duration","VARCHAR",10],["keep","VARCHAR",32],["buffer","INT",4],["pagesize","INT",4],["pages","INT",4],["minrows","INT",4],["maxrows","INT",4],["comp","TINYINT",1],["precision","VARCHAR",2],["status","VARCHAR",10],["retention","VARCHAR",60],["single_stable","BOOL",1],["cachemodel","VARCHAR",11],["cachesize","INT",4],["wal_level","TINYINT",1],["wal_fsync_period","INT",4],["wal_retention_period","INT",4],["wal_retention_size","BIGINT",8],["wal_roll_period","INT",4],["wal_segment_size","BIGINT",8]],"data":[["information_schema",null,null,16,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null],["performance_schema",null,null,10,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null]],"rows":2}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Enable the dashboard for visualization
|
## Test cluster
|
||||||
|
|
||||||
The minikube dashboard command enables visualized cluster management.
|
### Data preparation
|
||||||
|
|
||||||
```
|
#### taosBenchmark
|
||||||
$ minikube dashboard
|
|
||||||
* Verifying dashboard health ...
|
Create a database with 3 replicas using taosBenchmark, write 100 million rows of data into it, and then query the data:
|
||||||
* Launching proxy ...
|
|
||||||
* Verifying proxy health ...
|
```Bash
|
||||||
* Opening http://127.0.0.1:46617/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ in your default browser...
|
kubectl exec -it tdengine-0 -n tdengine-test -- taosBenchmark -I stmt -d test -n 10000 -t 10000 -a 3
|
||||||
http://127.0.0.1:46617/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/
|
|
||||||
|
# query data
|
||||||
|
kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "select count(*) from test.meters;"
|
||||||
|
|
||||||
|
taos> select count(*) from test.meters;
|
||||||
|
count(*) |
|
||||||
|
========================
|
||||||
|
100000000 |
|
||||||
|
Query OK, 1 row(s) in set (0.103537s)
|
||||||
```
|
```
|
||||||
|
|
||||||
In some public clouds, minikube cannot be remotely accessed if it is bound to 127.0.0.1. In this case, use the kubectl proxy command to map the port to 0.0.0.0. Then, you can access the dashboard by using a web browser to open the dashboard URL above on the public IP address and port of the virtual machine.
|
View the vnode distribution with `show dnodes`:
|
||||||
|
|
||||||
|
```Bash
|
||||||
|
kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "show dnodes"
|
||||||
|
|
||||||
|
taos> show dnodes
|
||||||
|
id | endpoint | vnodes | support_vnodes | status | create_time | reboot_time | note | active_code | c_active_code |
|
||||||
|
=============================================================================================================================================================================================================================================
|
||||||
|
1 | tdengine-0.ta... | 8 | 16 | ready | 2023-07-19 17:54:18.552 | 2023-07-19 17:54:18.469 | | | |
|
||||||
|
2 | tdengine-1.ta... | 8 | 16 | ready | 2023-07-19 17:54:37.828 | 2023-07-19 17:54:38.698 | | | |
|
||||||
|
3 | tdengine-2.ta... | 8 | 16 | ready | 2023-07-19 17:55:01.141 | 2023-07-19 17:55:02.039 | | | |
|
||||||
|
Query OK, 3 row(s) in set (0.001357s)
|
||||||
```
|
```
|
||||||
$ kubectl proxy --accept-hosts='^.*$' --address='0.0.0.0'
|
|
||||||
|
View the vnode distribution with `show test.vgroups`:
|
||||||
|
|
||||||
|
```Bash
|
||||||
|
kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "show test.vgroups"
|
||||||
|
|
||||||
|
taos> show test.vgroups
|
||||||
|
vgroup_id | db_name | tables | v1_dnode | v1_status | v2_dnode | v2_status | v3_dnode | v3_status | v4_dnode | v4_status | cacheload | cacheelements | tsma |
|
||||||
|
==============================================================================================================================================================================================
|
||||||
|
2 | test | 1267 | 1 | follower | 2 | follower | 3 | leader | NULL | NULL | 0 | 0 | 0 |
|
||||||
|
3 | test | 1215 | 1 | follower | 2 | leader | 3 | follower | NULL | NULL | 0 | 0 | 0 |
|
||||||
|
4 | test | 1215 | 1 | leader | 2 | follower | 3 | follower | NULL | NULL | 0 | 0 | 0 |
|
||||||
|
5 | test | 1307 | 1 | follower | 2 | leader | 3 | follower | NULL | NULL | 0 | 0 | 0 |
|
||||||
|
6 | test | 1245 | 1 | follower | 2 | follower | 3 | leader | NULL | NULL | 0 | 0 | 0 |
|
||||||
|
7 | test | 1275 | 1 | follower | 2 | leader | 3 | follower | NULL | NULL | 0 | 0 | 0 |
|
||||||
|
8 | test | 1231 | 1 | leader | 2 | follower | 3 | follower | NULL | NULL | 0 | 0 | 0 |
|
||||||
|
9 | test | 1245 | 1 | follower | 2 | follower | 3 | leader | NULL | NULL | 0 | 0 | 0 |
|
||||||
|
Query OK, 8 row(s) in set (0.001488s)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### Manual creation
|
||||||
|
|
||||||
|
Create a database test1 with three replicas, create a table, and write 2 rows of data:
|
||||||
|
|
||||||
|
```Bash
|
||||||
|
kubectl exec -it tdengine-0 -n tdengine-test -- \
|
||||||
|
taos -s \
|
||||||
|
"create database if not exists test1 replica 3;
|
||||||
|
use test1;
|
||||||
|
create table if not exists t1(ts timestamp, n int);
|
||||||
|
insert into t1 values(now, 1)(now+1s, 2);"
|
||||||
|
```
|
||||||
|
|
||||||
|
View the vnode distribution with `show test1.vgroups`:
|
||||||
|
|
||||||
|
```Bash
|
||||||
|
kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "show test1.vgroups"
|
||||||
|
|
||||||
|
taos> show test1.vgroups
|
||||||
|
vgroup_id | db_name | tables | v1_dnode | v1_status | v2_dnode | v2_status | v3_dnode | v3_status | v4_dnode | v4_status | cacheload | cacheelements | tsma |
|
||||||
|
==============================================================================================================================================================================================
|
||||||
|
10 | test1 | 1 | 1 | follower | 2 | follower | 3 | leader | NULL | NULL | 0 | 0 | 0 |
|
||||||
|
11 | test1 | 0 | 1 | follower | 2 | leader | 3 | follower | NULL | NULL | 0 | 0 | 0 |
|
||||||
|
Query OK, 2 row(s) in set (0.001489s)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Test fault tolerance
|
||||||
|
|
||||||
|
Disconnect the dnode where the mnode leader is located, i.e. dnode 1 (Pod tdengine-0):
|
||||||
|
|
||||||
|
```Bash
|
||||||
|
kubectl get pod -l app=tdengine -n tdengine-test -o wide
|
||||||
|
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
|
||||||
|
tdengine-0 0/1 ErrImagePull 2 (2s ago) 20m 10.244.2.75 node86 <none> <none>
|
||||||
|
tdengine-1 1/1 Running 1 (6m48s ago) 20m 10.244.0.59 node84 <none> <none>
|
||||||
|
tdengine-2 1/1 Running 0 21m 10.244.1.223 node85 <none> <none>
|
||||||
|
```
|
||||||
|
|
||||||
|
At this point the cluster mnodes hold a re-election, and the mnode on dnode 2 (pod tdengine-1) becomes the leader.

```Bash
kubectl exec -it tdengine-1 -n tdengine-test -- taos -s "show mnodes\G"

Welcome to the TDengine Command Line Interface, Client Version:3.0.7.1.202307190706
Copyright (c) 2022 by TDengine, all rights reserved.

taos> show mnodes\G
*************************** 1.row ***************************
id: 1
endpoint: tdengine-0.taosd.tdengine-test.svc.cluster.local:6030
role: offline
status: offline
create_time: 2023-07-19 17:54:18.559
reboot_time: 1970-01-01 08:00:00.000
*************************** 2.row ***************************
id: 2
endpoint: tdengine-1.taosd.tdengine-test.svc.cluster.local:6030
role: leader
status: ready
create_time: 2023-07-20 09:22:05.600
reboot_time: 2023-07-20 09:32:00.227
*************************** 3.row ***************************
id: 3
endpoint: tdengine-2.taosd.tdengine-test.svc.cluster.local:6030
role: follower
status: ready
create_time: 2023-07-20 09:22:20.042
reboot_time: 2023-07-20 09:32:00.026
Query OK, 3 row(s) in set (0.001513s)
```
The cluster can still read and write normally:

```Bash
# insert
kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "insert into test1.t1 values(now, 1)(now+1s, 2);"

taos> insert into test1.t1 values(now, 1)(now+1s, 2);
Insert OK, 2 row(s) affected (0.002098s)

# select
kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "select * from test1.t1"

taos> select * from test1.t1
ts | n |
========================================
2023-07-19 18:04:58.104 | 1 |
2023-07-19 18:04:59.104 | 2 |
2023-07-19 18:06:00.303 | 1 |
2023-07-19 18:06:01.303 | 2 |
Query OK, 4 row(s) in set (0.001994s)
```

Similarly, if a non-leader mnode is dropped, reads and writes continue to work normally, so that case is not demonstrated here.
## Scaling Out Your Cluster

A TDengine cluster supports automatic expansion:

```Bash
kubectl scale statefulsets tdengine --replicas=4
```

The parameter `--replicas=4` in the command above expands the TDengine cluster to four nodes. After running it, first check the pod status:

```Bash
kubectl get pod -l app=tdengine -n tdengine-test -o wide
```

The output is as follows:

```Plain
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
tdengine-0 1/1 Running 4 (6h26m ago) 6h53m 10.244.2.75 node86 <none> <none>
tdengine-1 1/1 Running 1 (6h39m ago) 6h53m 10.244.0.59 node84 <none> <none>
tdengine-2 1/1 Running 0 5h16m 10.244.1.224 node85 <none> <none>
tdengine-3 1/1 Running 0 3m24s 10.244.2.76 node86 <none> <none>
```

At this point the pod status is still Running; the dnode state in the TDengine cluster can be seen only after the pod status becomes `Ready`:

```Bash
kubectl exec -it tdengine-3 -n tdengine-test -- taos -s "show dnodes"
```

The dnode list of the expanded four-node TDengine cluster:

```Plain
taos> show dnodes
id | endpoint | vnodes | support_vnodes | status | create_time | reboot_time | note | active_code | c_active_code |
=============================================================================================================================================================================================================================================
1 | tdengine-0.ta... | 10 | 16 | ready | 2023-07-19 17:54:18.552 | 2023-07-20 09:39:04.297 | | | |
2 | tdengine-1.ta... | 10 | 16 | ready | 2023-07-19 17:54:37.828 | 2023-07-20 09:28:24.240 | | | |
3 | tdengine-2.ta... | 10 | 16 | ready | 2023-07-19 17:55:01.141 | 2023-07-20 10:48:43.445 | | | |
4 | tdengine-3.ta... | 0 | 16 | ready | 2023-07-20 16:01:44.007 | 2023-07-20 16:01:44.889 | | | |
Query OK, 4 row(s) in set (0.003628s)
```
## Scaling In Your Cluster

When you scale in a TDengine cluster, data is migrated between nodes. You must therefore run the "drop dnode" command in TDengine before scaling in with **kubectl** (**if the cluster contains a database with 3 replicas, the number of dnodes remaining after the reduction must still be greater than or equal to 3, otherwise the drop dnode operation will be aborted**). Remove the node first, then scale in the Kubernetes cluster.

Note: Since Kubernetes pods in a StatefulSet can only be removed in reverse order of creation, TDengine dnodes must also be dropped in reverse order of creation; otherwise the pod will end up in an error state.

```Bash
kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "drop dnode 4"
kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "show dnodes"

taos> show dnodes
id | endpoint | vnodes | support_vnodes | status | create_time | reboot_time | note | active_code | c_active_code |
=============================================================================================================================================================================================================================================
1 | tdengine-0.ta... | 10 | 16 | ready | 2023-07-19 17:54:18.552 | 2023-07-20 09:39:04.297 | | | |
2 | tdengine-1.ta... | 10 | 16 | ready | 2023-07-19 17:54:37.828 | 2023-07-20 09:28:24.240 | | | |
3 | tdengine-2.ta... | 10 | 16 | ready | 2023-07-19 17:55:01.141 | 2023-07-20 10:48:43.445 | | | |
Query OK, 3 row(s) in set (0.003324s)
```
After confirming that the removal succeeded (check the dnode list with `kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "show dnodes"`), remove the pod with kubectl:

```Plain
kubectl scale statefulsets tdengine --replicas=3 -n tdengine-test
```

The last pod created will be deleted. Check the pod status:

```Plain
kubectl get pod -l app=tdengine -n tdengine-test -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
tdengine-0 1/1 Running 4 (6h55m ago) 7h22m 10.244.2.75 node86 <none> <none>
tdengine-1 1/1 Running 1 (7h9m ago) 7h23m 10.244.0.59 node84 <none> <none>
tdengine-2 1/1 Running 0 5h45m 10.244.1.224 node85 <none> <none>
```

After the pod is deleted, the PVC must be deleted manually; otherwise the old data will be reused at the next scale-out, and the new pod will fail to join the cluster normally.

```Bash
kubectl delete pvc taosdata-tdengine-3 -n tdengine-test
```

The cluster is now in a safe state and can be scaled out again when needed.
```Bash
kubectl scale statefulsets tdengine --replicas=4 -n tdengine-test

statefulset.apps/tdengine scaled

kubectl get pod -l app=tdengine -n tdengine-test -o wide

NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
tdengine-0 1/1 Running 4 (6h59m ago) 7h27m 10.244.2.75 node86 <none> <none>
tdengine-1 1/1 Running 1 (7h13m ago) 7h27m 10.244.0.59 node84 <none> <none>
tdengine-2 1/1 Running 0 5h49m 10.244.1.224 node85 <none> <none>
tdengine-3 1/1 Running 0 20s 10.244.2.77 node86 <none> <none>

kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "show dnodes"

taos> show dnodes
id | endpoint | vnodes | support_vnodes | status | create_time | reboot_time | note | active_code | c_active_code |
=============================================================================================================================================================================================================================================
1 | tdengine-0.ta... | 10 | 16 | ready | 2023-07-19 17:54:18.552 | 2023-07-20 09:39:04.297 | | | |
2 | tdengine-1.ta... | 10 | 16 | ready | 2023-07-19 17:54:37.828 | 2023-07-20 09:28:24.240 | | | |
3 | tdengine-2.ta... | 10 | 16 | ready | 2023-07-19 17:55:01.141 | 2023-07-20 10:48:43.445 | | | |
5 | tdengine-3.ta... | 0 | 16 | ready | 2023-07-20 16:31:34.092 | 2023-07-20 16:38:17.419 | | | |
Query OK, 4 row(s) in set (0.003881s)
```
## Remove a TDengine Cluster

> **When deleting the PVC, pay attention to the PV persistentVolumeReclaimPolicy. It is recommended to set it to Delete, so that when the PVC is deleted the PV is automatically cleaned up, along with the underlying CSI storage resources. If the policy of automatically cleaning up the PV on PVC deletion is not configured, then after deleting the PVC, manually cleaning up the PV may not release the corresponding CSI storage resources.**
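A typical way to apply that recommendation is to patch the PV's reclaim policy before deleting the PVC; the PV name below is a placeholder:

```Bash
# Placeholder PV name: switch the reclaim policy to Delete so that deleting
# the PVC also releases the PV and its underlying CSI storage.
kubectl patch pv <pv-name> -p '{"spec":{"persistentVolumeReclaimPolicy":"Delete"}}'
```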
To fully remove the TDengine cluster, clean up the statefulset, svc, configmap, and pvc respectively:

```Bash
kubectl delete statefulset -l app=tdengine -n tdengine-test
kubectl delete svc -l app=tdengine -n tdengine-test
kubectl delete pvc -l app=tdengine -n tdengine-test
kubectl delete configmap taoscfg -n tdengine-test
```
## Troubleshooting

### Error 1

If you scale in directly without first running "drop dnode", the removed pods leave their dnodes registered in TDengine, so some nodes in the cluster show as offline.

```Plain
kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "show dnodes"

taos> show dnodes
id | endpoint | vnodes | support_vnodes | status | create_time | reboot_time | note | active_code | c_active_code |
=============================================================================================================================================================================================================================================
1 | tdengine-0.ta... | 10 | 16 | ready | 2023-07-19 17:54:18.552 | 2023-07-20 09:39:04.297 | | | |
2 | tdengine-1.ta... | 10 | 16 | ready | 2023-07-19 17:54:37.828 | 2023-07-20 09:28:24.240 | | | |
3 | tdengine-2.ta... | 10 | 16 | ready | 2023-07-19 17:55:01.141 | 2023-07-20 10:48:43.445 | | | |
5 | tdengine-3.ta... | 0 | 16 | offline | 2023-07-20 16:31:34.092 | 2023-07-20 16:38:17.419 | status msg timeout | | |
Query OK, 4 row(s) in set (0.003862s)
```
## Finally

For high availability and high reliability of TDengine in a Kubernetes environment, hardware damage and disaster recovery are handled at two levels:

1. The disaster recovery capability of the underlying distributed block storage: block storage keeps multiple replicas, and popular distributed block storage such as Ceph can extend storage replicas across racks, cabinets, machine rooms, and data centers (or you can directly use the block storage service of a public cloud vendor).
2. TDengine's own disaster recovery: in TDengine Enterprise, when a dnode goes permanently offline (for example, a bare-metal disk is damaged and data is lost), a blank dnode can be pulled up to take over the work of the original dnode.

Finally, welcome to [TDengine Cloud](https://cloud.tdengine.com/) to experience the one-stop, fully managed TDengine Cloud as a Service.

> TDengine Cloud is a minimalist fully managed time series data processing Cloud as a Service platform developed based on the open source time series database TDengine. In addition to high-performance time series database, it also has system functions such as caching, subscription and stream computing, and provides convenient and secure data sharing, as well as numerous enterprise-level functions. It allows enterprises in the fields of Internet of Things, Industrial Internet, Finance, IT operation and maintenance monitoring to significantly reduce labor costs and operating costs in the management of time series data.
@ -5,7 +5,7 @@ description: This document describes how to deploy a TDengine cluster on a serve
TDengine has a native distributed design and provides the ability to scale out. A few nodes can form a TDengine cluster. If you need higher processing power, you just need to add more nodes into the cluster. TDengine uses virtual node technology to virtualize a node into multiple virtual nodes to achieve load balancing. At the same time, TDengine can group virtual nodes on different nodes into virtual node groups, and use the replication mechanism to ensure the high availability of the system. The cluster feature of TDengine is completely open source.

This document describes how to manually deploy a cluster on a host directly and deploy a cluster with Docker, Kubernetes or Helm.

```mdx-code-block
import DocCardList from '@theme/DocCardList';
```
@ -42,11 +42,20 @@ In TDengine, the data types below can be used when specifying a column or tag.
| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type. |
| 16 | VARCHAR | User-defined | Alias of BINARY |
| 17 | GEOMETRY | User-defined | Geometry |

:::note

- Each row of the table cannot be longer than 48KB (64KB since version 3.0.5.0) (note that each BINARY/NCHAR/GEOMETRY column takes up an additional 2 bytes of storage space).
- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
- The length of BINARY can be up to 16,374 (data column is 65,517 and tag column is 16,382 since version 3.0.5.0) bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'`
- The maximum length of the GEOMETRY data column is 65,517 bytes, and the maximum length of the tag column is 16,382 bytes. Supports POINT, LINESTRING, and POLYGON subtypes of 2D. The following table describes the length calculation method:

  | # | **Syntax**                           | **MinLen** | **MaxLen** | **Growth of each point** |
  |---|--------------------------------------|------------|------------|--------------------------|
  | 1 | POINT(1.0 1.0)                       | 21         | 21         | NA                       |
  | 2 | LINESTRING(1.0 1.0, 2.0 2.0)         | 9+2*16     | 9+4094*16  | +16                      |
  | 3 | POLYGON((1.0 1.0, 2.0 2.0, 1.0 1.0)) | 13+3*16    | 13+4094*16 | +16                      |

- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.

:::
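As a sketch of the GEOMETRY limits above (the table and column names here are illustrative, not from the original):

```sql
-- Illustrative: a GEOMETRY column sized at 100 bytes comfortably holds a
-- 2-D point, which needs 21 bytes (see the length table above).
CREATE TABLE geo_demo (ts TIMESTAMP, geo GEOMETRY(100));
INSERT INTO geo_demo VALUES (NOW, 'POINT(1.0 1.0)');
```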
@ -36,14 +36,12 @@ database_option: {
 | TSDB_PAGESIZE value
 | WAL_RETENTION_PERIOD value
 | WAL_RETENTION_SIZE value
}
```

## Parameters

- BUFFER: specifies the size (in MB) of the write buffer for each vnode. Enter a value between 3 and 16384. The default value is 256.
- CACHEMODEL: specifies how the latest data in subtables is stored in the cache. The default value is none.
  - none: The latest data is not cached.
  - last_row: The last row of each subtable is cached. This option significantly improves the performance of the LAST_ROW function.
|
@ -58,7 +56,7 @@ database_option: {
|
||||||
- WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk.
|
- WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk.
|
||||||
- MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096.
|
- MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096.
|
||||||
- MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100.
|
- MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100.
|
||||||
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. The Enterprise Edition supports [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function, thus multiple KEEP values (comma separated and up to 3 values supported, and meet keep 0 <= keep 1 <= keep 2, e.g. KEEP 100h,100d,3650d) are supported; the Community Edition does not support Tiered Storage function (although multiple keep values are configured, they do not take effect, only the maximum keep value is used as KEEP).
|
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. TDengine Enterprise supports [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function, thus multiple KEEP values (comma separated and up to 3 values supported, and meet keep 0 <= keep 1 <= keep 2, e.g. KEEP 100h,100d,3650d) are supported; TDengine OSS does not support Tiered Storage function (although multiple keep values are configured, they do not take effect, only the maximum keep value is used as KEEP).
|
||||||
- PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB.
|
- PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB.
|
||||||
- PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384.
|
- PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384.
|
||||||
- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.
|
- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.
|
||||||
|
@ -75,10 +73,8 @@ database_option: {
- TABLE_PREFIX: The prefix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the prefix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name is v30001, then "0001" is used if TSDB_PREFIX is set to 2 but "v3" is used if TSDB_PREFIX is set to -2; It can help you to control the distribution of tables.
- TABLE_SUFFIX: The suffix in the table name that is ignored when distributing a table to a vgroup when it's a positive number, or only the suffix is used when distributing a table to a vgroup, the default value is 0; For example, if the table name is v30001, then "v300" is used if TSDB_SUFFIX is set to 2 but "01" is used if TSDB_SUFFIX is set to -2; It can help you to control the distribution of tables.
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: specifies the maximum time for which WAL files are kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value is 3600, which means the data in the latest 3600 seconds will be kept in WAL for data subscription. Please adjust this parameter to a more proper value for your data subscription.
- WAL_RETENTION_SIZE: specifies the maximum total size of WAL files to be kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files to keep for consumption has no upper limit.

### Example Statement
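A minimal sketch consistent with the parameters above; the database name and values are illustrative:

```sql
-- Illustrative: 10 vgroups, a 256 MB write buffer per vnode, and a one-hour
-- WAL retention window for data subscription.
CREATE DATABASE db_example VGROUPS 10 BUFFER 256 WAL_RETENTION_PERIOD 3600;
```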
@ -45,9 +45,9 @@ table_option: {
1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key.
2. The maximum length of the table name is 192 bytes.
3. The maximum length of each row is 48k (64k since version 3.0.5.0) bytes, please note that the extra 2 bytes used by each BINARY/NCHAR/GEOMETRY column are also counted.
4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive.
5. The maximum length in bytes must be specified when using BINARY/NCHAR/GEOMETRY types.
6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive.
   For example \`aBc\` and \`abc\` are different table names but `abc` and `aBc` are same table names because they are both converted to `abc` internally.
   Only ASCII visible characters can be used with escape character.
@ -51,6 +51,11 @@ DESCRIBE [db_name.]stb_name;
### View tag information for all child tables in the supertable

```
SHOW TABLE TAGS FROM table_name [FROM db_name];
SHOW TABLE TAGS FROM [db_name.]table_name;
```

```
taos> SHOW TABLE TAGS FROM st1;
tbname | id | loc |
```
@ -55,7 +55,7 @@ window_clause: {
  | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]

interp_clause:
    RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)

partition_by_clause:
    PARTITION BY expr [, expr] ...
@ -672,7 +672,7 @@ If you input a specific column, the number of non-null values in the column is r
ELAPSED(ts_primary_key [, time_unit])
```

**Description**: `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length.

**Return value type**: Double if the input value is not NULL;
|
@ -698,7 +698,7 @@ ELAPSED(ts_primary_key [, time_unit])
|
||||||
LEASTSQUARES(expr, start_val, step_val)
|
LEASTSQUARES(expr, start_val, step_val)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The linear regression function of the specified column and the timestamp column (primary key), `start_val` is the initial value and `step_val` is the step value.
|
**Description**: The linear regression function of a specified column, `start_val` is the initial value and `step_val` is the step value.
|
||||||
|
|
||||||
**Return value type**: A string in the format of "(slope, intercept)"
|
**Return value type**: A string in the format of "(slope, intercept)"
|
||||||
|
|
||||||
|
@ -867,10 +867,16 @@ FIRST(expr)
|
||||||
### INTERP
|
### INTERP
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INTERP(expr)
|
INTERP(expr [, ignore_null_values])
|
||||||
|
|
||||||
|
ignore_null_values: {
|
||||||
|
0
|
||||||
|
| 1
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned.
|
**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned. The value of `ignore_null_values` can be 0 or 1, 1 means null values are ignored. The default value of this parameter is 0.
|
||||||
|
|
||||||
|
|
||||||
**Return value type**: Same as the column being operated upon
|
**Return value type**: Same as the column being operated upon
|
||||||
|
|
||||||
|
@ -883,9 +889,10 @@ INTERP(expr)
|
||||||
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
|
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
|
||||||
- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
|
- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
|
||||||
- `INTERP` must be used along with `RANGE`, `EVERY`, `FILL` keywords.
|
- `INTERP` must be used along with `RANGE`, `EVERY`, `FILL` keywords.
|
||||||
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
|
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range. timestamp2 is the ending point of the output time range.
|
||||||
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a(millisecond)), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
|
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a(millisecond)), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
|
||||||
- Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
|
- Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
|
||||||
|
- When only one timestamp value is specified in `RANGE` clause, `INTERP` is used to generate interpolation at this point in time. In this case, `EVERY` clause can be omitted. For example, SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
|
||||||
- `INTERP` can be applied to supertable by interpolating primary key sorted data of all its childtables. It can also be used with `partition by tbname` when applied to supertable to generate interpolation on each single timeline.
|
- `INTERP` can be applied to supertable by interpolating primary key sorted data of all its childtables. It can also be used with `partition by tbname` when applied to supertable to generate interpolation on each single timeline.
|
||||||
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.2.0).
|
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.2.0).
|
||||||
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by interpolation algorithm(support after version 3.0.3.0).
|
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by interpolation algorithm(support after version 3.0.3.0).
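Putting these rules together, a sketch (the table `tb` and column `col` are illustrative):

```sql
-- Illustrative: linear interpolation every 10 minutes across one hour,
-- returning the interpolation timestamp and whether each row was filled.
SELECT _irowts, _isfilled, INTERP(col)
FROM tb
RANGE('2023-01-01 00:00:00', '2023-01-01 01:00:00')
EVERY(10m)
FILL(linear);
```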
@ -992,19 +999,14 @@ SAMPLE(expr, k)
**Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,1000].

**Return value type**: Same as the column being operated

**Applicable data types**: Any data type

**Applicable nested query**: Inner query and Outer query

**Applicable table types**: standard tables and supertables
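For example, a sketch (the supertable `meters` and column `current` are illustrative):

```sql
-- Illustrative: three randomly sampled values of `current` per child table.
SELECT tbname, SAMPLE(current, 3) FROM meters PARTITION BY tbname;
```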
### TAIL
|
@ -1049,11 +1051,11 @@ TOP(expr, k)
|
||||||
UNIQUE(expr)
|
UNIQUE(expr)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp. The first occurrence of a timestamp or tag is used.
|
**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword.
|
||||||
|
|
||||||
**Return value type**:Same as the data type of the column being operated upon
|
**Return value type**:Same as the data type of the column being operated upon
|
||||||
|
|
||||||
**Applicable column types**: Any data types except for timestamp
|
**Applicable column types**: Any data types
|
||||||
|
|
||||||
**Applicable table types**: table, STable
|
**Applicable table types**: table, STable
|
||||||
|
|
||||||
|
@ -1082,7 +1084,6 @@ CSUM(expr)
- Arithmetic operation can't be performed on the result of `csum` function
- Can only be used with aggregate functions. This function can be used with supertables and standard tables.
### DERIVATIVE
|
@ -1106,7 +1107,6 @@ ignore_negative: {
|
||||||
|
|
||||||
**More explanation**:
|
**More explanation**:
|
||||||
|
|
||||||
- It can be used together with `PARTITION BY tbname` against a STable.
|
|
||||||
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from.
|
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from.
|
||||||
|
|
||||||
### DIFF
|
### DIFF
|
||||||
|
@ -1169,7 +1169,6 @@ MAVG(expr, k)
|
||||||
|
|
||||||
- Arithmetic operation can't be performed on the result of `MAVG`.
|
- Arithmetic operation can't be performed on the result of `MAVG`.
|
||||||
- Can only be used with data columns, can't be used with tags. - Can't be used with aggregate functions.
|
- Can only be used with data columns, can't be used with tags. - Can't be used with aggregate functions.
|
||||||
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
|
|
||||||
|
|
||||||
|
|
||||||
### STATECOUNT
|
@ -1195,7 +1194,6 @@ STATECOUNT(expr, oper, val)
|
||||||
|
|
||||||
**More explanations**:
|
**More explanations**:
|
||||||
|
|
||||||
- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline]
|
|
||||||
- Can't be used with window operation, like interval/state_window/session_window
|
- Can't be used with window operation, like interval/state_window/session_window
|
||||||
|
|
||||||
|
|
||||||
|
@ -1223,7 +1221,6 @@ STATEDURATION(expr, oper, val, unit)
|
||||||
|
|
||||||
**More explanations**:
|
**More explanations**:
|
||||||
|
|
||||||
- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline]
|
|
||||||
- Can't be used with window operation, like interval/state_window/session_window
|
- Can't be used with window operation, like interval/state_window/session_window
|
||||||
|
|
||||||
|
|
||||||
|
@ -1241,7 +1238,6 @@ TWA(expr)
**Applicable table types**: standard tables and supertables
## System Information Functions
|
@ -1278,3 +1274,161 @@ SELECT SERVER_STATUS();
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The server status.
|
**Description**: The server status.
|
||||||
|
|
||||||
|
|
||||||
|
## Geometry Functions

### Geometry Input Functions

Geometry input functions create geometry data from WKT.

#### ST_GeomFromText

```sql
ST_GeomFromText(VARCHAR WKT expr)
```

**Description**: Return a specified GEOMETRY value from Well-Known Text representation (WKT).

**Return value type**: GEOMETRY

**Applicable data types**: VARCHAR

**Applicable table types**: standard tables and supertables

**Explanations**:

- The input can be one of the WKT strings, like POINT, LINESTRING, POLYGON, MULTIPOINT, MULTILINESTRING, MULTIPOLYGON, GEOMETRYCOLLECTION.
- The output is a GEOMETRY data type, internally defined as a binary string.
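For example, a sketch (the table `wkt_tb` and VARCHAR column `wkt` are illustrative):

```sql
-- Illustrative: convert stored WKT strings into GEOMETRY values.
SELECT ST_GeomFromText(wkt) FROM wkt_tb;
```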
### Geometry Output Functions

Geometry output functions convert geometry data into WKT.

#### ST_AsText

```sql
ST_AsText(GEOMETRY geom)
```

**Description**: Return a specified Well-Known Text representation (WKT) value from GEOMETRY data.

**Return value type**: VARCHAR

**Applicable data types**: GEOMETRY

**Applicable table types**: standard tables and supertables

**Explanations**:

- The output can be one of the WKT strings, like POINT, LINESTRING, POLYGON, MULTIPOINT, MULTILINESTRING, MULTIPOLYGON, GEOMETRYCOLLECTION.
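A round-trip sketch through the input and output functions, assuming a FROM-less scalar query is accepted, as with SERVER_STATUS() above:

```sql
-- Illustrative: parse WKT into GEOMETRY, then render it back as WKT text.
SELECT ST_AsText(ST_GeomFromText('POINT(1.0 1.0)'));
```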
### Geometry Relationships Functions

Geometry relationships functions determine spatial relationships between geometries.

#### ST_Intersects

```sql
ST_Intersects(GEOMETRY geomA, GEOMETRY geomB)
```

**Description**: Compares two geometries and returns true if they intersect.

**Return value type**: BOOL

**Applicable data types**: GEOMETRY, GEOMETRY

**Applicable table types**: standard tables and supertables

**Explanations**:

- Geometries intersect if they have any point in common.
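For instance, two crossing line segments intersect; the geometries are built inline with ST_GeomFromText, and a FROM-less scalar query is assumed:

```sql
-- Illustrative: the two diagonals of a square cross, so this returns true.
SELECT ST_Intersects(
  ST_GeomFromText('LINESTRING(0.0 0.0, 2.0 2.0)'),
  ST_GeomFromText('LINESTRING(0.0 2.0, 2.0 0.0)')
);
```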
#### ST_Equals
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_Equals(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Description**: Returns TRUE if the given geometries are "spatially equal".
|
||||||
|
|
||||||
|
**Return value type**: BOOL
|
||||||
|
|
||||||
|
**Applicable data types**: GEOMETRY, GEOMETRY
|
||||||
|
|
||||||
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
|
**Explanations**:
|
||||||
|
- 'Spatially equal' means ST_Contains(A,B) = true and ST_Contains(B,A) = true, and the ordering of points can be different but represent the same geometry structure.
|
||||||
|
|
||||||
|
|
||||||
|
#### ST_Touches
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_Touches(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Description**: Returns TRUE if A and B intersect, but their interiors do not intersect.
|
||||||
|
|
||||||
|
**Return value type**: BOOL
|
||||||
|
|
||||||
|
**Applicable data types**: GEOMETRY, GEOMETRY
|
||||||
|
|
||||||
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
|
**Explanations**:
|
||||||
|
- A and B have at least one point in common, and the common points lie in at least one boundary.
|
||||||
|
- For Point/Point inputs the relationship is always FALSE, since points do not have a boundary.
|
||||||
|
|
||||||
|
|
||||||
|
#### ST_Covers
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_Covers(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Description**: Returns TRUE if every point in Geometry B lies inside (intersects the interior or boundary of) Geometry A.
|
||||||
|
|
||||||
|
**Return value type**: BOOL
|
||||||
|
|
||||||
|
**Applicable data types**: GEOMETRY, GEOMETRY
|
||||||
|
|
||||||
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
|
**Explanations**:
|
||||||
|
- A covers B means no point of B lies outside (in the exterior of) A.
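A minimal sketch; note that a point on the boundary still counts as covered (table-less SELECT for brevity):

```sql
-- The point lies on the polygon's boundary: returns true
SELECT ST_Covers(ST_GeomFromText('POLYGON((0 0, 4 0, 4 4, 0 4, 0 0))'),
                 ST_GeomFromText('POINT(0 0)'));
```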
|
||||||
|
|
||||||
|
|
||||||
|
#### ST_Contains
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_Contains(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Description**: Returns TRUE if geometry A contains geometry B.
|
||||||
|
|
||||||
|
**Return value type**: BOOL
|
||||||
|
|
||||||
|
**Applicable data types**: GEOMETRY, GEOMETRY
|
||||||
|
|
||||||
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
|
**Explanations**:
|
||||||
|
- A contains B if and only if all points of B lie inside (i.e. in the interior or boundary of) A (or equivalently, no points of B lie in the exterior of A), and the interiors of A and B have at least one point in common.
|
||||||
|
|
||||||
|
|
||||||
|
#### ST_ContainsProperly
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_ContainsProperly(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Description**: Returns TRUE if every point of B lies in the interior of A.
|
||||||
|
|
||||||
|
**Return value type**: BOOL
|
||||||
|
|
||||||
|
**Applicable data types**: GEOMETRY, GEOMETRY
|
||||||
|
|
||||||
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
|
**Explanations**:
|
||||||
|
- There is no point of B that lies on the boundary of A or in the exterior of A.
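A minimal sketch contrasting ST_Contains and ST_ContainsProperly on boundary points (table-less SELECT for brevity):

```sql
-- Only a boundary point in common, interiors do not intersect: returns false
SELECT ST_Contains(ST_GeomFromText('POLYGON((0 0, 4 0, 4 4, 0 4, 0 0))'),
                   ST_GeomFromText('POINT(0 0)'));
-- An interior point: ST_Contains returns true
SELECT ST_Contains(ST_GeomFromText('POLYGON((0 0, 4 0, 4 4, 0 4, 0 0))'),
                   ST_GeomFromText('POINT(2 2)'));
-- The segment touches the boundary at (0 0): ST_ContainsProperly returns false
SELECT ST_ContainsProperly(ST_GeomFromText('POLYGON((0 0, 4 0, 4 4, 0 4, 0 0))'),
                           ST_GeomFromText('LINESTRING(0 0, 2 2)'));
```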
|
||||||
|
|
|
@ -21,7 +21,7 @@ part_list can be any scalar expression, such as a column, constant, scalar funct
|
||||||
A PARTITION BY clause is processed as follows:
|
A PARTITION BY clause is processed as follows:
|
||||||
|
|
||||||
- The PARTITION BY clause must occur after the WHERE clause
|
- The PARTITION BY clause must occur after the WHERE clause
|
||||||
- The PARTITION BY caluse partitions the data according to the specified dimensions, then perform computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause.
|
- The PARTITION BY clause partitions the data according to the specified dimensions, then performs the computation on each partition. The computation performed is determined by the rest of the statement: a window clause, GROUP BY clause, or SELECT clause.
|
||||||
- The PARTITION BY clause can be used together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value:
|
- The PARTITION BY clause can be used together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value:
|
||||||
|
|
||||||
```sql
|
```sql
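-- A sketch of the statement described above, assuming the documentation's demo
-- `meters` supertable with a `current` column and a `location` tag:
SELECT _wstart, MAX(current) FROM meters PARTITION BY location INTERVAL(10m);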
|
||||||
|
|
|
@ -39,7 +39,7 @@ TDengine supports the `UNION` and `UNION ALL` operations. UNION ALL collects all
|
||||||
| 3 | \>, < | All types except BLOB, MEDIUMBLOB, and JSON | Greater than and less than |
|
| 3 | \>, < | All types except BLOB, MEDIUMBLOB, and JSON | Greater than and less than |
|
||||||
| 4 | \>=, <= | All types except BLOB, MEDIUMBLOB, and JSON | Greater than or equal to and less than or equal to |
|
| 4 | \>=, <= | All types except BLOB, MEDIUMBLOB, and JSON | Greater than or equal to and less than or equal to |
|
||||||
| 5 | IS [NOT] NULL | All types | Indicates whether the value is null |
|
| 5 | IS [NOT] NULL | All types | Indicates whether the value is null |
|
||||||
| 6 | [NOT] BETWEEN AND | All types except BLOB, MEDIUMBLOB, and JSON | Closed interval comparison |
|
| 6 | [NOT] BETWEEN AND | All types except BLOB, MEDIUMBLOB, JSON and GEOMETRY | Closed interval comparison |
|
||||||
| 7 | IN | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Equal to any value in the list |
|
| 7 | IN | All types except BLOB, MEDIUMBLOB, and JSON; the primary key (timestamp) is also not supported | Equal to any value in the list |
|
||||||
| 8 | LIKE | BINARY, NCHAR, and VARCHAR | Wildcard match |
|
| 8 | LIKE | BINARY, NCHAR, and VARCHAR | Wildcard match |
|
||||||
| 9 | MATCH, NMATCH | BINARY, NCHAR, and VARCHAR | Regular expression match |
|
| 9 | MATCH, NMATCH | BINARY, NCHAR, and VARCHAR | Regular expression match |
|
||||||
|
@ -54,7 +54,7 @@ LIKE is used together with wildcards to match strings. Its usage is described as
|
||||||
MATCH and NMATCH are used together with regular expressions to match strings. Their usage is described as follows:
|
MATCH and NMATCH are used together with regular expressions to match strings. Their usage is described as follows:
|
||||||
|
|
||||||
- Use POSIX regular expression syntax. For more information, see Regular Expressions.
|
- Use POSIX regular expression syntax. For more information, see Regular Expressions.
|
||||||
- Regular expression can be used against only table names, i.e. `tbname`, and tags of binary/nchar types, but can't be used against data columns.
|
- Regular expressions can be used only against table names (i.e. `tbname`) and tags/columns of binary/nchar types.
|
||||||
- The maximum length of regular expression string is 128 bytes. Configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression. It's a configuration parameter on the client side, and will take effect after restarting the client.
|
- The maximum length of regular expression string is 128 bytes. Configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression. It's a configuration parameter on the client side, and will take effect after restarting the client.
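A minimal sketch, assuming the demo `meters` supertable with a binary `location` tag:

```sql
-- Match subtable names such as d0, d1, d2 ...
SELECT * FROM meters WHERE tbname MATCH '^d[0-9]+$';
-- Exclude rows whose location tag starts with 'California'
SELECT * FROM meters WHERE location NMATCH '^California';
```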
|
||||||
|
|
||||||
## Logical Operators
|
## Logical Operators
|
||||||
|
|
|
@ -26,7 +26,7 @@ The following characters cannot occur in a password: single quotation marks ('),
|
||||||
|
|
||||||
- Maximum length of database name is 64 bytes
|
- Maximum length of database name is 64 bytes
|
||||||
- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator.
|
- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator.
|
||||||
- Maximum length of each data row is 48K bytes. Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
|
- Maximum length of each data row is 48K (64K since version 3.0.5.0) bytes. Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
|
||||||
- The maximum length of a column name is 64 bytes.
|
- The maximum length of a column name is 64 bytes.
|
||||||
- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
|
- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
|
||||||
- The maximum length of a tag name is 64 bytes
|
- The maximum length of a tag name is 64 bytes
|
||||||
|
@ -36,7 +36,7 @@ The following characters cannot occur in a password: single quotation marks ('),
|
||||||
- Maximum numbers of databases, STables, tables are dependent only on the system resources.
|
- Maximum numbers of databases, STables, tables are dependent only on the system resources.
|
||||||
- The number of replicas can only be 1 or 3.
|
- The number of replicas can only be 1 or 3.
|
||||||
- The maximum length of a username is 23 bytes.
|
- The maximum length of a username is 23 bytes.
|
||||||
- The maximum length of a password is 128 bytes.
|
- The maximum length of a password is 31 bytes.
|
||||||
- The maximum number of rows depends on system resources.
|
- The maximum number of rows depends on system resources.
|
||||||
- The maximum number of vnodes in a database is 1024.
|
- The maximum number of vnodes in a database is 1024.
|
||||||
|
|
||||||
|
|
|
@ -178,6 +178,7 @@ The following list shows all reserved keywords:
|
||||||
|
|
||||||
- MATCH
|
- MATCH
|
||||||
- MAX_DELAY
|
- MAX_DELAY
|
||||||
|
- MAX_SPEED
|
||||||
- MAXROWS
|
- MAXROWS
|
||||||
- MERGE
|
- MERGE
|
||||||
- META
|
- META
|
||||||
|
@ -334,8 +335,6 @@ The following list shows all reserved keywords:
|
||||||
- WAL_LEVEL
|
- WAL_LEVEL
|
||||||
- WAL_RETENTION_PERIOD
|
- WAL_RETENTION_PERIOD
|
||||||
- WAL_RETENTION_SIZE
|
- WAL_RETENTION_SIZE
|
||||||
- WAL_ROLL_PERIOD
|
|
||||||
- WAL_SEGMENT_SIZE
|
|
||||||
- WATERMARK
|
- WATERMARK
|
||||||
- WHERE
|
- WHERE
|
||||||
- WINDOW_CLOSE
|
- WINDOW_CLOSE
|
||||||
|
|
|
@ -29,7 +29,7 @@ This document introduces the tables of INFORMATION_SCHEMA and their structure.
|
||||||
Provides information about dnodes. Similar to SHOW DNODES.
|
Provides information about dnodes. Similar to SHOW DNODES.
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :------------: | ------------ | ------------------------- |
|
| --- | :------------: | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 1 | vnodes | SMALLINT | Current number of vnodes on the dnode. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 1 | vnodes | SMALLINT | Current number of vnodes on the dnode. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 2 | support_vnodes | SMALLINT | Maximum number of vnodes on the dnode |
|
| 2 | support_vnodes | SMALLINT | Maximum number of vnodes on the dnode |
|
||||||
| 3 | status | BINARY(10) | Current status |
|
| 3 | status | BINARY(10) | Current status |
|
||||||
|
@ -43,7 +43,7 @@ Provides information about dnodes. Similar to SHOW DNODES.
|
||||||
Provides information about mnodes. Similar to SHOW MNODES.
|
Provides information about mnodes. Similar to SHOW MNODES.
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :---------: | ------------ | ------------------ |
|
| --- | :---------: | ------------- | ------------------------------------------ |
|
||||||
| 1 | id | SMALLINT | Mnode ID |
|
| 1 | id | SMALLINT | Mnode ID |
|
||||||
| 2 | endpoint | BINARY(134) | Mnode endpoint |
|
| 2 | endpoint | BINARY(134) | Mnode endpoint |
|
||||||
| 3 | role | BINARY(10) | Current role |
|
| 3 | role | BINARY(10) | Current role |
|
||||||
|
@ -55,7 +55,7 @@ Provides information about mnodes. Similar to SHOW MNODES.
|
||||||
Provides information about qnodes. Similar to SHOW QNODES.
|
Provides information about qnodes. Similar to SHOW QNODES.
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :---------: | ------------ | ------------ |
|
| --- | :---------: | ------------- | --------------- |
|
||||||
| 1 | id | SMALLINT | Qnode ID |
|
| 1 | id | SMALLINT | Qnode ID |
|
||||||
| 2 | endpoint | BINARY(134) | Qnode endpoint |
|
| 2 | endpoint | BINARY(134) | Qnode endpoint |
|
||||||
| 3 | create_time | TIMESTAMP | Creation time |
|
| 3 | create_time | TIMESTAMP | Creation time |
|
||||||
|
@ -65,7 +65,7 @@ Provides information about qnodes. Similar to SHOW QNODES.
|
||||||
Provides information about the cluster.
|
Provides information about the cluster.
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :---------: | ------------ | ---------- |
|
| --- | :---------: | ------------- | --------------- |
|
||||||
| 1 | id | BIGINT | Cluster ID |
|
| 1 | id | BIGINT | Cluster ID |
|
||||||
| 2 | name | BINARY(134) | Cluster name |
|
| 2 | name | BINARY(134) | Cluster name |
|
||||||
| 3 | create_time | TIMESTAMP | Creation time |
|
| 3 | create_time | TIMESTAMP | Creation time |
|
||||||
|
@ -81,7 +81,7 @@ Provides information about user-created databases. Similar to SHOW DATABASES.
|
||||||
| 3 | ntables | INT | Number of standard tables and subtables (not including supertables) |
|
| 3 | ntables | INT | Number of standard tables and subtables (not including supertables) |
|
||||||
| 4 | vgroups | INT | Number of vgroups. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 4 | vgroups | INT | Number of vgroups. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 6 | replica | INT | Number of replicas. It should be noted that `replica` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 6 | replica | INT | Number of replicas. It should be noted that `replica` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 7 | strict | BINARY(3) | Strong consistency. It should be noted that `strict` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 7 | strict | BINARY(4) | Obsolete |
|
||||||
| 8 | duration | INT | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 8 | duration | INT | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 9 | keep | INT | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 9 | keep | INT | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 10 | buffer | INT | Write cache size per vnode, in MB. It should be noted that `buffer` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 10 | buffer | INT | Write cache size per vnode, in MB. It should be noted that `buffer` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
|
@ -98,21 +98,19 @@ Provides information about user-created databases. Similar to SHOW DATABASES.
|
||||||
| 21 | cachesize | INT | Memory per vnode used for caching the newest data. It should be noted that `cachesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 21 | cachesize | INT | Memory per vnode used for caching the newest data. It should be noted that `cachesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 22 | wal_level | INT | WAL level. It should be noted that `wal_level` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 22 | wal_level | INT | WAL level. It should be noted that `wal_level` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk. It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk. It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 24 | wal_retention_period | INT | WAL retention period. It should be noted that `wal_retention_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 24 | wal_retention_period | INT | WAL retention period, in seconds. It should be noted that `wal_retention_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 25 | wal_retention_size | INT | Maximum WAL size. It should be noted that `wal_retention_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 25 | wal_retention_size | INT | Maximum WAL size. It should be noted that `wal_retention_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 26 | wal_roll_period | INT | WAL rotation period. It should be noted that `wal_roll_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 26 | stt_trigger | SMALLINT | The threshold for number of files to trigger file merging. It should be noted that `stt_trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 27 | wal_segment_size | BIGINT | WAL file size. It should be noted that `wal_segment_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 27 | table_prefix | SMALLINT | The prefix length in the table name that is ignored when distributing tables to vnodes based on table name. It should be noted that `table_prefix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 28 | stt_trigger | SMALLINT | The threshold for number of files to trigger file merging. It should be noted that `stt_trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 28 | table_suffix | SMALLINT | The suffix length in the table name that is ignored when distributing tables to vnodes based on table name. It should be noted that `table_suffix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 29 | table_prefix | SMALLINT | The prefix length in the table name that is ignored when distributing table to vnode based on table name. It should be noted that `table_prefix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 29 | tsdb_pagesize | INT | The page size for internal storage engine, its unit is KB. It should be noted that `tsdb_pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 30 | table_suffix | SMALLINT | The suffix length in the table name that is ignored when distributing table to vnode based on table name. It should be noted that `table_suffix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
|
||||||
| 31 | tsdb_pagesize | INT | The page size for internal storage engine, its unit is KB. It should be noted that `tsdb_pagesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
|
||||||
|
|
||||||
## INS_FUNCTIONS
|
## INS_FUNCTIONS
|
||||||
|
|
||||||
Provides information about user-defined functions.
|
Provides information about user-defined functions.
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :---------: | ------------ | -------------- |
|
| --- | :-----------: | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 1 | name | BINARY(64) | Function name |
|
| 1 | name | BINARY(64) | Function name |
|
||||||
| 2 | comment | BINARY(255) | Function description. It should be noted that `comment` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 2 | comment | BINARY(255) | Function description. It should be noted that `comment` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 3 | aggregate | INT | Whether the UDF is an aggregate function. It should be noted that `aggregate` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 3 | aggregate | INT | Whether the UDF is an aggregate function. It should be noted that `aggregate` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
|
@ -122,27 +120,27 @@ Provides information about user-defined functions.
|
||||||
| 7 | bufsize | INT | Buffer size |
|
| 7 | bufsize | INT | Buffer size |
|
||||||
| 8 | func_language | BINARY(31) | UDF programming language |
|
| 8 | func_language | BINARY(31) | UDF programming language |
|
||||||
| 9 | func_body | BINARY(16384) | UDF function body |
|
| 9 | func_body | BINARY(16384) | UDF function body |
|
||||||
| 10 | func_version | INT | UDF function version. starting from 0. Increasing by 1 each time it is updated|
|
| 10 | func_version | INT | UDF function version, starting from 0 and incremented by 1 each time the UDF is updated |
|
||||||
|
|
||||||
## INS_INDEXES
|
## INS_INDEXES
|
||||||
|
|
||||||
Provides information about user-created indices. Similar to SHOW INDEX.
|
Provides information about user-created indices. Similar to SHOW INDEX.
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :--------------: | ------------ | ---------------------------------------------------------------------------------- |
|
| --- | :--------------: | ------------- | --------------------------------------------------------------------- |
|
||||||
| 1 | db_name | BINARY(32) | Database containing the table with the specified index |
|
| 1 | db_name | BINARY(32) | Database containing the table with the specified index |
|
||||||
| 2 | table_name | BINARY(192) | Table containing the specified index |
|
| 2 | table_name | BINARY(192) | Table containing the specified index |
|
||||||
| 3 | index_name | BINARY(192) | Index name |
|
| 3 | index_name | BINARY(192) | Index name |
|
||||||
| 4 | column_name | BINARY(64) | Index column |
|
| 4 | column_name | BINARY(64) | Index column |
|
||||||
| 5 | index_type | BINARY(10) | SMA or FULLTEXT index |
|
| 5 | index_type | BINARY(10) | SMA or tag index |
|
||||||
| 6 | index_extensions | BINARY(256) | Other information For SMA indices, this shows a list of functions. For FULLTEXT indices, this is null. |
|
| 6 | index_extensions | BINARY(256) | Other information. For SMA/tag indices, this shows a list of functions |
|
||||||
|
|
||||||
## INS_STABLES
|
## INS_STABLES
|
||||||
|
|
||||||
Provides information about supertables.
|
Provides information about supertables.
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :-----------: | ------------ | ------------------------ |
|
| --- | :-----------: | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 1 | stable_name | BINARY(192) | Supertable name |
|
| 1 | stable_name | BINARY(192) | Supertable name |
|
||||||
| 2 | db_name | BINARY(64) | Database containing the supertable |
|
| 2 | db_name | BINARY(64) | Database containing the supertable |
|
||||||
| 3 | create_time | TIMESTAMP | Creation time |
|
| 3 | create_time | TIMESTAMP | Creation time |
|
||||||
|
@ -159,7 +157,7 @@ Provides information about supertables.
|
||||||
Provides information about standard tables and subtables.
|
Provides information about standard tables and subtables.
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :-----------: | ------------ | ---------------- |
|
| --- | :-----------: | ------------- | ---------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 1 | table_name | BINARY(192) | Table name |
|
| 1 | table_name | BINARY(192) | Table name |
|
||||||
| 2 | db_name | BINARY(64) | Database name |
|
| 2 | db_name | BINARY(64) | Database name |
|
||||||
| 3 | create_time | TIMESTAMP | Creation time |
|
| 3 | create_time | TIMESTAMP | Creation time |
|
||||||
|
@ -174,7 +172,7 @@ Provides information about standard tables and subtables.
|
||||||
## INS_TAGS
|
## INS_TAGS
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :---------: | ------------- | ---------------------- |
|
| --- | :---------: | ------------- | --------------- |
|
||||||
| 1 | table_name | BINARY(192) | Table name |
|
| 1 | table_name | BINARY(192) | Table name |
|
||||||
| 2 | db_name | BINARY(64) | Database name |
|
| 2 | db_name | BINARY(64) | Database name |
|
||||||
| 3 | stable_name | BINARY(192) | Supertable name |
|
| 3 | stable_name | BINARY(192) | Supertable name |
|
||||||
|
@ -185,7 +183,7 @@ Provides information about standard tables and subtables.
|
||||||
## INS_COLUMNS
|
## INS_COLUMNS
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :---------: | ------------- | ---------------------- |
|
| --- | :-----------: | ------------- | ---------------- |
|
||||||
| 1 | table_name | BINARY(192) | Table name |
|
| 1 | table_name | BINARY(192) | Table name |
|
||||||
| 2 | db_name | BINARY(64) | Database name |
|
| 2 | db_name | BINARY(64) | Database name |
|
||||||
| 3 | table_type | BINARY(21) | Table type |
|
| 3 | table_type | BINARY(21) | Table type |
|
||||||
|
@ -201,7 +199,7 @@ Provides information about standard tables and subtables.
|
||||||
Provides information about TDengine users.
|
Provides information about TDengine users.
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :---------: | ------------ | -------- |
|
| --- | :---------: | ------------- | ---------------- |
|
||||||
| 1 | user_name | BINARY(23) | User name |
|
| 1 | user_name | BINARY(23) | User name |
|
||||||
| 2 | privilege | BINARY(256) | User permissions |
|
| 2 | privilege | BINARY(256) | User permissions |
|
||||||
| 3 | create_time | TIMESTAMP | Creation time |
|
| 3 | create_time | TIMESTAMP | Creation time |
|
||||||
|
@ -211,7 +209,7 @@ Provides information about TDengine users.
|
||||||
Provides information about TDengine Enterprise Edition permissions.
|
Provides information about TDengine Enterprise Edition permissions.
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :---------: | ------------ | -------------------------------------------------- |
|
| --- | :---------: | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 1 | version | BINARY(9) | Whether the deployment is a licensed or trial version |
|
| 1 | version | BINARY(9) | Whether the deployment is a licensed or trial version |
|
||||||
| 2 | cpu_cores | BINARY(9) | CPU cores included in license |
|
| 2 | cpu_cores | BINARY(9) | CPU cores included in license |
|
||||||
| 3 | dnodes | BINARY(10) | Dnodes included in license. It should be noted that `dnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 3 | dnodes | BINARY(10) | Dnodes included in license. It should be noted that `dnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
|
@ -232,7 +230,7 @@ Provides information about TDengine Enterprise Edition permissions.
|
||||||
Provides information about vgroups.
|
Provides information about vgroups.
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :-------: | ------------ | ------------------------------------------------------ |
|
| --- | :--------: | ------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 1 | vgroup_id | INT | Vgroup ID |
|
| 1 | vgroup_id | INT | Vgroup ID |
|
||||||
| 2 | db_name | BINARY(32) | Database name |
|
| 2 | db_name | BINARY(32) | Database name |
|
||||||
| 3 | tables | INT | Tables in vgroup. It should be noted that `tables` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 3 | tables | INT | Tables in vgroup. It should be noted that `tables` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
|
@ -252,7 +250,7 @@ Provides information about vgroups.
|
||||||
Provides system configuration information.
|
Provides system configuration information.
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :------: | ------------ | ------------ |
|
| --- | :--------: | ------------- | ----------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 1 | name | BINARY(32) | Parameter |
|
| 1 | name | BINARY(32) | Parameter |
|
||||||
| 2 | value | BINARY(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 2 | value | BINARY(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
|
|
||||||
|
@ -261,7 +259,7 @@ Provides system configuration information.
|
||||||
Provides dnode configuration information.
|
Provides dnode configuration information.
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :------: | ------------ | ------------ |
|
| --- | :--------: | ------------- | ----------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 1 | dnode_id | INT | Dnode ID |
|
| 1 | dnode_id | INT | Dnode ID |
|
||||||
| 2 | name | BINARY(32) | Parameter |
|
| 2 | name | BINARY(32) | Parameter |
|
||||||
| 3 | value | BINARY(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 3 | value | BINARY(64) | Value. It should be noted that `value` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
|
@ -269,7 +267,7 @@ Provides dnode configuration information.
|
||||||
## INS_TOPICS
|
## INS_TOPICS
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :---------: | ------------ | ------------------------------ |
|
| --- | :---------: | ------------- | -------------------------------------- |
|
||||||
| 1 | topic_name | BINARY(192) | Topic name |
|
| 1 | topic_name | BINARY(192) | Topic name |
|
||||||
| 2 | db_name | BINARY(64) | Database for the topic |
|
| 2 | db_name | BINARY(64) | Database for the topic |
|
||||||
| 3 | create_time | TIMESTAMP | Creation time |
|
| 3 | create_time | TIMESTAMP | Creation time |
|
||||||
|
@ -278,16 +276,18 @@ Provides dnode configuration information.
|
||||||
## INS_SUBSCRIPTIONS
|
## INS_SUBSCRIPTIONS
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :------------: | ------------ | ------------------------ |
|
| --- | :------------: | ------------- | --------------------------- |
|
||||||
| 1 | topic_name | BINARY(204) | Subscribed topic |
|
| 1 | topic_name | BINARY(204) | Subscribed topic |
|
||||||
| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
|
| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
|
||||||
| 3 | vgroup_id | INT | Vgroup ID for the consumer |
|
| 3 | vgroup_id | INT | Vgroup ID for the consumer |
|
||||||
| 4 | consumer_id | BIGINT | Consumer ID |
|
| 4 | consumer_id | BIGINT | Consumer ID |
|
||||||
|
| 5 | offset | BINARY(64) | Consumption progress |
|
||||||
|
| 6 | rows | BIGINT | Number of rows consumed |
|
||||||
|
|
||||||
## INS_STREAMS
|
## INS_STREAMS
|
||||||
|
|
||||||
| # | **Column** | **Data Type** | **Description** |
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
| --- | :----------: | ------------ | --------------------------------------- |
|
| --- | :----------: | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 1 | stream_name | BINARY(64) | Stream name |
|
| 1 | stream_name | BINARY(64) | Stream name |
|
||||||
| 2 | create_time | TIMESTAMP | Creation time |
|
| 2 | create_time | TIMESTAMP | Creation time |
|
||||||
| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
|
| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
|
||||||
|
@ -297,3 +297,13 @@ Provides dnode configuration information.
|
||||||
| 7 | target_table | BINARY(192) | Target table |
|
| 7 | target_table | BINARY(192) | Target table |
|
||||||
| 8 | watermark | BIGINT | Watermark (see stream processing documentation). It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 8 | watermark | BIGINT | Watermark (see stream processing documentation). It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation). It should be noted that `trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation). It should be noted that `trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||||
|
|
||||||
|
## INS_USER_PRIVILEGES
|
||||||
|
|
||||||
|
| # | **Column** | **Data Type** | **Description** |
|
||||||
|
| --- | :----------: | ------------ | -------------------------------------------|
|
||||||
|
| 1 | user_name | VARCHAR(24) | Username |
|
||||||
|
| 2 | privilege | VARCHAR(10) | Privilege description |
|
||||||
|
| 3 | db_name | VARCHAR(65) | Database name |
|
||||||
|
| 4 | table_name | VARCHAR(193) | Table name |
|
||||||
|
| 5 | condition | VARCHAR(49152) | The privilege filter for child tables |
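A quick way to inspect this table (assuming the standard `INFORMATION_SCHEMA` database):

```sql
SELECT user_name, privilege, db_name, table_name FROM information_schema.ins_user_privileges;
```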
|
||||||
|
|
|
@ -36,7 +36,7 @@ Shows information about connections to the system.
|
||||||
SHOW CONSUMERS;
|
SHOW CONSUMERS;
|
||||||
```
|
```
|
||||||
|
|
||||||
Shows information about all active consumers in the system.
|
Shows information about all consumers in the system.
|
||||||
|
|
||||||
## SHOW CREATE DATABASE
|
## SHOW CREATE DATABASE
|
||||||
|
|
||||||
|
@ -101,6 +101,7 @@ Note: TDengine Enterprise Edition only.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SHOW INDEXES FROM tbl_name [FROM db_name];
|
SHOW INDEXES FROM tbl_name [FROM db_name];
|
||||||
|
SHOW INDEXES FROM [db_name.]tbl_name;
|
||||||
```
|
```
|
||||||
|
|
||||||
Shows indices that have been created.
|
Shows indices that have been created.
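Both forms are equivalent; for example (database and table names are illustrative):

```sql
SHOW INDEXES FROM meters FROM test;
SHOW INDEXES FROM test.meters;
```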
|
||||||
|
@ -326,6 +327,7 @@ Note that only the information about the data blocks in the data file will be di
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SHOW TAGS FROM child_table_name [FROM db_name];
|
SHOW TAGS FROM child_table_name [FROM db_name];
|
||||||
|
SHOW TAGS FROM [db_name.]child_table_name;
|
||||||
```
|
```
|
||||||
|
|
||||||
Shows all tag information in a subtable.
|
Shows all tag information in a subtable.
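Both forms are equivalent; for example (database and subtable names are illustrative):

```sql
SHOW TAGS FROM d1001 FROM test;
SHOW TAGS FROM test.d1001;
```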
|
||||||
|
|
|
@ -16,7 +16,7 @@ This statement creates a user account.
|
||||||
|
|
||||||
The maximum length of user_name is 23 bytes.
|
The maximum length of user_name is 23 bytes.
|
||||||
|
|
||||||
The maximum length of password is 128 bytes. The password can include leters, digits, and special characters excluding single quotation marks, double quotation marks, backticks, backslashes, and spaces. The password cannot be empty.
|
The maximum length of password is 31 bytes. The password can include letters, digits, and special characters excluding single quotation marks, double quotation marks, backticks, backslashes, and spaces. The password cannot be empty.
|
||||||
|
|
||||||
`SYSINFO` indicates whether the user is allowed to view system information. `1` means allowed, `0` means not allowed. System information includes server configuration, dnode, vnode, storage. The default value is `1`.
|
`SYSINFO` indicates whether the user is allowed to view system information. `1` means allowed, `0` means not allowed. System information includes server configuration, dnode, vnode, storage. The default value is `1`.
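A minimal sketch (user name and password are illustrative):

```sql
-- Password within the 31-byte limit; SYSINFO 0 hides system information from the user
CREATE USER u1 PASS 'Taos_2023' SYSINFO 0;
```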
|
||||||
|
|
||||||
|
|
|
@ -17,7 +17,7 @@ CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_typ
|
||||||
```
|
```
|
||||||
- OR REPLACE: if the UDF exists, the UDF properties are modified
|
- OR REPLACE: if the UDF exists, the UDF properties are modified
|
||||||
- function_name: The scalar function name to be used in the SQL statement
|
- function_name: The scalar function name to be used in the SQL statement
|
||||||
- LANGUAGE 'C|Python': the programming language of UDF. Now C or Python is supported. If this clause is omitted, C is assumed as the programming language.
|
- LANGUAGE 'C|Python': the programming language of UDF. Now C or Python (v3.7+) is supported. If this clause is omitted, C is assumed as the programming language.
|
||||||
- library_path: For the C programming language, this is the absolute path of the DLL file including the name of the shared object file (.so). For the Python programming language, it is the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
|
- library_path: For the C programming language, this is the absolute path of the DLL file including the name of the shared object file (.so). For the Python programming language, it is the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
|
||||||
- output_type: The data type of the results of the UDF.
|
- output_type: The data type of the results of the UDF.
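A minimal sketch for both languages (library paths and function names are illustrative):

```sql
-- C scalar UDF from a shared object
CREATE FUNCTION bit_and AS '/usr/local/taos/udf/libbitand.so' OUTPUTTYPE INT;
-- Python scalar UDF; OR REPLACE updates the definition if it already exists
CREATE OR REPLACE FUNCTION pybitand AS '/usr/local/taos/udf/pybitand.py' OUTPUTTYPE INT LANGUAGE 'Python';
```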
|
||||||
|
|
||||||
|
|
|
@ -4,12 +4,12 @@ sidebar_label: Indexing
|
||||||
description: This document describes the SQL statements related to indexing in TDengine.
|
description: This document describes the SQL statements related to indexing in TDengine.
|
||||||
---
|
---
|
||||||
|
|
||||||
TDengine supports SMA and FULLTEXT indexing.
|
TDengine supports SMA and tag indexing.
|
||||||
|
|
||||||
## Create an Index
|
## Create an Index
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE FULLTEXT INDEX index_name ON tb_name (col_name [, col_name] ...)
|
CREATE INDEX index_name ON tb_name (col_name [, col_name] ...)
|
||||||
|
|
||||||
CREATE SMA INDEX index_name ON tb_name index_option
|
CREATE SMA INDEX index_name ON tb_name index_option
|
||||||
|
|
||||||
|
@ -28,9 +28,23 @@ Performs pre-aggregation on the specified column over the time window defined by
|
||||||
- WATERMARK: Enter a value between 0ms and 900000ms. The most precise unit supported is milliseconds. The default value is 5 seconds. This option can be used only on supertables.
|
- WATERMARK: Enter a value between 0ms and 900000ms. The most precise unit supported is milliseconds. The default value is 5 seconds. This option can be used only on supertables.
|
||||||
- MAX_DELAY: Enter a value between 1ms and 900000ms. The most precise unit supported is milliseconds. The default value is the value of interval provided that it does not exceed 900000ms. This option can be used only on supertables. Note: Retain the default value if possible. Configuring a small MAX_DELAY may cause results to be frequently pushed, affecting storage and query performance.
|
- MAX_DELAY: Enter a value between 1ms and 900000ms. The most precise unit supported is milliseconds. The default value is the value of interval provided that it does not exceed 900000ms. This option can be used only on supertables. Note: Retain the default value if possible. Configuring a small MAX_DELAY may cause results to be frequently pushed, affecting storage and query performance.
|
||||||
|
|
||||||
### FULLTEXT Indexing
|
```sql
|
||||||
|
DROP DATABASE IF EXISTS d0;
|
||||||
Creates a text index for the specified column. FULLTEXT indexing improves performance for queries with text filtering. The index_option syntax is not supported for FULLTEXT indexing. FULLTEXT indexing is supported for JSON tag columns only. Multiple columns cannot be indexed together. However, separate indices can be created for each column.
|
CREATE DATABASE d0;
|
||||||
|
USE d0;
|
||||||
|
CREATE TABLE IF NOT EXISTS st1 (ts timestamp, c1 int, c2 float, c3 double) TAGS (t1 int unsigned);
|
||||||
|
CREATE TABLE ct1 USING st1 TAGS(1000);
|
||||||
|
CREATE TABLE ct2 USING st1 TAGS(2000);
|
||||||
|
INSERT INTO ct1 VALUES(now+0s, 10, 2.0, 3.0);
|
||||||
|
INSERT INTO ct1 VALUES(now+1s, 11, 2.1, 3.1)(now+2s, 12, 2.2, 3.2)(now+3s, 13, 2.3, 3.3);
|
||||||
|
CREATE SMA INDEX sma_index_name1 ON st1 FUNCTION(max(c1),max(c2),min(c1)) INTERVAL(5m,10s) SLIDING(5m) WATERMARK 5s MAX_DELAY 1m;
|
||||||
|
-- query from SMA Index
|
||||||
|
ALTER LOCAL 'querySmaOptimize' '1';
|
||||||
|
SELECT max(c2),min(c1) FROM st1 INTERVAL(5m,10s) SLIDING(5m);
|
||||||
|
SELECT _wstart,_wend,_wduration,max(c2),min(c1) FROM st1 INTERVAL(5m,10s) SLIDING(5m);
|
||||||
|
-- query from raw data
|
||||||
|
ALTER LOCAL 'querySmaOptimize' '0';
|
||||||
|
```
|
||||||
|
|
||||||
## Delete an Index
|
## Delete an Index
|
||||||
|
|
||||||
|
@ -41,8 +55,8 @@ DROP INDEX index_name;
|
||||||
## View Indices
|
## View Indices
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
```sql
|
|
||||||
SHOW INDEXES FROM tbl_name [FROM db_name];
|
SHOW INDEXES FROM tbl_name [FROM db_name];
|
||||||
|
SHOW INDEXES FROM [db_name.]tbl_name;
|
||||||
```
|
```
|
||||||
|
|
||||||
Shows indices that have been created for the specified database or table.
|
Shows indices that have been created for the specified database or table.
|
|
@ -18,6 +18,7 @@ description: This document describes how TDengine SQL has changed in version 3.0
|
||||||
| 8 | Mixed operations | Enhanced | Mixing scalar and vector operations in queries has been enhanced and is supported in all SELECT clauses.
|
| 8 | Mixed operations | Enhanced | Mixing scalar and vector operations in queries has been enhanced and is supported in all SELECT clauses.
|
||||||
| 9 | Tag operations | Added | Tag columns can be used in queries and clauses like data columns.
|
| 9 | Tag operations | Added | Tag columns can be used in queries and clauses like data columns.
|
||||||
| 10 | Timeline clauses and time functions in supertables | Enhanced | When PARTITION BY is not used, data in supertables is merged into a single timeline.
|
| 10 | Timeline clauses and time functions in supertables | Enhanced | When PARTITION BY is not used, data in supertables is merged into a single timeline.
|
||||||
|
| 11 | GEOMETRY | Added | Geometry data type.
|
||||||
|
|
||||||
## SQL Syntax
|
## SQL Syntax
|
||||||
|
|
||||||
|
@ -33,7 +34,7 @@ The following data types can be used in the schema for standard tables.
|
||||||
| 6 | ALTER USER | Modified | Deprecated<ul><li>PRIVILEGE: Specified user permissions. Replaced by GRANT and REVOKE. <br/>Added</li><li>ENABLE: Enables or disables a user. </li><li>SYSINFO: Specifies whether a user can query system information. </li></ul>
|
| 6 | ALTER USER | Modified | Deprecated<ul><li>PRIVILEGE: Specified user permissions. Replaced by GRANT and REVOKE. <br/>Added</li><li>ENABLE: Enables or disables a user. </li><li>SYSINFO: Specifies whether a user can query system information. </li></ul>
|
||||||
| 7 | COMPACT VNODES | Not supported | Compacted the data on a vnode. Not supported.
|
| 7 | COMPACT VNODES | Not supported | Compacted the data on a vnode. Not supported.
|
||||||
| 8 | CREATE ACCOUNT | Deprecated| This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
|
| 8 | CREATE ACCOUNT | Deprecated| This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
|
||||||
| 9 | CREATE DATABASE | Modified | Deprecated<ul><li>BLOCKS: Specified the number of blocks for each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>CACHE: Specified the size of the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>DAYS: The length of time to store in a single file. Replaced by DURATION. </li><li>FSYNC: Specified the fsync interval when WAL was set to 2. Replaced by WAL_FSYNC_PERIOD. </li><li>QUORUM: Specified the number of confirmations required. STRICT is now used to specify strong or weak consistency. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>WAL: Specified the WAL level. Replaced by WAL_LEVEL. <br/>Added</li><li>BUFFER: Specifies the size of the write cache pool for each vnode. </li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>DURATION: Replaces DAYS. Now supports units. </li><li>PAGES: Specifies the number of pages in the metadata storage engine cache on each vnode. </li><li>PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. </li><li>RETENTIONS: Specifies the aggregation interval and retention period </li><li>STRICT: Specifies whether strong data consistency is enabled. </li><li>SINGLE_STABLE: Specifies whether a database can contain multiple supertables. </li><li>VGROUPS: Specifies the initial number of vgroups when a database is created. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_ROLL_PERIOD: Specifies the WAL rotation period. </li><li>WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. <br/>Modified</li><li>KEEP: Now supports units. </li></ul>
|
| 9 | CREATE DATABASE | Modified | Deprecated<ul><li>BLOCKS: Specified the number of blocks for each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>CACHE: Specified the size of the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>DAYS: The length of time to store in a single file. Replaced by DURATION. </li><li>FSYNC: Specified the fsync interval when WAL was set to 2. Replaced by WAL_FSYNC_PERIOD. </li><li>QUORUM: Specified the number of confirmations required. STRICT is now used to specify strong or weak consistency. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>WAL: Specified the WAL level. Replaced by WAL_LEVEL. <br/>Added</li><li>BUFFER: Specifies the size of the write cache pool for each vnode. </li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>DURATION: Replaces DAYS. Now supports units. </li><li>PAGES: Specifies the number of pages in the metadata storage engine cache on each vnode. </li><li>PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. </li><li>RETENTIONS: Specifies the aggregation interval and retention period </li><li>STRICT: Specifies whether strong data consistency is enabled. </li><li>SINGLE_STABLE: Specifies whether a database can contain multiple supertables. </li><li>VGROUPS: Specifies the initial number of vgroups when a database is created. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>KEEP: Now supports units. </li></ul>
|
||||||
| 10 | CREATE DNODE | Modified | Now supports specifying hostname and port separately<ul><li>CREATE DNODE dnode_host_name PORT port_val</li></ul>
|
| 10 | CREATE DNODE | Modified | Now supports specifying hostname and port separately<ul><li>CREATE DNODE dnode_host_name PORT port_val</li></ul>
|
||||||
| 11 | CREATE INDEX | Added | Creates an SMA index.
|
| 11 | CREATE INDEX | Added | Creates an SMA index.
|
||||||
| 12 | CREATE MNODE | Added | Creates an mnode.
|
| 12 | CREATE MNODE | Added | Creates an mnode.
|
||||||
|
|
|
@ -36,7 +36,7 @@ You can use below command to setup Grafana alert notification.
|
||||||
An existing Grafana Notification Channel can be specified with parameter `-E`, the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq`
|
An existing Grafana Notification Channel can be specified with parameter `-E`, the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq`
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
||||||
```
|
```
|
||||||
|
|
||||||
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
|
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
|
||||||
|
@ -214,19 +214,6 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||||
|cluster\_id|NCHAR|TAG|cluster id|
|
|cluster\_id|NCHAR|TAG|cluster id|
|
||||||
|
|
||||||
### logs table
|
|
||||||
|
|
||||||
`logs` table contains login information records.
|
|
||||||
|
|
||||||
|field|type|is\_tag|comment|
|
|
||||||
|:----|:---|:-----|:------|
|
|
||||||
|ts|TIMESTAMP||timestamp|
|
|
||||||
|level|VARCHAR||log level|
|
|
||||||
|content|NCHAR||log content|
|
|
||||||
|dnode\_id|INT|TAG|dnode id|
|
|
||||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
|
||||||
|cluster\_id|NCHAR|TAG|cluster id|
|
|
||||||
|
|
||||||
### log\_summary table
|
### log\_summary table
|
||||||
|
|
||||||
`log_summary` table contains log summary information records.
|
`log_summary` table contains log summary information records.
|
||||||
|
@ -274,7 +261,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||metric value|
|
|gauge|DOUBLE||metric value|
|
||||||
|client\_ip|NCHAR|TAG|client ip|
|
|client\_ip|NCHAR|TAG|client ip|
|
||||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|request\_method|NCHAR|TAG|request method|
|
|request\_method|NCHAR|TAG|request method|
|
||||||
|
@ -288,7 +275,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||metric value|
|
|gauge|DOUBLE||metric value|
|
||||||
|client\_ip|NCHAR|TAG|client ip|
|
|client\_ip|NCHAR|TAG|client ip|
|
||||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|request\_method|NCHAR|TAG|request method|
|
|request\_method|NCHAR|TAG|request method|
|
||||||
|
@ -302,7 +289,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||metric value|
|
|gauge|DOUBLE||metric value|
|
||||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|
|
||||||
### taosadapter\_restful\_http\_request\_summary\_milliseconds table
|
### taosadapter\_restful\_http\_request\_summary\_milliseconds table
|
||||||
|
@ -330,7 +317,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||metric value|
|
|gauge|DOUBLE||metric value|
|
||||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|
|
||||||
### taosadapter\_system\_cpu\_percent table
|
### taosadapter\_system\_cpu\_percent table
|
||||||
|
@ -340,6 +327,6 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||mertic value|
|
|gauge|DOUBLE||metric value|
|
||||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|
|
||||||
|
|
|
@ -79,6 +79,12 @@ Parameter Description:
|
||||||
- tz: Optional parameter that specifies the timezone of the returned time, following the IANA Time Zone rules, e.g. `America/New_York`.
|
- tz: Optional parameter that specifies the timezone of the returned time, following the IANA Time Zone rules, e.g. `America/New_York`.
|
||||||
- req_id: Optional parameter that specifies the request id for tracing.
|
- req_id: Optional parameter that specifies the request id for tracing.
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
URL Encoding. Make sure that parameters are properly encoded. For example, when specifying a timezone you must properly encode special characters. `?tz=Etc/GMT+10` will not work because the `+` symbol is interpreted as a space in the URL. It is best practice to encode all special characters in a parameter; use `?tz=Etc%2FGMT%2B10` instead.
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:6041` and sets the default database name to `test`.
|
For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:6041` and sets the default database name to `test`.
|
||||||
|
|
||||||
TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
|
TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
|
||||||
|
|
|
@ -32,25 +32,24 @@ TDengine's JDBC driver implementation is as consistent as possible with the rela

Native connections are supported on the same platforms as the TDengine client driver.
REST connection supports all platforms that can run Java.

## Recent update logs

| taos-jdbcdriver version | major changes | TDengine version |
| :---------------------: | :------------------------------------------------------------------------------------------------------------: | :--------------: |
| 3.2.4 | Subscription adds the enable.auto.commit parameter and the unsubscribe() method in the WebSocket connection | - |
| 3.2.3 | Fixed resultSet data parsing failure in some cases | - |
| 3.2.2 | Subscription adds the seek function | 3.0.5.0 or later |
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later |
| 3.2.0 | This version has been deprecated | - |
| 3.1.0 | JDBC REST connection supports subscription over WebSocket | - |
| 3.0.1 - 3.0.4 | Fix occasional incorrect parsing of resultSet data. 3.0.1 is compiled on JDK 11; use another version in JDK 8 environments | - |
| 3.0.0 | Support for TDengine 3.0 | 3.0.0.0 or later |
| 2.0.42 | Fix wasNull interface return value in WebSocket connection | - |
| 2.0.41 | Fix decode method of username and password in REST connection | - |
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters | - |
| 2.0.38 | JDBC REST connections add bulk pull function | - |
| 2.0.37 | Support json tags | - |
| 2.0.36 | Support schemaless writing | - |

**Note**: adding `batchfetch` to the REST connection and setting it to true will enable the WebSocket connection.
@ -102,6 +101,8 @@ For specific error codes, please refer to.

| 0x2319 | user is required | The user name information is missing when creating the connection |
| 0x231a | password is required | Password information is missing when creating a connection |
| 0x231c | httpEntity is null, sql: | Execution exception occurred during the REST connection |
| 0x231d | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
| 0x231e | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
| 0x2350 | unknown error | Unknown exception; please report it to the developers on GitHub. |
| 0x2352 | Unsupported encoding | An unsupported character encoding set is specified under the native Connection. |
| 0x2353 | internal error of database, please see taoslog for more details | An error occurs when the prepare statement is executed on the native connection. Check the taos log to locate the fault. |

@ -117,8 +118,8 @@ For specific error codes, please refer to.

| 0x2376 | failed to set consumer topic, topic name is empty | During data subscription creation, the subscription topic name is empty. Check that the specified topic name is correct. |
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Please check the connection to TDengine. |
| 0x2378 | consumer create error | Failed to create a data subscription. Check the taos log according to the error message to locate the fault. |
| 0x2379 | seek offset must not be a negative number | The seek interface parameter cannot be negative. Use a valid parameter. |
| 0x237a | vGroup not found in result set | The subscription is not bound to the vGroup due to the rebalance mechanism. |

- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
<!-- - [TDengine_ERROR_CODE](../error-code) -->
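These codes surface through the standard JDBC exception type. The following is a minimal sketch of reading them, not the document's own example; the URL and the nonexistent table name are placeholders.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class ErrorCodeExample {
    public static void main(String[] args) {
        String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            stmt.executeQuery("select * from no_such_table");
        } catch (SQLException e) {
            // getErrorCode() returns the numeric code listed in the tables above.
            System.out.printf("error code: 0x%x, message: %s%n", e.getErrorCode(), e.getMessage());
        }
    }
}
```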
@ -169,7 +170,7 @@ Add following dependency in the `pom.xml` file of your Maven project:

```
<dependency>
  <groupId>com.taosdata.jdbc</groupId>
  <artifactId>taos-jdbcdriver</artifactId>
  <version>3.2.2</version>
</dependency>
```
@ -285,10 +286,11 @@ The configuration parameters in the URL are as follows:

- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is false. batchfetch uses HTTP for data transfer. JDBC REST supports batch pulls. taos-jdbcdriver and TDengine transfer data via WebSocket connection. Compared with HTTP, WebSocket enables JDBC REST connection to support large data volume querying and improve query performance.
- charset: specifies the charset used to parse strings; this parameter is valid only when batchfetch is set to true.
- batchErrorIgnore: true: when executing executeBatch of Statement, if one SQL execution fails in the middle, continue to execute the following SQL. false: no longer execute any statement after the failed SQL. The default value is false.
- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 60000 ms.
- httpSocketTimeout: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is false.
- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is true.
- useSSL: whether to connect securely using SSL. true: use an SSL connection; false: do not use an SSL connection.
- httpPoolSize: size of REST concurrent requests. The default value is 20.

**Note**: Some configuration items (e.g., locale, timezone) do not work in the REST connection.
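For orientation, here is a hedged sketch of a REST URL that combines several of the parameters above; the host, credentials, and database are placeholders, and the timeout values simply restate the documented defaults.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class RestUrlExample {
    public static void main(String[] args) throws SQLException {
        // batchfetch=true switches the REST connection onto the WebSocket transport.
        String url = "jdbc:TAOS-RS://localhost:6041/test?user=root&password=taosdata"
                + "&batchfetch=true&httpConnectTimeout=60000&messageWaitTimeout=60000&httpPoolSize=20";
        try (Connection conn = DriverManager.getConnection(url)) {
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}
```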
@ -352,10 +354,11 @@ The configuration parameters in properties are as follows.

- TSDBDriver.PROPERTY_KEY_CHARSET: the character set used by the client; the default value is the system character set.
- TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment; the default value is the system's current locale.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. The time zone used by the client; the default value is the system's current time zone.
- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection.
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true.
- TSDBDriver.PROPERTY_KEY_USE_SSL: whether to connect securely using SSL. true: use an SSL connection; false: do not use an SSL connection. It only takes effect when using JDBC REST connection.
- TSDBDriver.HTTP_POOL_SIZE: size of REST concurrent requests. The default value is 20.
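As a quick illustration, the sketch below passes a few of these keys through a `Properties` object; it is an assumption-laden example, with placeholder host and credentials, and the timeout merely restating the default.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;

import com.taosdata.jdbc.TSDBDriver;

public class PropertiesExample {
    public static void main(String[] args) throws SQLException {
        Properties props = new Properties();
        props.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        props.setProperty(TSDBDriver.HTTP_CONNECT_TIMEOUT, "60000"); // REST connection only
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS-RS://localhost:6041/test?user=root&password=taosdata", props)) {
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}
```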
For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only).

### Priority of configuration parameters
@ -420,6 +423,19 @@ while(resultSet.next()){

> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.

### execute SQL with reqId

The reqId can be used to trace a request across the call chain.

```java
AbstractStatement aStmt = (AbstractStatement) connection.createStatement();
aStmt.execute("create database if not exists db", 1L);
aStmt.executeUpdate("use db", 2L);
try (ResultSet rs = aStmt.executeQuery("select * from tb", 3L)) {
    Timestamp ts = rs.getTimestamp(1);
}
```
### Writing data via parameter binding

TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.
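The bind examples themselves are elided from this hunk. As a stand-in, here is a minimal sketch using the standard JDBC `PreparedStatement` path, which taos-jdbcdriver also supports; the URL, the table `d0`, and the values are placeholders, not the document's own example.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;

public class BindExample {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS://localhost:6030/test?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             PreparedStatement ps = conn.prepareStatement("insert into d0 values (?, ?)")) {
            ps.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
            ps.setFloat(2, 23.5f);
            ps.executeUpdate(); // the SQL is parsed once; only the bound values travel per row
        }
    }
}
```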
@ -913,14 +929,15 @@ public class SchemalessWsTest {

    public static void main(String[] args) throws SQLException {
        final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
        try (Connection connection = DriverManager.getConnection(url)) {
            init(connection);

            try (SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless")) {
                writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
                writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
                writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
            }
        }
    }

    private static void init(Connection connection) throws SQLException {
|
||||||
</TabItem>
|
</TabItem>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|
||||||
|
### Schemaless with reqId
|
||||||
|
|
||||||
|
This reqId can be used to request link tracing.
|
||||||
|
|
||||||
|
```java
|
||||||
|
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS, 1L);
|
||||||
|
```
|
||||||
|
|
||||||
### Data Subscription
|
### Data Subscription
|
||||||
|
|
||||||
The TDengine Java Connector supports subscription functionality with the following application API.
|
The TDengine Java Connector supports subscription functionality with the following application API.
|
||||||
|
@ -959,6 +984,7 @@ The preceding example uses the SQL statement `select ts, speed from speed_table`

```java
Properties config = new Properties();
config.setProperty("bootstrap.servers", "localhost:6030");
config.setProperty("enable.auto.commit", "true");
config.setProperty("group.id", "group1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");

TaosConsumer consumer = new TaosConsumer<>(config);
```

- bootstrap.servers: `ip:port` where the TDengine server is located, or `ip:port` where the taosAdapter is located if the WebSocket connection is used.
- enable.auto.commit: Specifies whether to commit automatically.
- group.id: Specifies the consumer group that the consumer belongs to.
- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set.
- td.connect.type: Specifies the connection type to TDengine, `jni` or `WebSocket`. The default is `jni`.
- httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using the WebSocket type.
- messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using the WebSocket type.
- httpPoolSize: Maximum number of concurrent requests on a connection. It only takes effect when using the WebSocket type. (A combined WebSocket example follows this list.)
- For more information, see [Consumer Parameters](../../../develop/tmq).
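For the WebSocket case specifically, a minimal consumer configuration might look like the sketch below; the endpoint and the deserializer class name are placeholders, and the timeout values restate the defaults listed above.

```java
Properties config = new Properties();
config.setProperty("td.connect.type", "ws");
config.setProperty("bootstrap.servers", "localhost:6041"); // taosAdapter endpoint
config.setProperty("group.id", "group1");
config.setProperty("enable.auto.commit", "true");
config.setProperty("httpConnectTimeout", "5000");
config.setProperty("messageWaitTimeout", "10000");
config.setProperty("value.deserializer", "com.example.MyDeserializer"); // placeholder class

TaosConsumer<ResultBean> consumer = new TaosConsumer<>(config);
```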
#### Subscribe to consume data
@ -988,6 +1016,42 @@ while(true) {

`poll` obtains one message each time it is run.

#### Assignment subscription Offset

```java
// get offset
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;

// Overrides the fetch offsets that the consumer will use on the next poll(timeout).
void seek(TopicPartition partition, long offset) throws SQLException;
```

Example usage is as follows.

```java
String topic = "offset_seek_test";
Map<TopicPartition, Long> offset = null;
try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
    consumer.subscribe(Collections.singletonList(topic));
    for (int i = 0; i < 10; i++) {
        if (i == 3) {
            // Saving consumption position
            offset = consumer.position(topic);
        }
        if (i == 5) {
            // reset consumption to the previously saved position
            for (Map.Entry<TopicPartition, Long> entry : offset.entrySet()) {
                consumer.seek(entry.getKey(), entry.getValue());
            }
        }
        ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(500));
    }
}
```
#### Close subscriptions

@ -1015,10 +1079,20 @@ public abstract class ConsumerLoop {

    public ConsumerLoop() throws SQLException {
        Properties config = new Properties();
        config.setProperty("td.connect.type", "jni");
        config.setProperty("bootstrap.servers", "localhost:6030");
        config.setProperty("td.connect.user", "root");
        config.setProperty("td.connect.pass", "taosdata");
        config.setProperty("auto.offset.reset", "earliest");
        config.setProperty("msg.with.table.name", "true");
        config.setProperty("enable.auto.commit", "true");
        config.setProperty("auto.commit.interval.ms", "1000");
        config.setProperty("group.id", "group1");
        config.setProperty("client.id", "1");
        config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
        config.setProperty("value.deserializer.encoding", "UTF-8");
        config.setProperty("experimental.snapshot.enable", "true");

        this.consumer = new TaosConsumer<>(config);
        this.topics = Collections.singletonList("topic_speed");
@ -1090,12 +1164,19 @@ public abstract class ConsumerLoop {

    public ConsumerLoop() throws SQLException {
        Properties config = new Properties();
        config.setProperty("td.connect.type", "ws");
        config.setProperty("bootstrap.servers", "localhost:6041");
        config.setProperty("td.connect.user", "root");
        config.setProperty("td.connect.pass", "taosdata");
        config.setProperty("auto.offset.reset", "earliest");
        config.setProperty("msg.with.table.name", "true");
        config.setProperty("enable.auto.commit", "true");
        config.setProperty("auto.commit.interval.ms", "1000");
        config.setProperty("group.id", "group2");
        config.setProperty("client.id", "1");
        config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
        config.setProperty("value.deserializer.encoding", "UTF-8");
        config.setProperty("experimental.snapshot.enable", "true");

        this.consumer = new TaosConsumer<>(config);
        this.topics = Collections.singletonList("topic_speed");
@ -1236,6 +1317,7 @@ The source code of the sample application is under `TDengine/examples/JDBC`:

- connectionPools: using taos-jdbcdriver in connection pools such as HikariCP, Druid, dbcp, c3p0, etc.
- SpringJdbcTemplate: using taos-jdbcdriver in Spring JdbcTemplate.
- mybatisplus-demo: using taos-jdbcdriver in Springboot + Mybatis.
- consumer-demo: an example of consuming TDengine data; the consumption rate can be controlled via parameters.

[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)

@ -1276,3 +1358,7 @@ For additional troubleshooting, see [FAQ](../../../train-faq/faq).

## API Reference

[taos-jdbcdriver doc](https://docs.taosdata.com/api/taos-jdbcdriver)
@ -27,20 +27,62 @@ The source code for the Rust connectors is located on [GitHub](https://github.co

Native connections are supported on the same platforms as the TDengine client driver.
Websocket connections are supported on all platforms that can run Rust.

## Version history

| connector-rust version | TDengine version | major features |
| :----------------: | :--------------: | :--------------------------------------------------: |
| v0.9.2 | 3.0.7.0 or later | STMT: Get tag_fields and col_fields under ws. |
| v0.8.12 | 3.0.5.0 | TMQ: Get consuming progress and seek offset to consume. |
| v0.8.0 | 3.0.4.0 | Support schemaless insert. |
| v0.7.6 | 3.0.3.0 | Support req_id in query. |
| v0.6.0 | 3.0.0.0 | Base features. |

The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues.
## Handling exceptions

When an error occurs, you can obtain detailed information about it:

```rust
match conn.exec(sql) {
    Ok(_) => {
        Ok(())
    }
    Err(e) => {
        eprintln!("ERROR: {:?}", e);
        Err(e)
    }
}
```
## TDengine DataType vs. Rust DataType

TDengine currently supports timestamp, number, character, and Boolean types, which convert to Rust types as follows:

| TDengine DataType | Rust DataType |
| ----------------- | ----------------- |
| TIMESTAMP | Timestamp |
| INT | i32 |
| BIGINT | i64 |
| FLOAT | f32 |
| DOUBLE | f64 |
| SMALLINT | i16 |
| TINYINT | i8 |
| BOOL | bool |
| BINARY | Vec<u8\> |
| NCHAR | String |
| JSON | serde_json::Value |

Note: Only TAG columns support the JSON type.

## Installation Steps
### Pre-installation preparation

* Install the Rust development toolchain
* If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver)

### Install the connectors

Depending on the connection method, add the [taos][taos] dependency in your Rust project as follows:
@ -141,7 +183,8 @@ let builder = TaosBuilder::from_dsn("taos://localhost:6030")?;

let conn1 = builder.build()?;

// use websocket protocol.
let builder2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?;
let conn2 = builder2.build()?;
```

After the connection is established, you can perform operations on your database.
@ -223,41 +266,191 @@ There are two ways to query data: Using built-in types or the [serde](https://se

## Usage examples

### Create database and tables

```rust
use taos::*;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let dsn = "taos://localhost:6030";
    let builder = TaosBuilder::from_dsn(dsn)?;

    let taos = builder.build()?;

    let db = "query";

    // create database
    taos.exec_many([
        format!("DROP DATABASE IF EXISTS `{db}`"),
        format!("CREATE DATABASE `{db}`"),
        format!("USE `{db}`"),
    ])
    .await?;

    // create table
    taos.exec_many([
        // create super table
        "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
         TAGS (`groupid` INT, `location` BINARY(16))",
        // create child table
        "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angeles')",
    ]).await?;

    Ok(())
}
```

> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.

### Insert data

<RustInsert />
### Query data

<RustQuery />
### execute SQL with req_id

The req_id can be used to trace a request across the call chain.

```rust
let rs = taos.query_with_req_id("select * from stable where tag1 is null", 1)?;
```

### Writing data via parameter binding

TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.

For parameter binding details, see the [API Reference](#stmt-api).

<RustBind />

### Schemaless Writing

TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless).

<RustSml />

### Schemaless with req_id

The req_id can be used to trace a request across the call chain.

```rust
let sml_data = SmlDataBuilder::default()
    .protocol(SchemalessProtocol::Line)
    .data(data)
    .req_id(100u64)
    .build()?;

client.put(&sml_data)?;
```
### Data Subscription

TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/).

#### Create a Topic

```rust
taos.exec_many([
    // create topic for subscription
    format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
])
.await?;
```

#### Create a Consumer

You create a TMQ connector by using a DSN.

```rust
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
```

Create a consumer:

```rust
let mut consumer = tmq.build()?;
```

#### Subscribe to consume data

A single consumer can subscribe to one or more topics.

```rust
consumer.subscribe(["tmq_meters"]).await?;
```

The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed.

```rust
{
    let mut stream = consumer.stream();

    while let Some((offset, message)) = stream.try_next().await? {
        // get information from offset

        // the topic
        let topic = offset.topic();
        // the vgroup id, like partition id in kafka.
        let vgroup_id = offset.vgroup_id();
        println!("* in vgroup id {vgroup_id} of topic {topic}\n");

        if let Some(data) = message.into_data() {
            while let Some(block) = data.fetch_raw_block().await? {
                // one block for one table, get table name if needed
                let name = block.table_name();
                let records: Vec<Record> = block.deserialize().try_collect()?;
                println!(
                    "** table: {}, got {} records: {:#?}\n",
                    name.unwrap(),
                    records.len(),
                    records
                );
            }
        }
        consumer.commit(offset).await?;
    }
}
```

Get assignments:

Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0.

```rust
let assignments = consumer.assignments().await.unwrap();
```

#### Assignment subscription Offset

Seek offset:

Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0.

```rust
consumer.offset_seek(topic, vgroup_id, offset).await;
```

#### Close subscriptions

```rust
consumer.unsubscribe().await;
```

The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory; a combined example appears after this list.

- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis.
- `client.id`: Subscriber client ID.
- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group.
- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
- `auto.commit.interval.ms`: Interval for automatic commits.
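Putting these together, a DSN might look like the following sketch; the host and the identifier values are placeholders, not prescribed settings.

```rust
// A hedged sketch combining the DSN parameters above; only group.id is required.
let tmq = TmqBuilder::from_dsn(
    "taos://localhost:6030/?group.id=test&client.id=rs1&auto.offset.reset=earliest&enable.auto.commit=true&auto.commit.interval.ms=1000",
)?;
```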
#### Full Sample Code

For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).

### Use with connection pool

In complex applications, we recommend enabling connection pools. [taos] implements connection pools based on [r2d2].
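As a hedged sketch, a pool can be created from the same DSN; the `pool()` helper shown here is assumed from the [taos] crate's r2d2 integration, with default pool settings.

```rust
// Sketch: build an r2d2-backed pool from a DSN (host is a placeholder).
let pool = TaosBuilder::from_dsn("taos://localhost:6030")?.pool()?;
```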
@ -287,7 +480,17 @@ In the application code, use `pool.get()?` to get a connection object [Taos].

```rust
let taos = pool.get()?;
```

### More sample programs

The source code of the sample application is under `TDengine/examples/rust`:

[rust example](https://github.com/taosdata/TDengine/tree/3.0/examples/rust)

## Frequently Asked Questions

For additional troubleshooting, see [FAQ](../../../train-faq/faq).

## API Reference

The [Taos][struct.Taos] object provides an API to perform operations on multiple databases.
@ -373,9 +576,13 @@ Note that Rust asynchronous functions and an asynchronous runtime are required.

- `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement.
- `.use_database(database: &str)`: Executes the `USE` statement.

In addition, this structure is also the entry point for Parameter Binding and Line Protocol Interface. Please refer to the specific API descriptions for usage.

<p>
<a id="stmt-api" style={{color:'#141414'}}>
Bind Interface
</a>
</p>
Similar to the C interface, Rust provides the bind interface's wrapping. First, the [Taos][struct.taos] object creates a parameter binding object [Stmt] for an SQL statement.

@ -386,7 +593,7 @@ stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?;

The bind object provides a set of interfaces for implementing parameter binding.

`.set_tbname(name)`

To bind table names.

@ -395,7 +602,7 @@ let mut stmt = taos.stmt("insert into ? values(? ,?)")?;

```rust
stmt.set_tbname("d0")?;
```

`.set_tags(&[tag])`

Bind sub-table table names and tag values when the SQL statement uses a super table.

@ -405,7 +612,7 @@ stmt.set_tbname("d0")?;

```rust
stmt.set_tags(&[Value::VarChar("taos".to_string())])?;
```

`.bind(&[column])`

Bind value types. Use the [ColumnView] structure to create and bind the required types.

@ -429,7 +636,7 @@ let params = vec![

```rust
let rows = stmt.bind(&params)?.add_batch()?.execute()?;
```

`.execute()`

Execute SQL. [Stmt] objects can be reused, re-bound, and executed again after execution. Before execution, ensure that all data has been added to the queue with `.add_batch`.

@ -442,82 +649,12 @@ stmt.execute()?;

```rust
//stmt.execute()?;
```

For a working example, see [GitHub](https://github.com/taosdata/taos-connector-rust/blob/main/taos/examples/bind.rs).
For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos).

[taos]: https://github.com/taosdata/taos-connector-rust
[r2d2]: https://crates.io/crates/r2d2
[TaosBuilder]: https://docs.rs/taos/latest/taos/struct.TaosBuilder.html
[TaosCfg]: https://docs.rs/taos/latest/taos/struct.TaosCfg.html
@ -20,18 +20,76 @@ The source code for the Python connector is hosted on [GitHub](https://github.co

- The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client.
- REST connections are supported on all platforms that can run Python.

### Supported features

- Native connections support all the core features of TDengine, including connection management, SQL execution, bind interface, subscriptions, and schemaless writing.
- REST connections support features such as connection management and SQL execution. (SQL execution allows you to: manage databases, tables, and supertables, write data, query data, create continuous queries, etc.).

## Version selection

We recommend using the latest version of `taospy`, regardless of the version of TDengine.

|Python Connector Version|major changes|
|:-------------------:|:----:|
|2.7.9|support for getting assignment and seek function on subscription|
|2.7.8|add `execute_many` method|

|Python Websocket Connector Version|major changes|
|:----------------------------:|:-----:|
|0.2.5|1. support for getting assignment and seek function on subscription <br/> 2. support schemaless <br/> 3. support STMT|
|0.2.4|support `unsubscribe` on subscription|
## Handling Exceptions

The Python connector may raise the following types of exceptions:

- The exception of the Python connector itself.
- The exception of the native library.
- The exception of the websocket connection.
- The exception of subscriptions.
- The exception of other TDengine function modules.

|Error Type|Description|Suggested Actions|
|:--------:|:---------:|:---------------:|
|InterfaceError|the native library is too old to support this function|please check the TDengine client version|
|ConnectionError|connection error|please check TDengine's status and the connection params|
|DatabaseError|database error|please upgrade the Python connector to the latest version|
|OperationalError|operation error||
|ProgrammingError|||
|StatementError|the exception of stmt||
|ResultError|||
|SchemalessError|the exception of schemaless||
|TmqError|the exception of tmq||

Python typically uses try/except to handle exceptions. For exception handling, please refer to the [Python Errors and Exceptions Documentation](https://docs.python.org/3/tutorial/errors.html).

All exceptions from the Python Connector are thrown directly. Applications should handle these exceptions. For example:

```python
{{#include docs/examples/python/handle_exception.py}}
```
## TDengine DataType vs. Python DataType

TDengine currently supports timestamp, number, character, and Boolean types, which convert to Python types as follows:

|TDengine DataType|Python DataType|
|:---------------:|:-------------:|
|TIMESTAMP|datetime|
|INT|int|
|BIGINT|int|
|FLOAT|float|
|DOUBLE|float|
|SMALLINT|int|
|TINYINT|int|
|BOOL|bool|
|BINARY|str|
|NCHAR|str|
|JSON|str|

## Installation Steps
### Pre-installation preparation

1. Install Python. The recent taospy package requires Python 3.6.2+. The earlier versions of taospy require Python 3.7+. The taos-ws-py package requires Python 3.7+. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.

@ -217,7 +275,7 @@ Transfer-Encoding: chunked

</TabItem>
</Tabs>

### Specify the Host and Properties to get the connection

The following example code assumes that TDengine is installed locally and that the default configuration is used for both FQDN and serverPort.
@ -273,7 +331,69 @@ The parameter of `connect()` is the url of TDengine, and the protocol is `taosws

</TabItem>
</Tabs>

### Priority of configuration parameters

If a configuration parameter is duplicated between the function parameters and the client configuration file, the priority of the parameters, from highest to lowest, is as follows:

1. Parameters in the `connect` function.
2. The configuration file taos.cfg of the TDengine client driver, when using a native connection.
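For instance, in the sketch below the explicit arguments win over anything set in taos.cfg; the values shown are placeholders for a local default installation.

```python
import taos

# Parameters passed to connect() override the client configuration file.
conn = taos.connect(host="localhost", port=6030, user="root", password="taosdata")
print(conn.server_info)
conn.close()
```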
## Usage examples

### Create database and tables

<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">

```python
import taos

conn = taos.connect()
# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
conn.execute("DROP DATABASE IF EXISTS test")
conn.execute("CREATE DATABASE test")
# change database. same as execute "USE db"
conn.select_db("test")
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
```

</TabItem>

<TabItem value="rest" label="REST connection">

```python
import taosrest

conn = taosrest.connect(url="http://localhost:6041")
# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
conn.execute("DROP DATABASE IF EXISTS test")
conn.execute("CREATE DATABASE test")
conn.execute("USE test")
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
```

</TabItem>

<TabItem value="websocket" label="WebSocket connection">

```python
import taosws

conn = taosws.connect("taosws://localhost:6041")
# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
conn.execute("DROP DATABASE IF EXISTS test")
conn.execute("CREATE DATABASE test")
conn.execute("USE test")
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
```

</TabItem>
</Tabs>

### Insert data

```python
conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)")
```

:::note

now is an internal function. The default is the current time of the client's computer. now + 1s represents the current time of the client plus 1 second, followed by a number representing the unit of time: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months), y (years).

:::
### Basic Usage

@ -343,6 +463,8 @@ For a more detailed description of the `sql()` method, please refer to [RestClie

</TabItem>
<TabItem value="websocket" label="WebSocket connection">

The `Connection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods).

```python
{{#include docs/examples/python/connect_websocket_examples.py:basic}}
```

@ -353,7 +475,47 @@ For a more detailed description of the `sql()` method, please refer to [RestClie

</TabItem>
</Tabs>
### Querying Data

<Tabs defaultValue="rest">

<TabItem value="native" label="native connection">

The `query` method of the `TaosConnection` class can be used to query data; it returns result data of type `TaosResult`.

```python
{{#include docs/examples/python/connection_usage_native_reference.py:query}}
```

:::tip
The queried results can only be fetched once. For example, only one of `fetch_all()` and `fetch_all_into_dict()` can be used in the example above; repeated fetches will return an empty list.
:::
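As an illustrative sketch (assuming the `test` database and `weather` super table created earlier on this page), a query followed by a single fetch might look like this:

```python
import taos

conn = taos.connect()
conn.select_db("test")

result = conn.query("SELECT ts, temperature FROM weather LIMIT 10")
rows = result.fetch_all()  # the result set can be consumed only once
for row in rows:
    print(row)
```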
</TabItem>

<TabItem value="rest" label="REST connection">

The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.

```python
{{#include docs/examples/python/rest_client_example.py}}
```

For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).

</TabItem>

<TabItem value="websocket" label="WebSocket connection">

The `query` method of the `TaosConnection` class can be used to query data; it returns result data of type `TaosResult`.

```python
{{#include docs/examples/python/connect_websocket_examples.py:basic}}
```

</TabItem>

</Tabs>
### Execute SQL with reqId

By using the optional `req_id` parameter, you can specify a request ID that can be used for tracing.
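For instance, a minimal sketch over a native connection (the `req_id` values are arbitrary integers chosen by the application to correlate calls in trace logs):

```python
import taos

conn = taos.connect()
# Pass req_id so this statement can be correlated in tracing output
conn.execute("CREATE DATABASE IF NOT EXISTS test", req_id=1)
result = conn.query("SHOW DATABASES", req_id=2)
print(result.fetch_all())
conn.close()
```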
##### TaosConnection class

Connect in the same way as introduced above, but add the `req_id` argument.

```python title="execute method"
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:insert}}
```

```python title="query method"
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:query}}
```

##### Use of TaosResult class

Fetch data in the same way as introduced above, but add the `req_id` argument.

```python title="blocks_iter method"
{{#include docs/examples/python/result_set_with_req_id_examples.py}}
```

```python
{{#include docs/examples/python/cursor_usage_native_reference_with_req_id.py}}
```

</TabItem>

<TabItem value="rest" label="REST connection">

##### Use of TaosRestCursor class

Connect in the same way as introduced above, but add the `req_id` argument.

```python title="Use of TaosRestCursor"
{{#include docs/examples/python/connect_rest_with_req_id_examples.py:basic}}
```
For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).

</TabItem>

<TabItem value="websocket" label="WebSocket connection">

Connect in the same way as introduced above, but add the `req_id` argument.

```python
{{#include docs/examples/python/connect_websocket_with_req_id_examples.py:basic}}
```

</TabItem>

</Tabs>

### Writing data via parameter binding
The Python connector provides a parameter binding API for inserting data. Similar to most databases, TDengine currently only supports the question mark `?` as a placeholder for the parameters to be bound.

<Tabs>

<TabItem value="native" label="native connection">

##### Create Stmt

Call the `statement` method of `Connection` to create the `stmt` object for parameter binding.

```python
import taos

conn = taos.connect()
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
```

##### Parameter binding

Call the `new_multi_binds` function to create the parameter list for parameter binding.

```python
from taos import new_multi_binds

params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None])  # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
```

Call the `bind_param` method (for a single row) or the `bind_param_batch` method (for multiple rows) to set the values.

```python
stmt.bind_param_batch(params)
```

##### Execute SQL

Call the `execute` method to execute the prepared statement.

```python
stmt.execute()
```

##### Close Stmt

```python
stmt.close()
```

##### Example

```python
{{#include docs/examples/python/stmt_example.py}}
```

</TabItem>
<TabItem value="websocket" label="WebSocket connection">
|
||||||
|
|
||||||
|
##### Create Stmt
|
||||||
|
|
||||||
|
Call the `statement` method in `Connection` to create the `stmt` for parameter binding.
|
||||||
|
|
||||||
|
```
|
||||||
|
import taosws
|
||||||
|
|
||||||
|
conn = taosws.connect('taosws://localhost:6041/test')
|
||||||
|
stmt = conn.statement()
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Prepare sql
|
||||||
|
|
||||||
|
Call `prepare` method in stmt to prepare sql.
|
||||||
|
|
||||||
|
```
|
||||||
|
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
|
||||||
|
```
|
||||||
|
|
||||||
|
##### parameter binding
|
||||||
|
|
||||||
|
Call the `bind_param` method to bind parameters.
|
||||||
|
|
||||||
|
```
|
||||||
|
stmt.bind_param([
|
||||||
|
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
|
||||||
|
taosws.ints_to_column([1, 2, 3, 4]),
|
||||||
|
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
|
||||||
|
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
|
||||||
|
])
|
||||||
|
```
|
||||||
|
|
||||||
|
Call the `add_batch` method to add parameters to the batch.
|
||||||
|
|
||||||
|
```
|
||||||
|
stmt.add_batch()
|
||||||
|
```
|
||||||
|
|
||||||
|
##### execute sql
|
||||||
|
|
||||||
|
Call `execute` method to execute sql.
|
||||||
|
|
||||||
|
```
|
||||||
|
stmt.execute()
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Close Stmt
|
||||||
|
|
||||||
|
```
|
||||||
|
stmt.close()
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Example
|
||||||
|
|
||||||
|
```python
|
||||||
|
{{#include docs/examples/python/stmt_websocket_example.py}}
|
||||||
|
```
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
|
### Schemaless Writing

The connector supports schemaless insert.

<Tabs defaultValue="list">

<TabItem value="list" label="List Insert">

##### Simple insert

```python
{{#include docs/examples/python/schemaless_insert.py}}
```

##### Insert with ttl argument

```python
{{#include docs/examples/python/schemaless_insert_ttl.py}}
```

##### Insert with req_id argument

```python
{{#include docs/examples/python/schemaless_insert_req_id.py}}
```

</TabItem>

<TabItem value="raw" label="Raw Insert">

##### Simple insert

```python
{{#include docs/examples/python/schemaless_insert_raw.py}}
```

##### Insert with ttl argument

```python
{{#include docs/examples/python/schemaless_insert_raw_ttl.py}}
```

##### Insert with req_id argument

```python
{{#include docs/examples/python/schemaless_insert_raw_req_id.py}}
```

</TabItem>

</Tabs>
### Schemaless with reqId

There is an optional parameter called `req_id` in the `schemaless_insert` and `schemaless_insert_raw` methods. This `req_id` can be used for request link tracing.

```python
{{#include docs/examples/python/schemaless_insert_req_id.py}}
```

```python
{{#include docs/examples/python/schemaless_insert_raw_req_id.py}}
```
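As an inline sketch of such a call (assuming the `test` database exists; the protocol and precision enums are the ones exported by the `taos` module):

```python
import taos

conn = taos.connect()
conn.select_db("test")

lines = [
    "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221 1626006833639000000",
]
# One InfluxDB line-protocol record; req_id is an arbitrary integer used for tracing
conn.schemaless_insert(
    lines,
    taos.SmlProtocol.LINE_PROTOCOL,
    taos.SmlPrecision.NANO_SECONDS,
    req_id=1,
)
```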
### Data Subscription

The connector supports data subscription. For more information about subscription, please refer to [Data Subscription](../../../develop/tmq/).

#### Create a Topic

To create a topic, please refer to [Data Subscription](../../../develop/tmq/#create-a-topic).

#### Create a Consumer

<Tabs defaultValue="native">

<TabItem value="native" label="native connection">

The consumer in the connector contains the subscription API. The syntax for creating a consumer is `consumer = Consumer(configs)`. For more subscription API parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer).

```python
from taos.tmq import Consumer

consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
```

</TabItem>

<TabItem value="websocket" label="WebSocket connection">

In addition to native connections, the connector also supports subscriptions via websockets.

The syntax for creating a consumer is `consumer = Consumer(conf=configs)`. You need to set the `td.connect.websocket.scheme` parameter to "ws" in the configuration. For more subscription API parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer).

```python
import taosws

consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
```

</TabItem>

</Tabs>
#### Subscribe to a Topic

<Tabs defaultValue="native">

<TabItem value="native" label="native connection">

The `subscribe` function is used to subscribe to a list of topics.

```python
consumer.subscribe(['topic1', 'topic2'])
```

</TabItem>

<TabItem value="websocket" label="WebSocket connection">

The `subscribe` function is used to subscribe to a list of topics.

```python
consumer.subscribe(['topic1', 'topic2'])
```

</TabItem>

</Tabs>
#### Consume messages

<Tabs defaultValue="native">

<TabItem value="native" label="native connection">

The `poll` function is used to consume data in TMQ. Its parameter is a float representing the timeout in seconds. It returns a `Message` if one arrives before the timeout, or `None` on timeout. You have to handle error messages in the response data.

```python
while True:
    res = consumer.poll(1)
    if not res:
        continue
    err = res.error()
    if err is not None:
        raise err
    val = res.value()

    for block in val:
        print(block.fetchall())
```

</TabItem>

<TabItem value="websocket" label="WebSocket connection">

The `poll` function is used to consume data in TMQ. Its parameter is a float representing the timeout in seconds. It returns a `Message` if one arrives before the timeout, or `None` on timeout. You have to handle error messages in the response data.

```python
while True:
    res = consumer.poll(timeout=1.0)
    if not res:
        continue
    err = res.error()
    if err is not None:
        raise err
    for block in res:
        for row in block:
            print(row)
```

</TabItem>

</Tabs>
#### Assignment and seek of subscription offsets

<Tabs defaultValue="native">

<TabItem value="native" label="native connection">

The `assignment` function is used to get the partition assignment of the topic.

```python
assignments = consumer.assignment()
```

The `seek` function is used to reset the consumption offset of a topic partition.

```python
from taos.tmq import TopicPartition

tp = TopicPartition(topic='topic1', partition=0, offset=0)
consumer.seek(tp)
```

</TabItem>

<TabItem value="websocket" label="WebSocket connection">

The `assignment` function is used to get the partition assignment of the topic.

```python
assignments = consumer.assignment()
```

The `seek` function is used to reset the consumption offset of a topic partition.

```python
consumer.seek(topic='topic1', partition=0, offset=0)
```

</TabItem>

</Tabs>
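Combining the two calls, a minimal sketch (native connection; `consumer` already subscribed as above) that rewinds every currently assigned partition to offset 0 so messages are replayed from the beginning. The `TopicPartition` attribute names assumed here mirror its constructor arguments and may differ by connector version:

```python
from taos.tmq import TopicPartition

for tp in consumer.assignment():
    # Reset each partition's consumption offset back to the start
    consumer.seek(TopicPartition(topic=tp.topic, partition=tp.partition, offset=0))
```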
#### Close subscriptions

<Tabs defaultValue="native">

<TabItem value="native" label="native connection">

You should unsubscribe from the topics and close the consumer after consuming.

```python
consumer.unsubscribe()
consumer.close()
```

</TabItem>

<TabItem value="websocket" label="WebSocket connection">

You should unsubscribe from the topics and close the consumer after consuming.

```python
consumer.unsubscribe()
consumer.close()
```

</TabItem>

</Tabs>
#### Full Sample Code

<Tabs defaultValue="native">

<TabItem value="native" label="native connection">

```python
{{#include docs/examples/python/tmq_example.py}}
```

```python
{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
```

</TabItem>

<TabItem value="websocket" label="WebSocket connection">

```python
{{#include docs/examples/python/tmq_websocket_example.py}}
```

```python
{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
```

</TabItem>

</Tabs>
### Other sample programs

| Example program links | Example program content |
|-----------------------|-------------------------|
| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py) | parameter binding, bind multiple rows at once |
| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | parameter binding, bind one row at once |
| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB line protocol writing |
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | Use JSON type tags |
| [tmq_consumer.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq_consumer.py) | TMQ subscription |
## Other notes

### Exception handling

All errors from database operations are thrown directly as exceptions, and the error message from the database is passed up the exception stack. The application is responsible for exception handling. For example:

```python
{{#include docs/examples/python/handle_exception.py}}
```

### About nanoseconds

Due to the currently incomplete nanosecond support in Python (see the links below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced for `ms` and `us` precision, so application developers need to handle the conversion themselves. Using pandas' `to_datetime()` is recommended. The Python connector may modify the interface in the future if Python adds full official support for nanoseconds.
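For example, a nanosecond integer returned by a query against an `ns`-precision database can be converted with pandas (the timestamp value below is illustrative):

```python
import pandas as pd

ns_value = 1626861392589111684  # hypothetical nanosecond-precision value from a query
ts = pd.to_datetime(ns_value, unit="ns")
print(ts)  # 2021-07-21 09:56:32.589111684
```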
---
toc_max_heading_level: 4
sidebar_label: R
title: R Language Connector
---

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

import Rdemo from "../../07-develop/01-connect/_connect_r.mdx"

By using the RJDBC library in R, you can enable R programs to access TDengine data. Below are the installation process, configuration steps, and example R code.

## Installation Process

Before getting started, make sure you have installed the R language environment. Then follow these steps to install and configure the RJDBC library:

1. Install the Java Development Kit (JDK): the RJDBC library requires a Java environment. Download the appropriate JDK for your operating system from the official Oracle website and follow the installation guide.

2. Install the RJDBC library: execute the following command in the R console to install the RJDBC library.

```r
install.packages("RJDBC", repos='http://cran.us.r-project.org')
```

:::note
1. The R language package version 4.2 that ships with Ubuntu by default may trigger an unresponsiveness bug. Please install the latest version of the R language package from the [official website](https://www.r-project.org/).
2. On Linux systems, installing the RJDBC package may require installing the components needed for compilation. For example, on Ubuntu you can execute `apt install -y libbz2-dev libpcre2-dev libicu-dev` to install them.
3. On Windows systems, you need to set the **JAVA_HOME** environment variable.
:::

3. Download the TDengine JDBC driver: visit the Maven website and download the TDengine JDBC driver (taos-jdbcdriver-X.X.X-dist.jar) to your local machine.

## Configuration Process

Once you have completed the installation steps, you need to do some configuration so that the RJDBC library can connect to and access the TDengine time-series database.

1. Load the RJDBC library and other necessary libraries in your R script:

```r
library(DBI)
library(rJava)
library(RJDBC)
```

2. Set the JDBC driver path and the JDBC URL:

```r
# Set the JDBC driver path (specify the location on your local machine)
driverPath <- "/path/to/taos-jdbcdriver-X.X.X-dist.jar"

# Set the JDBC URL (specify the FQDN and credentials of your TDengine cluster)
url <- "jdbc:TAOS://localhost:6030/?user=root&password=taosdata"
```

3. Load the JDBC driver:

```r
# Load the JDBC driver
drv <- JDBC("com.taosdata.jdbc.TSDBDriver", driverPath)
```

4. Create a TDengine database connection:

```r
# Create a database connection
conn <- dbConnect(drv, url)
```

5. Once the connection is established, you can use the `conn` object for various database operations such as querying and inserting data.

6. Finally, don't forget to close the database connection after you are done:

```r
# Close the database connection
dbDisconnect(conn)
```

## Example Code Using RJDBC in R

Here's an example that uses the RJDBC library to connect to a TDengine time-series database and perform a query operation:

<Rdemo/>

Please modify the JDBC driver path, JDBC URL, username, password, and SQL query statement according to your specific TDengine environment and requirements.

By following the steps above and using the provided example code, you can use the RJDBC library in R to access the TDengine time-series database and perform tasks such as data querying and analysis.
| **Parameter Binding**  | Support | Support | Support | Support | Support | Support |
| **Subscription (TMQ)** | Support | Support | Support | Support | Support | Support |
| **Schemaless**         | Support | Support | Support | Support | Support | Support |

:::info
The different database framework specifications for various programming languages do not mean that all C/C++ interfaces need a wrapper.

| -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- |
| **Connection Management**              | Support       | Support         | Support       | Support       | Support       | Support       |
| **Regular Query**                      | Support       | Support         | Support       | Support       | Support       | Support       |
| **Parameter Binding**                  | Support       | Support         | Support       | Support       | Not Supported | Support       |
| **Subscription (TMQ)**                 | Support       | Support         | Support       | Not Supported | Not Supported | Support       |
| **Schemaless**                         | Support       | Support         | Support       | Not Supported | Not Supported | Not Supported |
| **Bulk Pulling (based on WebSocket)**  | Support       | Support         | Support       | Support       | Support       | Support       |

:::warning
- **min**: The minimum value for a column/tag of this data type. Generated values will be greater than or equal to the minimum value.

- **max**: The maximum value for a column/tag of this data type. Generated values will be less than the maximum value.

- **fun**: This column of data is filled by a function. Currently only the sin and cos functions are supported. The input parameter is the timestamp converted to an angle value; the conversion formula is: angle x = input time column ts value % 360. Coefficient adjustment and a random fluctuation factor are also supported, expressed in a fixed-format expression such as fun="10\*sin(x)+100\*random(5)", where x represents the angle, ranging from 0 to 360 degrees, and the growth step is consistent with the time column step. 10 is the multiplication coefficient, 100 is the addition/subtraction coefficient, and 5 means a fluctuation within a random range of 5%. The currently supported data types are int, bigint, float, and double. Note: the expression format is fixed and cannot be rearranged.

- **values**: The value list for an nchar/binary column/tag; generated values will be chosen randomly from this list.

- **sql**: The SQL command to be executed. For a query against a super table, keep "xxxx" in the SQL command; the program will automatically replace it with all the sub-table names of the super table.
- **result**: The file in which to save the query result. If not specified, taosBenchmark will not save the result.

#### Data types in taosBenchmark

| #   | **TDengine**      | **taosBenchmark** |
| --- | :---------------: | :---------------: |
| 1   | TIMESTAMP         | timestamp         |
| 2   | INT               | int               |
| 3   | INT UNSIGNED      | uint              |
| 4   | BIGINT            | bigint            |
| 5   | BIGINT UNSIGNED   | ubigint           |
| 6   | FLOAT             | float             |
| 7   | DOUBLE            | double            |
| 8   | BINARY            | binary            |
| 9   | SMALLINT          | smallint          |
| 10  | SMALLINT UNSIGNED | usmallint         |
| 11  | TINYINT           | tinyint           |
| 12  | TINYINT UNSIGNED  | utinyint          |
| 13  | BOOL              | bool              |
| 14  | NCHAR             | nchar             |
| 15  | VARCHAR           | varchar           |
| 16  | JSON              | json              |

Note: data type names must be lowercase in taosBenchmark.
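For illustration, a sketch of how these column parameters might appear in a taosBenchmark JSON configuration (this is a fragment, not a complete config; the `name`/`type` keys follow taosBenchmark's column specification and the values are illustrative):

```json
"columns": [
  { "type": "int",   "name": "current", "min": 0, "max": 100 },
  { "type": "float", "name": "wave",    "fun": "10*sin(x)+100*random(5)" },
  { "type": "nchar", "name": "status",  "values": ["ok", "warn", "error"] }
]
```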
```
  -e, --escape-character       Use escaped character for database name
  -N, --without-property       Dump database without its properties.
  -s, --schemaonly             Only dump table schemas.
  -d, --avro-codec=snappy      Choose an avro codec among null, deflate, snappy,
                               and lzma.
  -S, --start-time=START_TIME  Start time to dump. Either epoch or
```
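For example, a sketch combining the options above to dump only the schemas of database `test`, starting from an epoch timestamp:

```bash
taosdump -s -S 1626006833000 test
```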
Use the `uid` value obtained above as the `-E` input.

```bash
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
```

If you want to monitor multiple TDengine clusters, you need to set up a separate TDinsight dashboard for each. Setting up a non-default TDinsight requires some changes: the `-n`, `-i`, and `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if you use the built-in SMS alerting feature.
## List of supported platforms for TDengine server

|       | **Windows Server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18 or later** | **macOS** |
| ----- | ---------------------------- | ----------------- | ---------------- | ---------------------- | --------- |
| X64   | ●/E                          | ●/E               | ●                | ●                      | ●         |
| ARM64 |                              |                   | ●                |                        | ●         |

Note: 1) ● means officially tested and verified, ○ means unofficially tested and verified, E means only supported by the enterprise edition. 2) The community edition only supports newer versions of mainstream operating systems, including Ubuntu 18+/CentOS 7+/RedHat/Debian/CoreOS/FreeBSD/openSUSE/SUSE Linux/Fedora/macOS, etc. If you have requirements for other operating systems and editions, please contact enterprise edition support.

## List of supported platforms for TDengine clients and connectors
## Configuration File on Server Side

On the server side, the actual service of TDengine is provided by the executable `taosd`, whose parameters can be configured in the file `taos.cfg` to meet the requirements of different use cases. The default location of `taos.cfg` is `/etc/taos` on Linux systems and `C:\TDengine` on Windows systems. The location of the configuration file can be specified with the `-c` parameter on the `taosd` command line. For example, on Linux the configuration file can be put under `/home/user` and used like below:

```
taosd -c /home/user
```

## Configuration File on Client Side

TDengine CLI `taos` is the tool for users to interact with TDengine. It can share the same configuration file as `taosd` or use a separate one. When launching `taos`, the `-c` parameter can be used to specify the location of its configuration file. For example:

```
taos -c /home/cfg
```

means `/home/cfg/taos.cfg` will be used. If `-c` is not used, the default location of the configuration file is `/etc/taos`. For more details, run `taos --help`.
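For reference, a minimal sketch of what such a `taos.cfg` might contain (the parameter names are documented in the sections below; the values are illustrative):

```
firstEp      localhost:6030
serverPort   6030
dataDir      /var/lib/taos
monitor      1
monitorFqdn  localhost
monitorPort  6043
```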
The `-C` parameter can be used on the CLI of `taos` to show its configuration, like below:

```bash
taos -C
```

## Configuration Parameters

:::note
### firstEp

| Attribute  | Description                                                                                          |
| ---------- | ---------------------------------------------------------------------------------------------------- |
| Applicable | Server and Client                                                                                    |
| Meaning    | The end point of the first dnode in the cluster to be connected to when `taosd` or `taos` is started |
| Default    | localhost:6030                                                                                       |

### secondEp

| Attribute  | Description                                                                                                            |
| ---------- | ------------------------------------------------------------------------------------------------------------------------ |
| Applicable | Server and Client                                                                                                        |
| Meaning    | The end point of the second dnode to be connected to if the firstEp is not available when `taosd` or `taos` is started |
| Default    | None                                                                                                                     |
### serverPort

| Attribute     | Description                                           |
| ------------- | ----------------------------------------------------- |
| Applicable    | Server Only                                           |
| Meaning       | The port for external access after `taosd` is started |
| Default Value | 6030                                                  |

:::note
Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. The table below describes the ports used by TDengine in detail.
:::

| Protocol | Default Port | Description                                                                                    | How to configure                                                                               |
| :------- | :----------- | :---------------------------------------------------------------------------------------------- | :----------------------------------------------------------------------------------------------- |
| TCP      | 6030         | Communication between client and server. In a multi-node cluster, communication between nodes. | serverPort                                                                                     |
| TCP      | 6041         | REST connection between client and server                                                      | Prior to 2.4.0.0: serverPort+11; after 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) |
| TCP      | 6043         | Service port of taosKeeper                                                                     | The parameter of taosKeeper                                                                    |
| TCP      | 6044         | Data access port for StatsD                                                                    | Configurable through taosAdapter parameters.                                                   |
| UDP      | 6045         | Data access for statsd                                                                         | Configurable through taosAdapter parameters.                                                   |
| TCP      | 6060         | Port of Monitoring Service in Enterprise version                                               |                                                                                                |
### maxShellConns

| Attribute     | Description |
| ------------- | ----------- |
| Value Range   | 10-50000000 |
| Default Value | 5000        |

### numOfRpcSessions

| Attribute     | Description                                 |
| ------------- | ------------------------------------------- |
| Applicable    | Client/Server                               |
| Meaning       | The maximum number of connections to create |
| Value Range   | 100-100000                                  |
| Default Value | 10000                                       |

### timeToGetAvailableConn

| Attribute     | Description                                             |
| ------------- | ------------------------------------------------------- |
| Applicable    | Client/Server                                           |
| Meaning       | The maximum waiting time to get an available connection |
| Value Range   | 10-50000000 (ms)                                        |
| Default Value | 500000                                                  |
## Monitoring Parameters

:::note
Please note that `taoskeeper` needs to be installed and running to create the `log` database and receive the metrics sent by `taosd` as the full monitoring solution.
:::

### monitor

| Attribute   | Description |
| ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Applicable  | Server only |
| Meaning     | The switch for monitoring inside the server. The main object of monitoring is to collect information about the load on physical nodes, including CPU usage, memory usage, disk usage, and network bandwidth. Monitoring information is sent over HTTP to the taosKeeper service specified by `monitorFqdn` and `monitorPort`. |
| Value Range | 0: monitoring disabled, 1: monitoring enabled |
| Default     | 0 |

### monitorFqdn

| Attribute  | Description                           |
| ---------- | ------------------------------------- |
| Applicable | Server Only                           |
| Meaning    | FQDN of taosKeeper monitoring service |
| Default    | None                                  |
### monitorPort

| Attribute     | Description                           |
| ------------- | ------------------------------------- |
| Applicable    | Server Only                           |
| Meaning       | Port of taosKeeper monitoring service |
| Default Value | 6043                                  |

### monitorInterval

| Attribute  | Description                                |
| ---------- | ------------------------------------------ |
| Applicable | Server Only                                |
| Meaning    | The interval of collecting system workload |
| Unit       | second                                     |
### telemetryReporting

| Attribute     | Description                                                                   |
| ------------- | ----------------------------------------------------------------------------- |
| Applicable    | Server and Client                                                             |
| Meaning       | Switch for allowing TDengine to collect and report service usage information  |
| Value Range   | 0: Not allowed; 1: Allowed                                                    |
| Default Value | 1                                                                             |

### crashReporting

| Attribute     | Description                                                                   |
| ------------- | ----------------------------------------------------------------------------- |
| Applicable    | Server and Client                                                             |
| Meaning       | Switch for allowing TDengine to collect and report crash-related information  |
| Value Range   | 0: Not allowed; 1: Allowed                                                    |
| Default Value | 1                                                                             |
### queryPolicy

| Attribute  | Description                           |
| ---------- | ------------------------------------- |
| Applicable | Client only                           |
| Meaning    | Execution policy for query statements |
| Unit       | None                                  |
### querySmaOptimize

| Attribute     | Description                   |
| ------------- | ----------------------------- |
| Applicable    | Client only                   |
| Meaning       | SMA index optimization policy |
| Unit          | None                          |
| Default Value | 0                             |
| Notes         | 0: Disable SMA indexing and perform all queries on non-indexed data; 1: Enable SMA indexing and perform queries from suitable statements on precomputed results. |

### countAlwaysReturnValue

| Attribute   | Description                                                                      |
| ----------- | --------------------------------------------------------------------------------- |
| Applicable  | Server only                                                                      |
| Meaning     | Whether count()/hyperloglog() returns a value if the input data is empty or NULL |
| Value Range | 0: Return an empty row, 1: Return 0                                              |
### maxNumOfDistinctRes

| Attribute   | Description                                  |
| ----------- | -------------------------------------------- |
| Applicable  | Server Only                                  |
| Meaning     | The maximum number of distinct rows returned |
| Value Range | [100,000 - 100,000,000]                      |
### keepColumnName

| Attribute   | Description                                                                                         |
| ----------- | ----------------------------------------------------------------------------------------------------- |
| Applicable  | Client only                                                                                           |
| Meaning     | Whether the column name returned by the Last, First, or LastRow function contains the function name. |
| Value Range | 0 means the function name is included, 1 means it is not included.                                    |
### timezone

| Attribute     | Description                     |
| ------------- | ------------------------------- |
| Applicable    | Server and Client               |
| Meaning       | TimeZone                        |
| Default Value | TimeZone configured in the host |
### dataDir

| Attribute     | Description                                 |
| ------------- | ------------------------------------------- |
| Applicable    | Server Only                                 |
| Meaning       | All data files are stored in this directory |
| Default Value | /var/lib/taos                               |
@ -324,7 +349,7 @@ The charset that takes effect is UTF-8.
|
||||||
### tempDir
|
### tempDir
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------ |
|
| ---------- | ---------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server only |
|
| Applicable | Server only |
|
||||||
| Meaning | The directory where all temporary files generated during system running are stored |
|
| Meaning | The directory where all temporary files generated during system running are stored |
|
||||||
| Default | /tmp |
|
| Default | /tmp |
|
||||||
|
@ -332,7 +357,7 @@ The charset that takes effect is UTF-8.
|
||||||
### minimalTmpDirGB
|
### minimalTmpDirGB
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------ |
|
| ------------- | ----------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | When the available disk space in tmpDir is below this threshold, writing to tmpDir is suspended |
|
| Meaning | When the available disk space in tmpDir is below this threshold, writing to tmpDir is suspended |
|
||||||
| Unit | GB |
|
| Unit | GB |
|
||||||
|
@ -341,18 +366,28 @@ The charset that takes effect is UTF-8.
|
||||||
### minimalDataDirGB
|
### minimalDataDirGB
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------ |
|
| ------------- | ------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | When the available disk space in dataDir is below this threshold, writing to dataDir is suspended |
|
| Meaning | When the available disk space in dataDir is below this threshold, writing to dataDir is suspended |
|
||||||
| Unit | GB |
|
| Unit | GB |
|
||||||
| Default Value | 2.0 |
|
| Default Value | 2.0 |
|
||||||
|
|
||||||
|
### metaCacheMaxSize
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| ------------- | ------------------------------------------------------------------------------------------------- |
|
||||||
|
| Applicable | Client Only |
|
||||||
|
| Meaning | Maximum meta cache size in a single client process |
|
||||||
|
| Unit | MB |
|
||||||
|
| Default Value | -1 (No limitation) |
|
||||||
|
|
||||||
|
|
||||||
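A client process that touches a very large number of tables may want to bound this cache, for example (an illustrative taos.cfg excerpt; the 512 MB figure is an arbitrary assumption):

```title="taos.cfg"
metaCacheMaxSize 512
```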
## Cluster Parameters
|
## Cluster Parameters
|
||||||
|
|
||||||
### supportVnodes
|
### supportVnodes
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | --------------------------- |
|
| ------------- | ---------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Maximum number of vnodes per dnode |
|
| Meaning | Maximum number of vnodes per dnode |
|
||||||
| Value Range | 0-4096 |
|
| Value Range | 0-4096 |
|
||||||
|
@ -374,7 +409,7 @@ The charset that takes effect is UTF-8.
|
||||||
### logDir
|
### logDir
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------------------------- |
|
| ------------- | ----------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | The directory for writing log files |
|
| Meaning | The directory for writing log files |
|
||||||
| Default Value | /var/log/taos |
|
| Default Value | /var/log/taos |
|
||||||
|
@ -382,7 +417,7 @@ The charset that takes effect is UTF-8.
|
||||||
### minimalLogDirGB
|
### minimalLogDirGB
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------------------- |
|
| ------------- | -------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | When the available disk space in logDir is below this threshold, writing to log files is suspended |
|
| Meaning | When the available disk space in logDir is below this threshold, writing to log files is suspended |
|
||||||
| Unit | GB |
|
| Unit | GB |
|
||||||
|
@ -391,7 +426,7 @@ The charset that takes effect is UTF-8.
|
||||||
### numOfLogLines
|
### numOfLogLines
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ---------------------------- |
|
| ------------- | ------------------------------------------ |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Maximum number of lines in a single log file |
|
| Meaning | Maximum number of lines in a single log file |
|
||||||
| Default Value | 10000000 |
|
| Default Value | 10000000 |
|
||||||
|
@ -399,7 +434,7 @@ The charset that takes effect is UTF-8.
|
||||||
### asyncLog
|
### asyncLog
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ---------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | The mode of writing log file |
|
| Meaning | The mode of writing log file |
|
||||||
| Value Range | 0: sync way; 1: async way |
|
| Value Range | 0: sync way; 1: async way |
|
||||||
|
@ -408,17 +443,37 @@ The charset that takes effect is UTF-8.
|
||||||
### logKeepDays
|
### logKeepDays
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------------------------------------------------------------------------- |
|
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | The number of days for log files to be kept |
|
| Meaning | The number of days for log files to be kept |
|
||||||
| Unit | day |
|
| Unit | day |
|
||||||
| Default Value | 0 |
|
| Default Value | 0 |
|
||||||
| Note | When it is greater than 0, the log file is renamed to "taosdlog.xxx", where "xxx" is the timestamp of the last file change |
|
| Note | When it is greater than 0, the log file is renamed to "taosdlog.xxx", where "xxx" is the timestamp of the last file change |
|
||||||
|
|
||||||
|
### slowLogThreshold
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| ------------- | -------------------------------------------------------------------------------------------------------- |
|
||||||
|
| Applicable | Client only |
|
||||||
|
| Meaning | When an operation's execution time exceeds this threshold, the operation is logged in the slow log file |
|
||||||
|
| Unit | second |
|
||||||
|
| Default Value | 3 |
|
||||||
|
| Note | All slow operations are logged in the file "taosSlowLog" in the log directory |
|
||||||
|
|
||||||
|
### slowLogScope
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| --------------- | ----------------------------------------------------------------------- |
|
||||||
|
| Applicable | Client only |
|
||||||
|
| Meaning | Slow log type to be logged |
|
||||||
|
| Optional Values | ALL, QUERY, INSERT, OTHERS, NONE |
|
||||||
|
| Default Value | ALL |
|
||||||
|
| Note | All slow operations are logged by default; one of the scope options can be set instead |
|
||||||
|
|
||||||
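For example, to record only queries that run longer than 10 seconds, the two options above can be combined on the client side (an illustrative taos.cfg excerpt; the values are assumptions):

```title="taos.cfg"
slowLogThreshold 10
slowLogScope     QUERY
```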
### debugFlag
|
### debugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------------------------------------------------------- |
|
| ------------- | --------------------------------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level |
|
| Meaning | Log level |
|
||||||
| Value Range | 131: INFO/WARNING/ERROR; 135: plus DEBUG; 143: plus TRACE |
|
| Value Range | 131: INFO/WARNING/ERROR; 135: plus DEBUG; 143: plus TRACE |
|
||||||
|
@ -427,7 +482,7 @@ The charset that takes effect is UTF-8.
|
||||||
### tmrDebugFlag
|
### tmrDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level of timer module |
|
| Meaning | Log level of timer module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -436,7 +491,7 @@ The charset that takes effect is UTF-8.
|
||||||
### uDebugFlag
|
### uDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ---------------------- |
|
| ------------- | -------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level of common module |
|
| Meaning | Log level of common module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -445,7 +500,7 @@ The charset that takes effect is UTF-8.
|
||||||
### rpcDebugFlag
|
### rpcDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level of rpc module |
|
| Meaning | Log level of rpc module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -454,7 +509,7 @@ The charset that takes effect is UTF-8.
|
||||||
### jniDebugFlag
|
### jniDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Client Only |
|
| Applicable | Client Only |
|
||||||
| Meaning | Log level of jni module |
|
| Meaning | Log level of jni module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -463,7 +518,7 @@ The charset that takes effect is UTF-8.
|
||||||
### qDebugFlag
|
### qDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level of query module |
|
| Meaning | Log level of query module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -472,7 +527,7 @@ The charset that takes effect is UTF-8.
|
||||||
### cDebugFlag
|
### cDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | --------------------- |
|
| ------------- | ------------------- |
|
||||||
| Applicable | Client Only |
|
| Applicable | Client Only |
|
||||||
| Meaning | Log level of Client |
|
| Meaning | Log level of Client |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -481,7 +536,7 @@ The charset that takes effect is UTF-8.
|
||||||
### dDebugFlag
|
### dDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------ |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of dnode |
|
| Meaning | Log level of dnode |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -490,7 +545,7 @@ The charset that takes effect is UTF-8.
|
||||||
### vDebugFlag
|
### vDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------ |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of vnode |
|
| Meaning | Log level of vnode |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -499,7 +554,7 @@ The charset that takes effect is UTF-8.
|
||||||
### mDebugFlag
|
### mDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of mnode module |
|
| Meaning | Log level of mnode module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -508,7 +563,7 @@ The charset that takes effect is UTF-8.
|
||||||
### wDebugFlag
|
### wDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of WAL module |
|
| Meaning | Log level of WAL module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -517,7 +572,7 @@ The charset that takes effect is UTF-8.
|
||||||
### sDebugFlag
|
### sDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------ |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level of sync module |
|
| Meaning | Log level of sync module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -526,7 +581,7 @@ The charset that takes effect is UTF-8.
|
||||||
### tsdbDebugFlag
|
### tsdbDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------- |
|
| ------------- | ------------------------ |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of TSDB module |
|
| Meaning | Log level of TSDB module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -535,7 +590,7 @@ The charset that takes effect is UTF-8.
|
||||||
### tqDebugFlag
|
### tqDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------- |
|
| ------------- | ---------------------- |
|
||||||
| Applicable | Server only |
|
| Applicable | Server only |
|
||||||
| Meaning | Log level of TQ module |
|
| Meaning | Log level of TQ module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -544,7 +599,7 @@ The charset that takes effect is UTF-8.
|
||||||
### fsDebugFlag
|
### fsDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------- |
|
| ------------- | ---------------------- |
|
||||||
| Applicable | Server only |
|
| Applicable | Server only |
|
||||||
| Meaning | Log level of FS module |
|
| Meaning | Log level of FS module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -553,7 +608,7 @@ The charset that takes effect is UTF-8.
|
||||||
### udfDebugFlag
|
### udfDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of UDF module |
|
| Meaning | Log level of UDF module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -562,7 +617,7 @@ The charset that takes effect is UTF-8.
|
||||||
### smaDebugFlag
|
### smaDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of SMA module |
|
| Meaning | Log level of SMA module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -571,7 +626,7 @@ The charset that takes effect is UTF-8.
|
||||||
### idxDebugFlag
|
### idxDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of index module |
|
| Meaning | Log level of index module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -580,7 +635,7 @@ The charset that takes effect is UTF-8.
|
||||||
### tdbDebugFlag
|
### tdbDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of TDB module |
|
| Meaning | Log level of TDB module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
|
@ -591,7 +646,7 @@ The charset that takes effect is UTF-8.
|
||||||
### smlChildTableName
|
### smlChildTableName
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------- |
|
| ------------- | ------------------------------------------ |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| Meaning | Custom subtable name for schemaless writes |
|
| Meaning | Custom subtable name for schemaless writes |
|
||||||
| Type | String |
|
| Type | String |
|
||||||
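For example, to take subtable names from a tag in the schemaless input (an illustrative taos.cfg excerpt; `tname` is a hypothetical tag key whose value becomes the subtable name):

```title="taos.cfg"
smlChildTableName tname
```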
|
@ -600,7 +655,7 @@ The charset that takes effect is UTF-8.
|
||||||
### smlTagName
|
### smlTagName
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------ |
|
| ------------- | ------------------------------------------------------------- |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| Meaning | Default tag for schemaless writes without tag value specified |
|
| Meaning | Default tag for schemaless writes without tag value specified |
|
||||||
| Type | String |
|
| Type | String |
|
||||||
|
@ -609,18 +664,27 @@ The charset that takes effect is UTF-8.
|
||||||
### smlDataFormat
|
### smlDataFormat
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------------------- |
|
| ----------- | ----------------------------------------------------------------------------------- |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| Meaning | Whether schemaless columns are consistently ordered; deprecated and discarded since 3.0.3.0 |
|
| Meaning | Whether schemaless columns are consistently ordered; deprecated and discarded since 3.0.3.0 |
|
||||||
| Value Range | 0: not consistent; 1: consistent. |
|
| Value Range | 0: not consistent; 1: consistent. |
|
||||||
| Default | 0 |
|
| Default | 0 |
|
||||||
|
|
||||||
|
### smlTsDefaultName
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| -------- | -------------------------------------------------------- |
|
||||||
|
| Applicable | Client only |
|
||||||
|
| Meaning | The name of the time column for tables automatically created by schemaless writing |
|
||||||
|
| Type | String |
|
||||||
|
| Default Value | _ts |
|
||||||
|
|
||||||
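For example, to have schemaless writes name the time column `time` instead of the default `_ts` (an illustrative taos.cfg excerpt):

```title="taos.cfg"
smlTsDefaultName time
```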
## Compress Parameters
|
## Compress Parameters
|
||||||
|
|
||||||
### compressMsgSize
|
### compressMsgSize
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------------------- |
|
| ----------- | ------------------------------------------------------------------------------------------------------------------ |
|
||||||
| Applicable | Both Client and Server side |
|
| Applicable | Both Client and Server side |
|
||||||
| Meaning | Whether RPC message is compressed |
|
| Meaning | Whether RPC message is compressed |
|
||||||
| Value Range | -1: no message is compressed; 0: all messages are compressed; N (N>0): messages exceeding N bytes are compressed |
|
| Value Range | -1: no message is compressed; 0: all messages are compressed; N (N>0): messages exceeding N bytes are compressed |
|
||||||
|
@ -632,7 +696,7 @@ The charset that takes effect is UTF-8.
|
||||||
### enableCoreFile
|
### enableCoreFile
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Whether to generate core file when server crashes |
|
| Meaning | Whether to generate core file when server crashes |
|
||||||
| Value Range | 0: false, 1: true |
|
| Value Range | 0: false, 1: true |
|
||||||
|
@ -642,7 +706,7 @@ The charset that takes effect is UTF-8.
|
||||||
### enableScience
|
### enableScience
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
| ------------- | ------------------------------------------------------------- |
|
||||||
| Applicable | Only taos-CLI client |
|
| Applicable | Only taos-CLI client |
|
||||||
| Meaning | Whether to show float and double with the scientific notation |
|
| Meaning | Whether to show float and double with the scientific notation |
|
||||||
| Value Range | 0: false, 1: true |
|
| Value Range | 0: false, 1: true |
|
||||||
|
@ -652,17 +716,44 @@ The charset that takes effect is UTF-8.
|
||||||
### udf
|
### udf
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ---------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Whether the UDF service is enabled |
|
| Meaning | Whether the UDF service is enabled |
|
||||||
| Value Range | 0: disable UDF; 1: enable UDF |
|
| Value Range | 0: disable UDF; 1: enable UDF |
|
||||||
| Default Value | 1 |
|
| Default Value | 1 |
|
||||||
|
|
||||||
|
### ttlChangeOnWrite
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| ------------- | ----------------------------------------------------------------------------- |
|
||||||
|
| Applicable | Server Only |
|
||||||
|
| Meaning | Whether the TTL expiration time changes with table modification operations |
|
||||||
|
| Value Range | 0: not change; 1: change by modification |
|
||||||
|
| Default Value | 0 |
|
||||||
|
|
||||||
|
### keepTimeOffset
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| ------------- | ------------------------- |
|
||||||
|
| Applicable | Server Only |
|
||||||
|
| Meaning | Latency of data migration |
|
||||||
|
| Unit | hour |
|
||||||
|
| Value Range | 0-23 |
|
||||||
|
| Default Value | 0 |
|
||||||
|
|
||||||
|
### tmqMaxTopicNum
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| -------- | ------------------ |
|
||||||
|
| Applicable | Server Only |
|
||||||
|
| Meaning | The maximum number of topics |
|
||||||
|
| Value Range | 1-10000 |
|
||||||
|
| Default Value | 20 |
|
||||||
|
|
||||||
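If a deployment needs more than the default 20 topics, the cap can be raised on the server side (an illustrative taos.cfg excerpt; 100 is an arbitrary value within the allowed range):

```title="taos.cfg"
tmqMaxTopicNum 100
```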
## 3.0 Parameters
|
## 3.0 Parameters
|
||||||
|
|
||||||
| # | **Parameter** | **Applicable to 2.x ** | **Applicable to 3.0 ** | Current behavior in 3.0 |
|
| # | **Parameter** | **Applicable to 2.x ** | **Applicable to 3.0 ** | Current behavior in 3.0 |
|
||||||
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
|
| --- | :--------------------: | ---------------------- | ---------------------------- | ----------------------- |
|
||||||
| 1 | firstEp | Yes | Yes | |
|
| 1 | firstEp | Yes | Yes | |
|
||||||
| 2 | secondEp | Yes | Yes | |
|
| 2 | secondEp | Yes | Yes | |
|
||||||
| 3 | fqdn | Yes | Yes | |
|
| 3 | fqdn | Yes | Yes | |
|
||||||
|
@ -715,3 +806,5 @@ The charset that takes effect is UTF-8.
|
||||||
| 52 | charset | Yes | Yes | |
|
| 52 | charset | Yes | Yes | |
|
||||||
| 53 | udf | Yes | Yes | |
|
| 53 | udf | Yes | Yes | |
|
||||||
| 54 | enableCoreFile | Yes | Yes | |
|
| 54 | enableCoreFile | Yes | Yes | |
|
||||||
|
| 55 | ttlChangeOnWrite | No | Yes | |
|
||||||
|
| 56 | keepTimeOffset | Yes | Yes | |
|
||||||
|
|
|
@ -34,7 +34,27 @@ In the schemaless writing data line protocol, each data item in the field_set ne
|
||||||
|
|
||||||
- If there are English double quotes on both sides, it indicates the BINARY(32) type. For example, `"abc"`.
|
- If there are English double quotes on both sides, it indicates the BINARY(32) type. For example, `"abc"`.
|
||||||
- If there are double quotes on both sides and an L prefix, it means NCHAR(32) type. For example, `L"error message"`.
|
- If there are double quotes on both sides and an L prefix, it means NCHAR(32) type. For example, `L"error message"`.
|
||||||
- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\\) in front. (All refer to the ASCII character)
|
- Spaces, equal signs (=), commas (,), double quotes ("), and backslashes (\\) need to be escaped with a preceding backslash (\\). (All refer to ASCII characters.) The rules are as follows:
|
||||||
|
|
||||||
|
| **Serial number** | **Element** | **Escape characters** |
|
||||||
|
| -------- | ----------- | ----------------------------- |
|
||||||
|
| 1 | Measurement | Comma, Space |
|
||||||
|
| 2 | Tag key | Comma, Equals Sign, Space |
|
||||||
|
| 3 | Tag value | Comma, Equals Sign, Space |
|
||||||
|
| 4 | Field key | Comma, Equals Sign, Space |
|
||||||
|
| 5 | Field value | Double quote, Backslash |
|
||||||
|
|
||||||
|
With two contiguous backslashes, the first is interpreted as an escape character. Examples of backslash escape rules are as follows:
|
||||||
|
|
||||||
|
| **Serial number** | **Backslashes** | **Interpreted as** |
|
||||||
|
| -------- | ----------- | ----------------------------- |
|
||||||
|
| 1 | \ | \ |
|
||||||
|
| 2 | \\\\ | \ |
|
||||||
|
| 3 | \\\\\\ | \\\\ |
|
||||||
|
| 4 | \\\\\\\\ | \\\\ |
|
||||||
|
| 5 | \\\\\\\\\\ | \\\\\\ |
|
||||||
|
| 6 | \\\\\\\\\\\\ | \\\\\\ |
|
||||||
|
|
||||||
- Numeric types will be distinguished from data types by the suffix.
|
- Numeric types will be distinguished from data types by the suffix.
|
||||||
|
|
||||||
| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
|
| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
|
||||||
|
@ -88,9 +108,11 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam
|
||||||
|
|
||||||
8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.
|
8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.
|
||||||
Note: TDengine 3.0.3.0 and later automatically detect whether order is consistent. This parameter is no longer used.
|
Note: TDengine 3.0.3.0 and later automatically detect whether order is consistent. This parameter is no longer used.
|
||||||
|
9. Because SQL table names do not support the period (.), schemaless writing handles periods as well: if a period appears in a table name automatically created by schemaless writing, it is automatically replaced with an underscore (\_). If you manually specify a subtable name that contains a period (.), it is also converted to an underscore (\_).
|
||||||
|
10. taos.cfg adds the smlTsDefaultName configuration (string value), which takes effect only on the client side. When set, it specifies the name of the time column for tables automatically created by schemaless writing. If it is not configured, the name defaults to _ts.
|
||||||
|
|
||||||
:::tip
|
:::tip
|
||||||
All processing logic of schemaless writing still follows TDengine's underlying restrictions on data structures; for example, the total length of each row of data cannot exceed 48 KB and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
|
All processing logic of schemaless writing still follows TDengine's underlying restrictions on data structures; for example, the total length of each row of data cannot exceed 48 KB (64 KB since version 3.0.5.0) and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
## Time resolution recognition
|
## Time resolution recognition
|
||||||
|
|
|
@ -16,165 +16,79 @@ TDengine Source Connector is used to read data from TDengine in real-time and se
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
## What is Confluent?
|
|
||||||
|
|
||||||
[Confluent](https://www.confluent.io/) adds many extensions to Kafka, including:
|
|
||||||
|
|
||||||
1. Schema Registry
|
|
||||||
2. REST Proxy
|
|
||||||
3. Non-Java Clients
|
|
||||||
4. Many packaged Kafka Connect plugins
|
|
||||||
5. GUI for managing and monitoring Kafka - Confluent Control Center
|
|
||||||
|
|
||||||
Some of these extensions are available in the community version of Confluent. Some are only available in the enterprise version.
|
|
||||||

|
|
||||||
|
|
||||||
Confluent Enterprise Edition provides the `confluent` command-line tool to manage various components.
|
|
||||||
|
|
||||||
## Prerequisites
|
## Prerequisites
|
||||||
|
|
||||||
1. Linux operating system
|
1. Linux operating system
|
||||||
2. Java 8 and Maven installed
|
2. Java 8 and Maven installed
|
||||||
3. Git is installed
|
3. Git, curl, and vi are installed
|
||||||
4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](/operation/pkg-install)
|
4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](/operation/pkg-install)
|
||||||
|
|
||||||
## Install Confluent
|
## Install Kafka
|
||||||
|
|
||||||
Confluent provides two installation methods: Docker and binary packages. This article only introduces binary package installation.
|
|
||||||
|
|
||||||
Execute in any directory:
|
Execute in any directory:
|
||||||
|
|
||||||
````
|
```shell
|
||||||
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
|
curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
|
||||||
tar xzf confluent-7.1.1.tar.gz -C /opt/
|
tar xzf kafka_2.13-3.4.0.tgz -C /opt/
|
||||||
````
|
ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
|
||||||
|
```
|
||||||
|
|
||||||
Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
|
Then you need to add the `$KAFKA_HOME/bin` directory to the PATH.
|
||||||
|
|
||||||
```title=".profile"
|
```title=".profile"
|
||||||
export CONFLUENT_HOME=/opt/confluent-7.1.1
|
export KAFKA_HOME=/opt/kafka
|
||||||
export PATH=$CONFLUENT_HOME/bin:$PATH
|
export PATH=$PATH:$KAFKA_HOME/bin
|
||||||
```
|
```
|
||||||
|
|
||||||
Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile)
|
Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile)
|
||||||
|
|
||||||
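To apply the change to the current shell session:

```shell
source ~/.profile
```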
After the installation is complete, you can enter `confluent version` for simple verification:
|
|
||||||
|
|
||||||
```
|
|
||||||
# confluent version
|
|
||||||
confluent - Confluent CLI
|
|
||||||
|
|
||||||
Version: v2.6.1
|
|
||||||
Git Ref: 6d920590
|
|
||||||
Build Date: 2022-02-18T06:14:21Z
|
|
||||||
Go Version: go1.17.6 (linux/amd64)
|
|
||||||
Development: false
|
|
||||||
```
|
|
||||||
|
|
||||||
## Install TDengine Connector plugin
|
## Install TDengine Connector plugin
|
||||||
|
|
||||||
### Install from source code
|
### Install from source code
|
||||||
|
|
||||||
```
|
```shell
|
||||||
git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
|
git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
|
||||||
cd kafka-connect-tdengine
|
cd kafka-connect-tdengine
|
||||||
mvn clean package
|
mvn clean package -Dmaven.test.skip=true
|
||||||
unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
|
unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
|
||||||
```
|
```
|
||||||
|
|
||||||
The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the plugin path. We used `$CONFLUENT_HOME/share/java/` above because it is a built-in plugin path.
|
The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the plugin path. We used `$KAFKA_HOME/components/` above because it is a built-in plugin path.
|
||||||
|
|
||||||
### Install with confluent-hub
|
### Add configuration file
|
||||||
|
|
||||||
[Confluent Hub](https://www.confluent.io/hub) provides a service to download Kafka Connect plugins. After TDengine Kafka Connector is published to Confluent Hub, it can be installed using the command tool `confluent-hub`.
|
Add the kafka-connect-tdengine plugin path to `plugin.path` in `$KAFKA_HOME/config/connect-distributed.properties`.
|
||||||
**TDengine Kafka Connector is currently not officially released and cannot be installed in this way**.
|
|
||||||
|
|
||||||
## Start Confluent
|
```properties
|
||||||
|
plugin.path=/usr/share/java,/opt/kafka/components
|
||||||
```
|
|
||||||
confluent local services start
|
|
||||||
```
|
```
|
||||||
|
|
||||||
:::note
|
## Start Kafka Services
|
||||||
Be sure to install the plugin before starting Confluent. Otherwise, Kafka Connect will fail to discover the plugins.
|
|
||||||
:::
|
|
||||||
|
|
||||||
:::tip
|
Use the commands below to start all services:
|
||||||
If a component fails to start, try clearing the data and restarting. The data directory will be printed to the console at startup, e.g.:
|
|
||||||
|
|
||||||
```title="Console output log" {1}
|
```shell
|
||||||
Using CONFLUENT_CURRENT: /tmp/confluent.106668
|
zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties
|
||||||
Starting ZooKeeper
|
|
||||||
ZooKeeper is [UP]
|
|
||||||
Starting Kafka
|
|
||||||
Kafka is [UP]
|
|
||||||
Starting Schema Registry
|
|
||||||
Schema Registry is [UP]
|
|
||||||
Starting Kafka REST
|
|
||||||
Kafka REST is [UP]
|
|
||||||
Starting Connect
|
|
||||||
Connect is [UP]
|
|
||||||
Starting ksqlDB Server
|
|
||||||
ksqlDB Server is [UP]
|
|
||||||
Starting Control Center
|
|
||||||
Control Center is [UP]
|
|
||||||
```
|
|
||||||
|
|
||||||
To clear data, execute `rm -rf /tmp/confluent.106668`.
|
kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
|
||||||
:::
|
|
||||||
|
|
||||||
### Check Confluent Services Status
|
connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties
|
||||||
|
|
||||||
Use the command below to check the status of all services:
|
|
||||||
|
|
||||||
```
|
|
||||||
confluent local services status
|
|
||||||
```
|
|
||||||
|
|
||||||
The expected output is:
|
|
||||||
```
|
|
||||||
Connect is [UP]
|
|
||||||
Control Center is [UP]
|
|
||||||
Kafka is [UP]
|
|
||||||
Kafka REST is [UP]
|
|
||||||
ksqlDB Server is [UP]
|
|
||||||
Schema Registry is [UP]
|
|
||||||
ZooKeeper is [UP]
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Check Successfully Loaded Plugin
|
### Check Successfully Loaded Plugin
|
||||||
|
|
||||||
After Kafka Connect has started completely, you can use the command below to check whether our plugins are installed successfully:
|
After Kafka Connect has started completely, you can use the command below to check whether our plugins are installed successfully:
|
||||||
```
|
|
||||||
confluent local services connect plugin list
|
```shell
|
||||||
|
curl http://localhost:8083/connectors
|
||||||
```
|
```
|
||||||
|
|
||||||
The output should contain `TDengineSinkConnector` and `TDengineSourceConnector` as below:
|
The output is as below:
|
||||||
|
|
||||||
|
```txt
|
||||||
|
[]
|
||||||
```
|
```
|
||||||
Available Connect Plugins:
|
|
||||||
[
|
|
||||||
{
|
|
||||||
"class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
|
|
||||||
"type": "sink",
|
|
||||||
"version": "1.0.0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
|
|
||||||
"type": "source",
|
|
||||||
"version": "1.0.0"
|
|
||||||
},
|
|
||||||
......
|
|
||||||
```
|
|
||||||
|
|
||||||
If not, please check the log file of Kafka Connect. To view the log file path, please execute:
|
|
||||||
|
|
||||||
```
|
|
||||||
echo `cat /tmp/confluent.current`/connect/connect.stdout
|
|
||||||
```
|
|
||||||
It should produce a path like:`/tmp/confluent.104086/connect/connect.stdout`
|
|
||||||
|
|
||||||
Besides the log file `connect.stdout` there is a file named `connect.properties`. At the end of this file you can see the effective `plugin.path`, which is a series of paths joined by commas. If Kafka Connect did not find our plugins, it is probably because the installed path is not included in `plugin.path`.
|
|
||||||
|
|
||||||
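An empty list here only means that no connector instances have been created yet. To confirm that the TDengine plugin classes themselves were picked up, you can also query the standard Kafka Connect REST endpoint for installed plugins:

```shell
# the response should include com.taosdata.kafka.connect.sink.TDengineSinkConnector
# and com.taosdata.kafka.connect.source.TDengineSourceConnector
curl http://localhost:8083/connector-plugins
```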
## The use of TDengine Sink Connector
|
## The use of TDengine Sink Connector
|
||||||
|
|
||||||
|
@ -184,40 +98,47 @@ TDengine Sink Connector internally uses TDengine [modeless write interface](/ref
|
||||||
|
|
||||||
The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format.
|
The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format.
|
||||||
|
|
||||||
### Add configuration file
|
### Add Sink Connector configuration file
|
||||||
|
|
||||||
```
|
```shell
|
||||||
mkdir ~/test
|
mkdir ~/test
|
||||||
cd ~/test
|
cd ~/test
|
||||||
vi sink-demo.properties
|
vi sink-demo.json
|
||||||
```
|
```
|
||||||
|
|
||||||
The content of sink-demo.properties is as follows:
|
The content of sink-demo.json is as follows:
|
||||||
|
|
||||||
```ini title="sink-demo.properties"
|
```json title="sink-demo.json"
|
||||||
name=TDengineSinkConnector
|
{
|
||||||
connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
|
"name": "TDengineSinkConnector",
|
||||||
tasks.max=1
|
"config": {
|
||||||
topics=meters
|
"connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector",
|
||||||
connection.url=jdbc:TAOS://127.0.0.1:6030
|
"tasks.max": "1",
|
||||||
connection.user=root
|
"topics": "meters",
|
||||||
connection.password=taosdata
|
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
|
||||||
connection.database=power
|
"connection.user": "root",
|
||||||
db.schemaless=line
|
"connection.password": "taosdata",
|
||||||
data.precision=ns
|
"connection.database": "power",
|
||||||
key.converter=org.apache.kafka.connect.storage.StringConverter
|
"db.schemaless": "line",
|
||||||
value.converter=org.apache.kafka.connect.storage.StringConverter
|
"data.precision": "ns",
|
||||||
|
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||||
|
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||||
|
"errors.tolerance": "all",
|
||||||
|
"errors.deadletterqueue.topic.name": "dead_letter_topic",
|
||||||
|
"errors.deadletterqueue.topic.replication.factor": 1
|
||||||
|
}
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
Key configuration instructions:
|
Key configuration instructions:
|
||||||
|
|
||||||
1. `topics=meters` and `connection.database=power` means to subscribe to the data of the topic meters and write to the database power.
|
1. `"topics": "meters"` and `"connection.database": "power"` means to subscribe to the data of the topic meters and write to the database power.
|
||||||
2. `db.schemaless=line` means the data in the InfluxDB Line protocol format.
|
2. `"db.schemaless": "line"` means the data in the InfluxDB Line protocol format.
|
||||||
|
|
||||||
### Create Connector instance
|
### Create Sink Connector instance
|
||||||
|
|
||||||
````
|
````shell
|
||||||
confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties
|
curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
|
||||||
````
|
````
|
||||||
|
|
||||||
If the above command is executed successfully, the output is as follows:
|
If the above command is executed successfully, the output is as follows:
|
||||||
|
@ -237,7 +158,10 @@ If the above command is executed successfully, the output is as follows:
|
||||||
"tasks.max": "1",
|
"tasks.max": "1",
|
||||||
"topics": "meters",
|
"topics": "meters",
|
||||||
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
|
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||||
"name": "TDengineSinkConnector"
|
"name": "TDengineSinkConnector",
|
||||||
|
"errors.tolerance": "all",
|
||||||
|
"errors.deadletterqueue.topic.name": "dead_letter_topic",
|
||||||
|
"errors.deadletterqueue.topic.replication.factor": "1",
|
||||||
},
|
},
|
||||||
"tasks": [],
|
"tasks": [],
|
||||||
"type": "sink"
|
"type": "sink"
|
||||||
|
@ -257,8 +181,8 @@ meters,location=California.LoSangeles,groupid=3 current=11.3,voltage=221,phase=0
|
||||||
|
|
||||||
Use kafka-console-producer to write test data to the topic `meters`.
|
Use kafka-console-producer to write test data to the topic `meters`.
|
||||||
|
|
||||||
```
|
```shell
|
||||||
cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters
|
cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
|
||||||
```
|
```
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
|
@ -269,12 +193,12 @@ TDengine Sink Connector will automatically create the database if the target dat
|
||||||
|
|
||||||
Use the TDengine CLI to verify that the sync was successful.
|
Use the TDengine CLI to verify that the sync was successful.
|
||||||
|
|
||||||
```
|
```sql
|
||||||
taos> use power;
|
taos> use power;
|
||||||
Database changed.
|
Database changed.
|
||||||
|
|
||||||
taos> select * from meters;
|
taos> select * from meters;
|
||||||
ts | current | voltage | phase | groupid | location |
|
_ts | current | voltage | phase | groupid | location |
|
||||||
===============================================================================================================================================================
|
===============================================================================================================================================================
|
||||||
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
|
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
|
||||||
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
|
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
|
||||||
|
@ -291,32 +215,39 @@ The role of the TDengine Source Connector is to push all the data of a specific
|
||||||
|
|
||||||
TDengine Source Connector will convert the data in TDengine data table into [InfluxDB Line protocol format](/develop/insert-data/influxdb-line/) or [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json ) and then write to Kafka.
|
TDengine Source Connector will convert the data in TDengine data table into [InfluxDB Line protocol format](/develop/insert-data/influxdb-line/) or [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json ) and then write to Kafka.
|
||||||
|
|
||||||
The following sample program synchronizes the data in the database test to the topic tdengine-source-test.
|
The following sample program synchronizes the data in the database test to the topic tdengine-test-meters.
|
||||||
|
|
||||||
### Add configuration file
|
### Add Source Connector configuration file
|
||||||
|
|
||||||
```
|
```shell
|
||||||
vi source-demo.properties
|
vi source-demo.json
|
||||||
```
|
```
|
||||||
|
|
||||||
Input following content:
|
Input following content:
|
||||||
|
|
||||||
```ini title="source-demo.properties"
|
```json title="source-demo.json"
|
||||||
name=TDengineSourceConnector
|
{
|
||||||
connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector
|
"name":"TDengineSourceConnector",
|
||||||
tasks.max=1
|
"config":{
|
||||||
connection.url=jdbc:TAOS://127.0.0.1:6030
|
"connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
|
||||||
connection.username=root
|
"tasks.max": 1,
|
||||||
connection.password=taosdata
|
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
|
||||||
connection.database=test
|
"connection.username": "root",
|
||||||
connection.attempts=3
|
"connection.password": "taosdata",
|
||||||
connection.backoff.ms=5000
|
"connection.database": "test",
|
||||||
topic.prefix=tdengine-source-
|
"connection.attempts": 3,
|
||||||
poll.interval.ms=1000
|
"connection.backoff.ms": 5000,
|
||||||
fetch.max.rows=100
|
"topic.prefix": "tdengine",
|
||||||
out.format=line
|
"topic.delimiter": "-",
|
||||||
key.converter=org.apache.kafka.connect.storage.StringConverter
|
"poll.interval.ms": 1000,
|
||||||
value.converter=org.apache.kafka.connect.storage.StringConverter
|
"fetch.max.rows": 100,
|
||||||
|
"topic.per.stable": true,
|
||||||
|
"topic.ignore.db": false,
|
||||||
|
"out.format": "line",
|
||||||
|
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||||
|
"value.converter": "org.apache.kafka.connect.storage.StringConverter"
|
||||||
|
}
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
### Prepare test data
|
### Prepare test data
|
||||||
|
@ -341,40 +272,40 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1
|
||||||
|
|
||||||
Use TDengine CLI to execute SQL script
|
Use TDengine CLI to execute SQL script
|
||||||
|
|
||||||
```
|
```shell
|
||||||
taos -f prepare-source-data.sql
|
taos -f prepare-source-data.sql
|
||||||
```
|
```
|
||||||
|
|
||||||
### Create Connector instance
|
### Create Connector instance
|
||||||
|
|
||||||
````
|
```shell
|
||||||
confluent local services connect connector load TDengineSourceConnector --config source-demo.properties
|
curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
|
||||||
````
|
```
|
||||||
|
|
||||||
### View topic data
|
### View topic data
|
||||||
|
|
||||||
Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. At the beginning, all historical data will be output. After two new rows are inserted into TDengine, kafka-console-consumer immediately outputs them.
|
Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-test-meters. At the beginning, all historical data will be output. After two new rows are inserted into TDengine, kafka-console-consumer immediately outputs them. The output is in InfluxDB line protocol format.
|
||||||
|
|
||||||
````
|
````shell
|
||||||
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
|
kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-test-meters
|
||||||
````
|
````
|
||||||
|
|
||||||
output:
|
output:
|
||||||
|
|
||||||
````
|
```txt
|
||||||
......
|
......
|
||||||
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
|
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
|
||||||
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
|
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
|
||||||
......
|
......
|
||||||
````
|
```
|
||||||
|
|
||||||
All historical data is displayed. Switch to the TDengine CLI and insert two new pieces of data:
|
All historical data is displayed. Switch to the TDengine CLI and insert two new pieces of data:
|
||||||
|
|
||||||
````
|
```sql
|
||||||
USE test;
|
USE test;
|
||||||
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
|
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
|
||||||
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
|
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
|
||||||
````
|
```
|
||||||
|
|
||||||
Switch back to kafka-console-consumer; the command-line window now prints the two rows just inserted.
|
Switch back to kafka-console-consumer; the command-line window now prints the two rows just inserted.
|
||||||
|
|
||||||
|
@ -384,16 +315,16 @@ After testing, use the unload command to stop the loaded connector.
|
||||||
|
|
||||||
View currently active connectors:
|
View currently active connectors:
|
||||||
|
|
||||||
````
|
```shell
|
||||||
confluent local services connect connector status
|
curl http://localhost:8083/connectors
|
||||||
````
|
```
|
||||||
|
|
||||||
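Before unloading, you can also check an individual connector's health with the standard Kafka Connect status endpoint:

```shell
curl http://localhost:8083/connectors/TDengineSinkConnector/status
```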
You should now have two active connectors if you followed the previous steps. Use the following commands to unload them:
|
You should now have two active connectors if you followed the previous steps. Use the following commands to unload them:
|
||||||
|
|
||||||
````
|
```shell
|
||||||
confluent local services connect connector unload TDengineSinkConnector
|
curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
|
||||||
confluent local services connect connector unload TDengineSourceConnector
|
curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
|
||||||
````
|
```
|
||||||
|
|
||||||
## Configuration reference
|
## Configuration reference
|
||||||
|
|
||||||
|
@ -424,24 +355,27 @@ The following configuration items apply to TDengine Sink Connector and TDengine
|
||||||
### TDengine Source Connector specific configuration
|
### TDengine Source Connector specific configuration
|
||||||
|
|
||||||
1. `connection.database`: source database name, no default value.
|
1. `connection.database`: source database name, no default value.
|
||||||
2. `topic.prefix`: topic name prefix after data is imported into kafka. Use `topic.prefix` + `connection.database` name as the full topic name. Defaults to the empty string "".
|
2. `topic.prefix`: topic name prefix used when importing data into Kafka. Its default value is the empty string "".
|
||||||
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. Default "1970-01-01 00:00:00".
|
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. If it is not set, importing to Kafka starts from the first/oldest row in the database.
|
||||||
4. `poll.interval.ms`: Pull data interval, the unit is ms. Default is 1000.
|
4. `poll.interval.ms`: The time interval, in milliseconds, for checking newly created or removed tables. The default value is 1000.
|
||||||
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database. Default is 100.
|
5. `fetch.max.rows`: The maximum number of rows retrieved from the database in each fetch; the default is 100.
|
||||||
6. `out.format`: The data format. The value could be line or json. The line represents the InfluxDB Line protocol format, and json represents the OpenTSDB JSON format. Default is `line`.
|
6. `query.interval.ms`: The time range, in milliseconds, for reading data from TDengine in each query. It should be adjusted according to the data ingestion rate; the default value is 0, which means reading all data up to the latest time.
|
||||||
|
7. `out.format`: Result output format. `line` indicates the InfluxDB line protocol format, and `json` indicates the OpenTSDB JSON format. The default is `line`.
|
||||||
|
8. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix><topic.delimiter><connection.database>`.
|
||||||
|
9. `topic.ignore.db`: Whether the topic naming rule contains the database name: true indicates that the rule is `<topic.prefix><topic.delimiter><stable.name>`, false indicates that the rule is `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`, and the default is false. Does not take effect when `topic.per.stable` is set to false.
|
||||||
|
10. `topic.delimiter`: topic name delimiter; the default is `-`.
|
||||||
|
11. `read.method`: the method used to read data from TDengine, either `query` or `subscription`. The default is `subscription`.
|
||||||
|
12. `subscription.group.id`: the consumer group ID used when subscribing to data from TDengine; this field is required when `read.method` is `subscription`.
|
||||||
|
13. `subscription.from`: whether to subscribe from the `latest` or the `earliest` data. The default is `latest`.
|
||||||
|
|
||||||
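To make the naming rules concrete: with the source configuration used earlier (`topic.prefix=tdengine`, `topic.delimiter=-`, `connection.database=test`, `topic.per.stable=true`, `topic.ignore.db=false`), data from the supertable `meters` is published to the topic monitored above:

```txt
<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>
tdengine-test-meters
```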
## Other notes
|
## Other notes
|
||||||
|
|
||||||
1. To install plugin to a customized location, refer to https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually.
|
1. To use Kafka Connect, refer to <https://kafka.apache.org/documentation/#connect>.
|
||||||
2. To use Kafka Connect without confluent, refer to https://kafka.apache.org/documentation/#connect.
|
|
||||||
|
|
||||||
## Feedback
|
## Feedback
|
||||||
|
|
||||||
https://github.com/taosdata/kafka-connect-tdengine/issues
|
<https://github.com/taosdata/kafka-connect-tdengine/issues>
|
||||||
|
|
||||||
## Reference
|
## Reference
|
||||||
|
|
||||||
1. https://www.confluent.io/what-is-apache-kafka
|
1. For more information, see <https://kafka.apache.org/documentation/>
|
||||||
2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
|
|
||||||
3. https://docs.confluent.io/platform/current/platform.html
|
|
||||||
|
|
|
@ -0,0 +1,36 @@
|
||||||
|
---
|
||||||
|
sidebar_label: DBeaver
|
||||||
|
title: DBeaver
|
||||||
|
description: You can use DBeaver to access your data stored in TDengine and TDengine Cloud.
|
||||||
|
---
|
||||||
|
|
||||||
|
[DBeaver](https://dbeaver.io/) is a popular cross-platform database management tool that facilitates data management for developers, database administrators, data analysts, and other users. Starting from version 23.1.1, DBeaver natively supports TDengine and can be used to manage TDengine Cloud as well as TDengine clusters deployed on-premises.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
To use DBeaver to manage TDengine, you need to prepare the following:
|
||||||
|
|
||||||
|
- Install DBeaver. DBeaver supports mainstream operating systems including Windows, macOS, and Linux. Please make sure you download and install the correct version (23.1.1+) and platform package. Please refer to the [official DBeaver documentation](https://github.com/dbeaver/dbeaver/wiki/Installation) for detailed installation steps.
|
||||||
|
- If you use an on-premises TDengine cluster, please make sure that TDengine and taosAdapter are deployed and running properly. For detailed information, please refer to the taosAdapter User Manual.
|
||||||
|
|
||||||
|
## Use DBeaver to access on-premises TDengine cluster
|
||||||
|
|
||||||
|
1. Start the DBeaver application, click the button or menu item to choose **New Database Connection**, and then select **TDengine** in the **Timeseries** category.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
2. Configure the TDengine connection by filling in the host address, port number, username, and password. If TDengine is deployed on the local machine, you are only required to fill in the username and password. The default username is root and the default password is taosdata. Click **Test Connection** to check whether the connection is workable. If you do not have the TDengine Java connector installed on the local machine, DBeaver will prompt you to download and install it.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
3. If the connection is successful, it will be displayed as shown in the following figure. If the connection fails, please check whether the TDengine service and taosAdapter are running correctly and whether the host address, port number, username, and password are correct.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
4. Use DBeaver to select databases and tables and browse your data stored in TDengine.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
5. You can also manipulate TDengine data by executing SQL commands.
|
||||||
|
|
||||||
|

|
|
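As a quick check in the DBeaver SQL editor, the statements below create a database and a table, insert one row, and read it back; the names `dtest` and `meters` are placeholders.

```sql
-- Placeholder names; substitute your own database and table.
CREATE DATABASE IF NOT EXISTS dtest;
CREATE TABLE IF NOT EXISTS dtest.meters (ts TIMESTAMP, current FLOAT, voltage INT, devname VARCHAR(20));
INSERT INTO dtest.meters VALUES (NOW, 1.2, 220, 'test');
SELECT * FROM dtest.meters;
```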
@ -0,0 +1,40 @@
|
||||||
|
---
|
||||||
|
sidebar_label: qStudio
|
||||||
|
title: qStudio
|
||||||
|
description: Step-by-Step Guide to Accessing TDengine Data with qStudio
|
||||||
|
---
|
||||||
|
|
||||||
|
qStudio is a free cross-platform SQL data analysis tool that allows easy browsing of tables, variables, functions, and configuration settings in a database. The latest version of qStudio includes built-in support for TDengine.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
To connect TDengine using qStudio, you need to complete the following preparations:
|
||||||
|
|
||||||
|
- Install qStudio: qStudio supports major operating systems, including Windows, macOS, and Linux. Please ensure you download the correct installation package for your platform from the [download page](https://www.timestored.com/qstudio/download/).
|
||||||
|
- Set up TDengine instance: Make sure TDengine is installed and running correctly, and the taosAdapter is installed and running. For detailed information, refer to the taosAdapter User Manual.
|
||||||
|
|
||||||
|
## Connecting to TDengine with qStudio
|
||||||
|
|
||||||
|
1. Launch the qStudio application and select "Server" and then "Add Server..." from the menu. Choose TDengine from the Server Type dropdown.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
2. Configure the TDengine connection by entering the host address, port number, username, and password. If TDengine is deployed on the local machine, you can fill in the username and password only. The default username is "root" and the default password is "taosdata". Click "Test" to test the connection's availability. If the TDengine Java connector is not installed on the local machine, qStudio will prompt you to download and install it.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
3. Once connected successfully, the screen will display as shown below. If the connection fails, check that the TDengine service and taosAdapter are running correctly, and ensure that the host address, port number, username, and password are correct.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
4. Use qStudio to select databases and tables to browse data from the TDengine server.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
5. You can also perform operations on TDengine data by executing SQL commands.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
6. qStudio supports charting functions based on the data. For more information, please refer to the [qStudio documentation](https://www.timestored.com/qstudio/help).
|
||||||
|
|
||||||
|

|
|
@ -338,7 +338,7 @@ Remark:
|
||||||
Equivalent function: sum
|
Equivalent function: sum
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
Select max(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s)
|
Select sum(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s)
|
||||||
```
|
```
|
||||||
|
|
||||||
Note: This function has no interpolation requirements, so it can be directly calculated.
|
Note: This function has no interpolation requirements, so it can be directly calculated.
|
||||||
|
|
|
@ -56,7 +56,7 @@ This error indicates that the client could not connect to the server. Perform th
|
||||||
|
|
||||||
7. If you are using the Python, Java, Go, Rust, C#, or Node.js connector on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable.
|
7. If you are using the Python, Java, Go, Rust, C#, or Node.js connector on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable.
|
||||||
|
|
||||||
8. If you are using macOS, verify that `libtaos.dylib` is in the `/usr/local/lib` directory and `/usr/local/lib` is in the `LD_LIBRARY_PATH` environment variable..
|
8. If you are using macOS, verify that `libtaos.dylib` is in the `/usr/local/lib` directory and `/usr/local/lib` is in the `DYLD_LIBRARY_PATH` environment variable (see the shell sketch after this list).
|
||||||
|
|
||||||
9. If you are using Windows, verify that `C:\TDengine\driver\taos.dll` is in the `PATH` environment variable. If possible, move `taos.dll` to the `C:\Windows\System32` directory.
|
9. If you are using Windows, verify that `C:\TDengine\driver\taos.dll` is in the `PATH` environment variable. If possible, move `taos.dll` to the `C:\Windows\System32` directory.
|
||||||
|
|
||||||
|
|
|
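For the Linux and macOS checks in items 7 and 8, a minimal shell sketch is shown below; the paths are the defaults named above, so adjust them if you installed TDengine elsewhere.

```bash
# Linux: confirm the client library exists and is on the loader path.
ls -l /usr/local/taos/driver/libtaos.so*
export LD_LIBRARY_PATH=/usr/local/taos/driver:$LD_LIBRARY_PATH

# macOS: the equivalent check uses libtaos.dylib and DYLD_LIBRARY_PATH.
ls -l /usr/local/lib/libtaos.dylib
export DYLD_LIBRARY_PATH=/usr/local/lib:$DYLD_LIBRARY_PATH
```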
@ -6,10 +6,43 @@ description: This document provides download links for all released versions of
|
||||||
|
|
||||||
TDengine 3.x installation packages can be downloaded at the following links:
|
TDengine 3.x installation packages can be downloaded at the following links:
|
||||||
|
|
||||||
For TDengine 2.x installation packages by version, please visit [here](https://www.taosdata.com/all-downloads).
|
For TDengine 2.x installation packages by version, please visit [here](https://tdengine.com/downloads/historical/).
|
||||||
|
|
||||||
import Release from "/components/ReleaseV3";
|
import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
|
## 3.1.0.0
|
||||||
|
|
||||||
|
:::note IMPORTANT
|
||||||
|
- Once you upgrade to TDengine 3.1.0.0, you cannot roll back to any previous version of TDengine. Upgrading to 3.1.0.0 will alter your data such that it cannot be read by previous versions.
|
||||||
|
- You must remove all streams before upgrading to TDengine 3.1.0.0. If you upgrade a deployment that contains streams, the upgrade will fail and your deployment will become nonoperational.
|
||||||
|
:::
|
||||||
|
|
||||||
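A minimal pre-upgrade sketch, assuming the standard stream DDL; `my_stream` is a placeholder for each name reported by `SHOW STREAMS` in your deployment.

```sql
-- List existing streams, then drop each one before upgrading.
SHOW STREAMS;
DROP STREAM IF EXISTS my_stream;  -- placeholder stream name
```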
|
<Release type="tdengine" version="3.1.0.0" />
|
||||||
|
|
||||||
|
## 3.0.7.1
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.0.7.1" />
|
||||||
|
|
||||||
|
## 3.0.7.0
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.0.7.0" />
|
||||||
|
|
||||||
|
## 3.0.6.0
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.0.6.0" />
|
||||||
|
|
||||||
|
## 3.0.5.1
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.0.5.1" />
|
||||||
|
|
||||||
|
## 3.0.5.0
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.0.5.0" />
|
||||||
|
|
||||||
|
## 3.0.4.2
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.0.4.2" />
|
||||||
|
|
||||||
## 3.0.4.1
|
## 3.0.4.1
|
||||||
|
|
||||||
<Release type="tdengine" version="3.0.4.1" />
|
<Release type="tdengine" version="3.0.4.1" />
|
||||||
|
|
|
@ -10,6 +10,14 @@ For other historical version installers, please visit [here](https://www.taosdat
|
||||||
|
|
||||||
import Release from "/components/ReleaseV3";
|
import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
|
## 2.5.2
|
||||||
|
|
||||||
|
<Release type="tools" version="2.5.2" />
|
||||||
|
|
||||||
|
## 2.5.1
|
||||||
|
|
||||||
|
<Release type="tools" version="2.5.1" />
|
||||||
|
|
||||||
## 2.5.0
|
## 2.5.0
|
||||||
|
|
||||||
<Release type="tools" version="2.5.0" />
|
<Release type="tools" version="2.5.0" />
|
||||||
|
|
|
@ -8,9 +8,13 @@ library("rJava")
|
||||||
library("RJDBC")
|
library("RJDBC")
|
||||||
|
|
||||||
args<- commandArgs(trailingOnly = TRUE)
|
args<- commandArgs(trailingOnly = TRUE)
|
||||||
driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver-3.0.0-dist.jar"
|
driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver-3.2.4-dist.jar"
|
||||||
driver = JDBC("com.taosdata.jdbc.TSDBDriver", driver_path)
|
driver = JDBC("com.taosdata.jdbc.TSDBDriver", driver_path)
|
||||||
conn = dbConnect(driver, "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata")
|
conn = dbConnect(driver, "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata")
|
||||||
dbGetQuery(conn, "SELECT server_version()")
|
dbGetQuery(conn, "SELECT server_version()")
|
||||||
|
dbSendUpdate(conn, "create database if not exists rtest")
|
||||||
|
dbSendUpdate(conn, "create table if not exists rtest.test (ts timestamp, current float, voltage int, devname varchar(20))")
|
||||||
|
dbSendUpdate(conn, "insert into rtest.test values (now, 1.2, 220, 'test')")
|
||||||
|
dbGetQuery(conn, "select * from rtest.test")
|
||||||
dbDisconnect(conn)
|
dbDisconnect(conn)
|
||||||
# ANCHOR_END: demo
|
# ANCHOR_END: demo
|
||||||
|
|
|
@ -2,11 +2,19 @@ if (! "RJDBC" %in% installed.packages()[, "Package"]) {
|
||||||
install.packages('RJDBC', repos='http://cran.us.r-project.org')
|
install.packages('RJDBC', repos='http://cran.us.r-project.org')
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# ANCHOR: demo
|
||||||
library("DBI")
|
library("DBI")
|
||||||
library("rJava")
|
library("rJava")
|
||||||
library("RJDBC")
|
library("RJDBC")
|
||||||
driver_path = "/home/debug/build/lib/taos-jdbcdriver-2.0.38-dist.jar"
|
|
||||||
|
args<- commandArgs(trailingOnly = TRUE)
|
||||||
|
driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver-3.2.4-dist.jar"
|
||||||
driver = JDBC("com.taosdata.jdbc.rs.RestfulDriver", driver_path)
|
driver = JDBC("com.taosdata.jdbc.rs.RestfulDriver", driver_path)
|
||||||
conn = dbConnect(driver, "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata")
|
conn = dbConnect(driver, "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata")
|
||||||
dbGetQuery(conn, "SELECT server_version()")
|
dbGetQuery(conn, "SELECT server_version()")
|
||||||
|
dbSendUpdate(conn, "create database if not exists rtest")
|
||||||
|
dbSendUpdate(conn, "create table if not exists rtest.test (ts timestamp, current float, voltage int, devname varchar(20))")
|
||||||
|
dbSendUpdate(conn, "insert into rtest.test values (now, 1.2, 220, 'test')")
|
||||||
|
dbGetQuery(conn, "select * from rtest.test")
|
||||||
dbDisconnect(conn)
|
dbDisconnect(conn)
|
||||||
|
# ANCHOR_END: demo
|
||||||
|
|
|
@ -0,0 +1 @@
|
||||||
|
apt install -y libbz2-dev libpcre2-dev libicu-dev
|
|
@ -78,7 +78,8 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
|
||||||
} break;
|
} break;
|
||||||
|
|
||||||
case TSDB_DATA_TYPE_BINARY:
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
case TSDB_DATA_TYPE_NCHAR: {
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
||||||
memcpy(str + len, row[i], charLen);
|
memcpy(str + len, row[i], charLen);
|
||||||
len += charLen;
|
len += charLen;
|
||||||
|
|
|
@ -76,7 +76,8 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
|
||||||
} break;
|
} break;
|
||||||
|
|
||||||
case TSDB_DATA_TYPE_BINARY:
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
case TSDB_DATA_TYPE_NCHAR: {
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
||||||
memcpy(str + len, row[i], charLen);
|
memcpy(str + len, row[i], charLen);
|
||||||
len += charLen;
|
len += charLen;
|
||||||
|
|
|
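For readers reusing the snippet above: `varDataLen()` reads a length header stored immediately before the payload that `row[i]` points to. The sketch below restates that layout under the assumption, not stated in the example, that the header is a 2-byte length (the conventional definition of `VARSTR_HEADER_SIZE`).

```c
#include <stdint.h>
#include <string.h>

/* Assumed layout: a BINARY/NCHAR/GEOMETRY cell is a 2-byte length header
 * followed by the payload, and TAOS_ROW entries point at the payload. */
#define VARSTR_HEADER_SIZE sizeof(uint16_t)
#define varDataLen(v) (*(const uint16_t *)(v))

static int32_t copyVarCell(char *dst, const char *cell) {
  /* The header sits just before the payload pointer. */
  int32_t charLen = varDataLen(cell - VARSTR_HEADER_SIZE);
  memcpy(dst, cell, charLen);
  return charLen;
}
```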
@ -22,7 +22,7 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.2.1</version>
|
<version>3.2.4</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
<!-- ANCHOR_END: dep-->
|
<!-- ANCHOR_END: dep-->
|
||||||
<dependency>
|
<dependency>
|
||||||
|
|