chore: merge 3.0
|
@ -16,7 +16,6 @@ debug/
|
|||
release/
|
||||
target/
|
||||
debs/
|
||||
deps/
|
||||
rpms/
|
||||
mac/
|
||||
*.pyc
|
||||
|
@ -131,3 +130,4 @@ tools/BUGS
|
|||
tools/taos-tools
|
||||
tools/taosws-rs
|
||||
tags
|
||||
.clangd
|
||||
|
|
|
@ -0,0 +1,28 @@
|
|||
repos:
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v2.3.0
|
||||
hooks:
|
||||
- id: check-yaml
|
||||
- id: check-json
|
||||
- id: end-of-file-fixer
|
||||
- id: trailing-whitespace
|
||||
|
||||
repos:
|
||||
- repo: https://github.com/psf/black
|
||||
rev: stable
|
||||
hooks:
|
||||
- id: black
|
||||
|
||||
repos:
|
||||
- repo: https://github.com/pocc/pre-commit-hooks
|
||||
rev: master
|
||||
hooks:
|
||||
- id: cppcheck
|
||||
args: ["--error-exitcode=0"]
|
||||
|
||||
repos:
|
||||
- repo: https://github.com/crate-ci/typos
|
||||
rev: v1.15.7
|
||||
hooks:
|
||||
- id: typos
|
||||
|
|
@ -15,11 +15,15 @@ SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
|
|||
set(TD_SUPPORT_DIR "${TD_SOURCE_DIR}/cmake")
|
||||
set(TD_CONTRIB_DIR "${TD_SOURCE_DIR}/contrib")
|
||||
|
||||
|
||||
|
||||
|
||||
include(${TD_SUPPORT_DIR}/cmake.platform)
|
||||
include(${TD_SUPPORT_DIR}/cmake.define)
|
||||
include(${TD_SUPPORT_DIR}/cmake.options)
|
||||
include(${TD_SUPPORT_DIR}/cmake.version)
|
||||
|
||||
|
||||
# contrib
|
||||
add_subdirectory(contrib)
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
注意:修改文档的分支要以`docs/`为开头,以免进行不必要的测试。
|
||||
4. 创建pull request,将自己的分支合并到开发分支`3.0`,我们开发团队将尽快审核。
|
||||
|
||||
如遇任何问题,请添加官方微信TDengineECO。我们的团队会帮忙解决。
|
||||
如遇任何问题,请添加官方微信 tdengine1。我们的团队会帮忙解决。
|
||||
|
||||
## 给贡献者的礼品
|
||||
|
||||
|
@ -48,4 +48,4 @@ TDengine 社区致力于让更多的开发者理解和使用它。
|
|||
|
||||
## 联系我们
|
||||
|
||||
如果您有什么问题需要解决,或者有什么问题需要解答,可以添加微信:TDengineECO
|
||||
如果您有什么问题需要解决,或者有什么问题需要解答,可以添加微信:tdengine1。
|
||||
|
|
|
@ -314,7 +314,7 @@ def pre_test_build_win() {
|
|||
cd %WIN_CONNECTOR_ROOT%
|
||||
python.exe -m pip install --upgrade pip
|
||||
python -m pip uninstall taospy -y
|
||||
python -m pip install taospy==2.7.6
|
||||
python -m pip install taospy==2.7.10
|
||||
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
|
||||
'''
|
||||
return 1
|
||||
|
|
14
README-CN.md
|
@ -15,7 +15,7 @@
|
|||
[](https://coveralls.io/github/taosdata/TDengine?branch=develop)
|
||||
[](https://bestpractices.coreinfrastructure.org/projects/4201)
|
||||
|
||||
简体中文 | [English](README.md) | 很多职位正在热招中,请看[这里](https://www.taosdata.com/cn/careers/)
|
||||
简体中文 | [English](README.md) | [TDengine 云服务](https://cloud.taosdata.com/?utm_medium=cn&utm_source=github) | 很多职位正在热招中,请看[这里](https://www.taosdata.com/cn/careers/)
|
||||
|
||||
# TDengine 简介
|
||||
|
||||
|
@ -68,14 +68,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
|
|||
```bash
|
||||
sudo yum install epel-release
|
||||
sudo yum update
|
||||
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
|
||||
sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel
|
||||
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
|
||||
```
|
||||
|
||||
### CentOS 8 & Fedora
|
||||
### CentOS 8/Fedora/Rocky Linux
|
||||
|
||||
```bash
|
||||
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
|
||||
sudo dnf install -y gcc gcc-c++ gflags make cmake epel-release git openssl-devel
|
||||
```
|
||||
|
||||
#### 在 CentOS 上构建 taosTools 安装依赖软件
|
||||
|
@ -88,7 +88,7 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
|
|||
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
|
||||
```
|
||||
|
||||
#### CentOS 8/Rocky Linux
|
||||
#### CentOS 8/Fedora/Rocky Linux
|
||||
|
||||
```
|
||||
sudo yum install -y epel-release
|
||||
|
@ -101,7 +101,7 @@ sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson
|
|||
|
||||
若 powertools 安装失败,可以尝试改用:
|
||||
```
|
||||
sudo yum config-manager --set-enabled Powertools
|
||||
sudo yum config-manager --set-enabled powertools
|
||||
```
|
||||
|
||||
#### CentOS + devtoolset
|
||||
|
@ -117,7 +117,7 @@ scl enable devtoolset-9 -- bash
|
|||
### macOS
|
||||
|
||||
```
|
||||
brew install argp-standalone pkgconfig
|
||||
brew install argp-standalone gflags pkgconfig
|
||||
```
|
||||
|
||||
### 设置 golang 开发环境
|
||||
|
|
10
README.md
|
@ -76,14 +76,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
|
|||
```bash
|
||||
sudo yum install epel-release
|
||||
sudo yum update
|
||||
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
|
||||
sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel
|
||||
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
|
||||
```
|
||||
|
||||
### CentOS 8 & Fedora
|
||||
### CentOS 8/Fedora/Rocky Linux
|
||||
|
||||
```bash
|
||||
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
|
||||
sudo dnf install -y gcc gcc-c++ make cmake epel-release gflags git openssl-devel
|
||||
```
|
||||
|
||||
#### Install build dependencies for taosTools on CentOS
|
||||
|
@ -94,7 +94,7 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
|
|||
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
|
||||
```
|
||||
|
||||
#### CentOS 8/Rocky Linux
|
||||
#### CentOS 8/Fedora/Rocky Linux
|
||||
|
||||
```
|
||||
sudo yum install -y epel-release
|
||||
|
@ -124,7 +124,7 @@ scl enable devtoolset-9 -- bash
|
|||
### macOS
|
||||
|
||||
```
|
||||
brew install argp-standalone pkgconfig
|
||||
brew install argp-standalone gflags pkgconfig
|
||||
```
|
||||
|
||||
### Setup golang environment
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
cmake_minimum_required(VERSION 3.0)
|
||||
|
||||
set(CMAKE_VERBOSE_MAKEFILE OFF)
|
||||
set(CMAKE_VERBOSE_MAKEFILE ON)
|
||||
set(TD_BUILD_TAOSA_INTERNAL FALSE)
|
||||
|
||||
#set output directory
|
||||
|
@ -115,15 +115,6 @@ ELSE ()
|
|||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}")
|
||||
ENDIF ()
|
||||
|
||||
IF (${BUILD_SANITIZER})
|
||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
|
||||
MESSAGE(STATUS "Compile with Address Sanitizer!")
|
||||
ELSE ()
|
||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
ENDIF ()
|
||||
|
||||
# disable all assert
|
||||
IF ((${DISABLE_ASSERT} MATCHES "true") OR (${DISABLE_ASSERTS} MATCHES "true"))
|
||||
ADD_DEFINITIONS(-DDISABLE_ASSERT)
|
||||
|
@ -165,4 +156,20 @@ ELSE ()
|
|||
MESSAGE(STATUS "SIMD instructions (FMA/AVX/AVX2) is ACTIVATED")
|
||||
ENDIF()
|
||||
|
||||
# build mode
|
||||
SET(CMAKE_C_FLAGS_REL "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
SET(CMAKE_CXX_FLAGS_REL "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
|
||||
IF (${BUILD_SANITIZER})
|
||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
|
||||
MESSAGE(STATUS "Compile with Address Sanitizer!")
|
||||
ELSEIF (${BUILD_RELEASE})
|
||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
|
||||
ELSE ()
|
||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
ENDIF ()
|
||||
|
||||
ENDIF ()
|
||||
|
|
|
@ -64,12 +64,25 @@ IF(${TD_WINDOWS})
|
|||
ON
|
||||
)
|
||||
|
||||
MESSAGE("build geos Win32")
|
||||
option(
|
||||
BUILD_GEOS
|
||||
"If build geos on Windows"
|
||||
ON
|
||||
)
|
||||
|
||||
ELSEIF (TD_DARWIN_64)
|
||||
IF(${BUILD_TEST})
|
||||
add_definitions(-DCOMPILER_SUPPORTS_CXX13)
|
||||
ENDIF ()
|
||||
ENDIF ()
|
||||
|
||||
option(
|
||||
BUILD_GEOS
|
||||
"If build geos on Windows"
|
||||
ON
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_SHARED_LIBS
|
||||
""
|
||||
|
@ -109,7 +122,7 @@ option(
|
|||
option(
|
||||
BUILD_WITH_ROCKSDB
|
||||
"If build with rocksdb"
|
||||
OFF
|
||||
ON
|
||||
)
|
||||
|
||||
option(
|
||||
|
@ -171,3 +184,14 @@ option(
|
|||
ON
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_RELEASE
|
||||
"If build release version"
|
||||
OFF
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_CONTRIB
|
||||
"If build thirdpart from source"
|
||||
OFF
|
||||
)
|
||||
|
|
|
@ -121,6 +121,12 @@ IF ("${CPUTYPE}" STREQUAL "")
|
|||
SET(TD_LOONGARCH_64 TRUE)
|
||||
ADD_DEFINITIONS("-D_TD_LOONGARCH_")
|
||||
ADD_DEFINITIONS("-D_TD_LOONGARCH_64")
|
||||
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
|
||||
SET(PLATFORM_ARCH_STR "mips")
|
||||
MESSAGE(STATUS "input cpuType: mips64")
|
||||
SET(TD_MIPS_64 TRUE)
|
||||
ADD_DEFINITIONS("-D_TD_MIPS_")
|
||||
ADD_DEFINITIONS("-D_TD_MIPS_64")
|
||||
ENDIF ()
|
||||
ELSE ()
|
||||
# if generate ARM version:
|
||||
|
@ -162,7 +168,27 @@ ELSE ()
|
|||
ENDIF ()
|
||||
ENDIF ()
|
||||
|
||||
IF(APPLE)
|
||||
set(CMAKE_THREAD_LIBS_INIT "-lpthread")
|
||||
set(CMAKE_HAVE_THREADS_LIBRARY 1)
|
||||
set(CMAKE_USE_WIN32_THREADS_INIT 0)
|
||||
set(CMAKE_USE_PTHREADS 1)
|
||||
set(THREADS_PREFER_PTHREAD_FLAG ON)
|
||||
ENDIF()
|
||||
|
||||
MESSAGE(STATUS "Platform arch:" ${PLATFORM_ARCH_STR})
|
||||
|
||||
set(TD_DEPS_DIR "x86")
|
||||
if (TD_LINUX)
|
||||
IF (TD_ARM_64 OR TD_ARM_32)
|
||||
set(TD_DEPS_DIR "arm")
|
||||
ELSEIF (TD_MIPS_64)
|
||||
set(TD_DEPS_DIR "mips")
|
||||
ELSE()
|
||||
set(TD_DEPS_DIR "x86")
|
||||
ENDIF()
|
||||
endif()
|
||||
MESSAGE(STATUS "DEPS_DIR: " ${TD_DEPS_DIR})
|
||||
|
||||
MESSAGE("C Compiler: ${CMAKE_C_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_C_COMPILER_VERSION})")
|
||||
MESSAGE("CXX Compiler: ${CMAKE_CXX_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_CXX_COMPILER_VERSION})")
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
IF (DEFINED VERNUMBER)
|
||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||
ELSE ()
|
||||
SET(TD_VER_NUMBER "3.0.4.1")
|
||||
SET(TD_VER_NUMBER "3.1.0.0.alpha")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED VERCOMPATIBLE)
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
|
||||
# geos
|
||||
ExternalProject_Add(geos
|
||||
GIT_REPOSITORY https://github.com/libgeos/geos.git
|
||||
GIT_TAG 3.12.0
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/geos"
|
||||
BINARY_DIR ""
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
|
@ -1,11 +1,29 @@
|
|||
|
||||
# rocksdb
|
||||
ExternalProject_Add(rocksdb
|
||||
GIT_REPOSITORY https://github.com/taosdata-contrib/rocksdb.git
|
||||
GIT_TAG v6.23.3
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
||||
if (${BUILD_CONTRIB})
|
||||
ExternalProject_Add(rocksdb
|
||||
URL https://github.com/facebook/rocksdb/archive/refs/tags/v8.1.1.tar.gz
|
||||
URL_HASH MD5=3b4c97ee45df9c8a5517308d31ab008b
|
||||
DOWNLOAD_NO_PROGRESS 1
|
||||
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
||||
else()
|
||||
if (NOT ${TD_LINUX})
|
||||
ExternalProject_Add(rocksdb
|
||||
URL https://github.com/facebook/rocksdb/archive/refs/tags/v8.1.1.tar.gz
|
||||
URL_HASH MD5=3b4c97ee45df9c8a5517308d31ab008b
|
||||
DOWNLOAD_NO_PROGRESS 1
|
||||
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
||||
endif()
|
||||
endif()
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
# stub
|
||||
ExternalProject_Add(stub
|
||||
GIT_REPOSITORY https://github.com/coolxv/cpp-stub.git
|
||||
GIT_TAG 5e903b8e
|
||||
GIT_SUBMODULES "src"
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/cpp-stub"
|
||||
BINARY_DIR "${TD_CONTRIB_DIR}/cpp-stub/src"
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taosadapter
|
||||
ExternalProject_Add(taosadapter
|
||||
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
||||
GIT_TAG 565ca21
|
||||
GIT_TAG main
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taos-tools
|
||||
ExternalProject_Add(taos-tools
|
||||
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
|
||||
GIT_TAG 4378702
|
||||
GIT_TAG 3.0
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -77,11 +77,23 @@ if(${BUILD_WITH_LEVELDB})
|
|||
cat("${TD_SUPPORT_DIR}/leveldb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif(${BUILD_WITH_LEVELDB})
|
||||
|
||||
# rocksdb
|
||||
if(${BUILD_WITH_ROCKSDB})
|
||||
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
add_definitions(-DUSE_ROCKSDB)
|
||||
endif(${BUILD_WITH_ROCKSDB})
|
||||
if (${BUILD_CONTRIB})
|
||||
if(${BUILD_WITH_ROCKSDB})
|
||||
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
add_definitions(-DUSE_ROCKSDB)
|
||||
endif()
|
||||
else()
|
||||
if (NOT ${TD_LINUX})
|
||||
if(${BUILD_WITH_ROCKSDB})
|
||||
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
add_definitions(-DUSE_ROCKSDB)
|
||||
endif(${BUILD_WITH_ROCKSDB})
|
||||
else()
|
||||
if(${BUILD_WITH_ROCKSDB})
|
||||
add_definitions(-DUSE_ROCKSDB)
|
||||
endif(${BUILD_WITH_ROCKSDB})
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# canonical-raft
|
||||
if(${BUILD_WITH_CRAFT})
|
||||
|
@ -134,6 +146,11 @@ if(${BUILD_ADDR2LINE})
|
|||
endif(NOT ${TD_WINDOWS})
|
||||
endif(${BUILD_ADDR2LINE})
|
||||
|
||||
# geos
|
||||
if(${BUILD_GEOS})
|
||||
cat("${TD_SUPPORT_DIR}/geos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif()
|
||||
|
||||
# download dependencies
|
||||
configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
|
||||
|
@ -170,8 +187,8 @@ if(${BUILD_TEST})
|
|||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cpp-stub/src_darwin>
|
||||
)
|
||||
endif(${TD_DARWIN})
|
||||
|
||||
|
||||
|
||||
|
||||
endif(${BUILD_TEST})
|
||||
|
||||
# cJson
|
||||
|
@ -222,19 +239,113 @@ endif(${BUILD_WITH_LEVELDB})
|
|||
|
||||
# rocksdb
|
||||
# To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
|
||||
if(${BUILD_WITH_ROCKSDB})
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
|
||||
option(WITH_TESTS "" OFF)
|
||||
option(WITH_BENCHMARK_TOOLS "" OFF)
|
||||
option(WITH_TOOLS "" OFF)
|
||||
option(WITH_LIBURING "" OFF)
|
||||
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
|
||||
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
|
||||
target_include_directories(
|
||||
rocksdb
|
||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
|
||||
)
|
||||
endif(${BUILD_WITH_ROCKSDB})
|
||||
if (${BUILD_WITH_UV})
|
||||
if(${TD_LINUX})
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
|
||||
IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
|
||||
SET(CMAKE_BUILD_TYPE Release)
|
||||
endif()
|
||||
endif(${TD_LINUX})
|
||||
endif (${BUILD_WITH_UV})
|
||||
|
||||
if (${BUILD_WITH_ROCKSDB})
|
||||
if (${BUILD_CONTRIB})
|
||||
if(${TD_LINUX})
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
|
||||
if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
|
||||
SET(CMAKE_BUILD_TYPE Release)
|
||||
endif()
|
||||
endif(${TD_LINUX})
|
||||
MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
|
||||
|
||||
if(${TD_DARWIN})
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
|
||||
endif(${TD_DARWIN})
|
||||
|
||||
if (${TD_DARWIN_ARM64})
|
||||
set(HAS_ARMV8_CRC true)
|
||||
endif(${TD_DARWIN_ARM64})
|
||||
|
||||
if (${TD_WINDOWS})
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
|
||||
option(WITH_JNI "" OFF)
|
||||
option(WITH_MD_LIBRARY "build with MD" OFF)
|
||||
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
|
||||
endif(${TD_WINDOWS})
|
||||
|
||||
|
||||
if(${TD_DARWIN})
|
||||
option(HAVE_THREAD_LOCAL "" OFF)
|
||||
option(WITH_IOSTATS_CONTEXT "" OFF)
|
||||
option(WITH_PERF_CONTEXT "" OFF)
|
||||
endif(${TD_DARWIN})
|
||||
|
||||
option(WITH_FALLOCATE "" OFF)
|
||||
option(WITH_JEMALLOC "" OFF)
|
||||
option(WITH_GFLAGS "" OFF)
|
||||
option(PORTABLE "" ON)
|
||||
option(WITH_LIBURING "" OFF)
|
||||
option(FAIL_ON_WARNINGS OFF)
|
||||
|
||||
option(WITH_TESTS "" OFF)
|
||||
option(WITH_BENCHMARK_TOOLS "" OFF)
|
||||
option(WITH_TOOLS "" OFF)
|
||||
option(WITH_LIBURING "" OFF)
|
||||
|
||||
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
|
||||
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
|
||||
target_include_directories(
|
||||
rocksdb
|
||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
|
||||
)
|
||||
else()
|
||||
if (NOT ${TD_LINUX})
|
||||
MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
|
||||
if(${TD_DARWIN})
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
|
||||
endif(${TD_DARWIN})
|
||||
|
||||
if (${TD_DARWIN_ARM64})
|
||||
set(HAS_ARMV8_CRC true)
|
||||
endif(${TD_DARWIN_ARM64})
|
||||
|
||||
if (${TD_WINDOWS})
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
|
||||
option(WITH_JNI "" OFF)
|
||||
option(WITH_MD_LIBRARY "build with MD" OFF)
|
||||
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
|
||||
endif(${TD_WINDOWS})
|
||||
|
||||
|
||||
if(${TD_DARWIN})
|
||||
option(HAVE_THREAD_LOCAL "" OFF)
|
||||
option(WITH_IOSTATS_CONTEXT "" OFF)
|
||||
option(WITH_PERF_CONTEXT "" OFF)
|
||||
endif(${TD_DARWIN})
|
||||
|
||||
option(WITH_FALLOCATE "" OFF)
|
||||
option(WITH_JEMALLOC "" OFF)
|
||||
option(WITH_GFLAGS "" OFF)
|
||||
option(PORTABLE "" ON)
|
||||
option(WITH_LIBURING "" OFF)
|
||||
option(FAIL_ON_WARNINGS OFF)
|
||||
|
||||
option(WITH_TESTS "" OFF)
|
||||
option(WITH_BENCHMARK_TOOLS "" OFF)
|
||||
option(WITH_TOOLS "" OFF)
|
||||
option(WITH_LIBURING "" OFF)
|
||||
|
||||
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
|
||||
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
|
||||
target_include_directories(
|
||||
rocksdb
|
||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
|
||||
)
|
||||
endif()
|
||||
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# lucene
|
||||
# To support build on ubuntu: sudo apt-get install libboost-all-dev
|
||||
|
@ -242,10 +353,10 @@ if(${BUILD_WITH_LUCENE})
|
|||
option(ENABLE_TEST "Enable the tests" OFF)
|
||||
add_subdirectory(lucene EXCLUDE_FROM_ALL)
|
||||
target_include_directories(
|
||||
lucene++
|
||||
lucene++
|
||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/lucene/include>
|
||||
)
|
||||
|
||||
)
|
||||
|
||||
endif(${BUILD_WITH_LUCENE})
|
||||
|
||||
# NuRaft
|
||||
|
@ -305,7 +416,7 @@ if(${BUILD_MSVCREGEX})
|
|||
target_include_directories(msvcregex
|
||||
PRIVATE "msvcregex"
|
||||
)
|
||||
target_link_libraries(msvcregex
|
||||
target_link_libraries(msvcregex
|
||||
INTERFACE Shell32
|
||||
)
|
||||
SET_TARGET_PROPERTIES(msvcregex PROPERTIES OUTPUT_NAME msvcregex)
|
||||
|
@ -365,8 +476,8 @@ if(${BUILD_WITH_BDB})
|
|||
IMPORTED_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}/bdb/libdb.a"
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/bdb"
|
||||
)
|
||||
target_link_libraries(bdb
|
||||
INTERFACE pthread
|
||||
target_link_libraries(bdb
|
||||
INTERFACE pthread
|
||||
)
|
||||
endif(${BUILD_WITH_BDB})
|
||||
|
||||
|
@ -378,12 +489,12 @@ if(${BUILD_WITH_SQLITE})
|
|||
IMPORTED_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}/sqlite/.libs/libsqlite3.a"
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/sqlite"
|
||||
)
|
||||
target_link_libraries(sqlite
|
||||
INTERFACE m
|
||||
INTERFACE pthread
|
||||
target_link_libraries(sqlite
|
||||
INTERFACE m
|
||||
INTERFACE pthread
|
||||
)
|
||||
if(NOT TD_WINDOWS)
|
||||
target_link_libraries(sqlite
|
||||
target_link_libraries(sqlite
|
||||
INTERFACE dl
|
||||
)
|
||||
endif(NOT TD_WINDOWS)
|
||||
|
@ -391,22 +502,22 @@ endif(${BUILD_WITH_SQLITE})
|
|||
|
||||
# addr2line
|
||||
if(${BUILD_ADDR2LINE})
|
||||
if(NOT ${TD_WINDOWS})
|
||||
check_include_file( "sys/types.h" HAVE_SYS_TYPES_H)
|
||||
check_include_file( "sys/stat.h" HAVE_SYS_STAT_H )
|
||||
check_include_file( "inttypes.h" HAVE_INTTYPES_H )
|
||||
check_include_file( "stddef.h" HAVE_STDDEF_H )
|
||||
check_include_file( "stdlib.h" HAVE_STDLIB_H )
|
||||
check_include_file( "string.h" HAVE_STRING_H )
|
||||
check_include_file( "memory.h" HAVE_MEMORY_H )
|
||||
check_include_file( "strings.h" HAVE_STRINGS_H )
|
||||
if(NOT ${TD_WINDOWS})
|
||||
check_include_file( "sys/types.h" HAVE_SYS_TYPES_H)
|
||||
check_include_file( "sys/stat.h" HAVE_SYS_STAT_H )
|
||||
check_include_file( "inttypes.h" HAVE_INTTYPES_H )
|
||||
check_include_file( "stddef.h" HAVE_STDDEF_H )
|
||||
check_include_file( "stdlib.h" HAVE_STDLIB_H )
|
||||
check_include_file( "string.h" HAVE_STRING_H )
|
||||
check_include_file( "memory.h" HAVE_MEMORY_H )
|
||||
check_include_file( "strings.h" HAVE_STRINGS_H )
|
||||
check_include_file( "stdint.h" HAVE_STDINT_H )
|
||||
check_include_file( "unistd.h" HAVE_UNISTD_H )
|
||||
check_include_file( "sgidefs.h" HAVE_SGIDEFS_H )
|
||||
check_include_file( "stdafx.h" HAVE_STDAFX_H )
|
||||
check_include_file( "elf.h" HAVE_ELF_H )
|
||||
check_include_file( "libelf.h" HAVE_LIBELF_H )
|
||||
check_include_file( "libelf/libelf.h" HAVE_LIBELF_LIBELF_H)
|
||||
check_include_file( "elf.h" HAVE_ELF_H )
|
||||
check_include_file( "libelf.h" HAVE_LIBELF_H )
|
||||
check_include_file( "libelf/libelf.h" HAVE_LIBELF_LIBELF_H)
|
||||
check_include_file( "alloca.h" HAVE_ALLOCA_H )
|
||||
check_include_file( "elfaccess.h" HAVE_ELFACCESS_H)
|
||||
check_include_file( "sys/elf_386.h" HAVE_SYS_ELF_386_H )
|
||||
|
@ -414,7 +525,7 @@ if(${BUILD_ADDR2LINE})
|
|||
check_include_file( "sys/elf_sparc.h" HAVE_SYS_ELF_SPARC_H)
|
||||
check_include_file( "sys/ia64/elf.h" HAVE_SYS_IA64_ELF_H )
|
||||
set(VERSION 0.3.1)
|
||||
set(PACKAGE_VERSION "\"${VERSION}\"")
|
||||
set(PACKAGE_VERSION "\"${VERSION}\"")
|
||||
configure_file(libdwarf/cmake/config.h.cmake config.h)
|
||||
file(GLOB_RECURSE LIBDWARF_SOURCES "libdwarf/src/lib/libdwarf/*.c")
|
||||
add_library(libdwarf STATIC ${LIBDWARF_SOURCES})
|
||||
|
@ -434,6 +545,23 @@ if(${BUILD_ADDR2LINE})
|
|||
endif(NOT ${TD_WINDOWS})
|
||||
endif(${BUILD_ADDR2LINE})
|
||||
|
||||
# geos
|
||||
if(${BUILD_GEOS})
|
||||
if(${TD_LINUX})
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
|
||||
IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
|
||||
SET(CMAKE_BUILD_TYPE Release)
|
||||
endif()
|
||||
endif(${TD_LINUX})
|
||||
option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
|
||||
add_subdirectory(geos EXCLUDE_FROM_ALL)
|
||||
unset(CMAKE_CXX_STANDARD CACHE) # undo libgeos's setting of global CMAKE_CXX_STANDARD
|
||||
target_include_directories(
|
||||
geos_c
|
||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/geos/include>
|
||||
)
|
||||
endif(${BUILD_GEOS})
|
||||
|
||||
# ================================================================================================
|
||||
# Build test
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
message("contrib test/rocksdb:" ${BUILD_DEPENDENCY_TESTS})
|
||||
|
||||
add_executable(rocksdbTest "")
|
||||
target_sources(rocksdbTest
|
||||
PRIVATE
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/main.c"
|
||||
)
|
||||
target_link_libraries(rocksdbTest rocksdb)
|
||||
target_link_libraries(rocksdbTest rocksdb)
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
#include <assert.h>
|
||||
#include <bits/stdint-uintn.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
@ -9,38 +10,307 @@
|
|||
const char DBPath[] = "rocksdb_c_simple_example";
|
||||
const char DBBackupPath[] = "/tmp/rocksdb_c_simple_example_backup";
|
||||
|
||||
static const int32_t endian_test_var = 1;
|
||||
#define IS_LITTLE_ENDIAN() (*(uint8_t *)(&endian_test_var) != 0)
|
||||
#define TD_RT_ENDIAN() (IS_LITTLE_ENDIAN() ? TD_LITTLE_ENDIAN : TD_BIG_ENDIAN)
|
||||
|
||||
#define POINTER_SHIFT(p, b) ((void *)((char *)(p) + (b)))
|
||||
static void *taosDecodeFixedU64(const void *buf, uint64_t *value) {
|
||||
if (IS_LITTLE_ENDIAN()) {
|
||||
memcpy(value, buf, sizeof(*value));
|
||||
} else {
|
||||
((uint8_t *)value)[7] = ((uint8_t *)buf)[0];
|
||||
((uint8_t *)value)[6] = ((uint8_t *)buf)[1];
|
||||
((uint8_t *)value)[5] = ((uint8_t *)buf)[2];
|
||||
((uint8_t *)value)[4] = ((uint8_t *)buf)[3];
|
||||
((uint8_t *)value)[3] = ((uint8_t *)buf)[4];
|
||||
((uint8_t *)value)[2] = ((uint8_t *)buf)[5];
|
||||
((uint8_t *)value)[1] = ((uint8_t *)buf)[6];
|
||||
((uint8_t *)value)[0] = ((uint8_t *)buf)[7];
|
||||
}
|
||||
|
||||
return POINTER_SHIFT(buf, sizeof(*value));
|
||||
}
|
||||
|
||||
// ---- Fixed U64
|
||||
static int32_t taosEncodeFixedU64(void **buf, uint64_t value) {
|
||||
if (buf != NULL) {
|
||||
if (IS_LITTLE_ENDIAN()) {
|
||||
memcpy(*buf, &value, sizeof(value));
|
||||
} else {
|
||||
((uint8_t *)(*buf))[0] = value & 0xff;
|
||||
((uint8_t *)(*buf))[1] = (value >> 8) & 0xff;
|
||||
((uint8_t *)(*buf))[2] = (value >> 16) & 0xff;
|
||||
((uint8_t *)(*buf))[3] = (value >> 24) & 0xff;
|
||||
((uint8_t *)(*buf))[4] = (value >> 32) & 0xff;
|
||||
((uint8_t *)(*buf))[5] = (value >> 40) & 0xff;
|
||||
((uint8_t *)(*buf))[6] = (value >> 48) & 0xff;
|
||||
((uint8_t *)(*buf))[7] = (value >> 56) & 0xff;
|
||||
}
|
||||
|
||||
*buf = POINTER_SHIFT(*buf, sizeof(value));
|
||||
}
|
||||
|
||||
return (int32_t)sizeof(value);
|
||||
}
|
||||
|
||||
typedef struct KV {
|
||||
uint64_t k1;
|
||||
uint64_t k2;
|
||||
} KV;
|
||||
|
||||
int kvSerial(KV *kv, char *buf) {
|
||||
int len = 0;
|
||||
len += taosEncodeFixedU64((void **)&buf, kv->k1);
|
||||
len += taosEncodeFixedU64((void **)&buf, kv->k2);
|
||||
return len;
|
||||
}
|
||||
const char *kvDBName(void *name) { return "kvDBname"; }
|
||||
int kvDBComp(void *state, const char *aBuf, size_t aLen, const char *bBuf, size_t bLen) {
|
||||
KV w1, w2;
|
||||
|
||||
memset(&w1, 0, sizeof(w1));
|
||||
memset(&w2, 0, sizeof(w2));
|
||||
|
||||
char *p1 = (char *)aBuf;
|
||||
char *p2 = (char *)bBuf;
|
||||
// p1 += 1;
|
||||
// p2 += 1;
|
||||
|
||||
p1 = taosDecodeFixedU64(p1, &w1.k1);
|
||||
p2 = taosDecodeFixedU64(p2, &w2.k1);
|
||||
|
||||
p1 = taosDecodeFixedU64(p1, &w1.k2);
|
||||
p2 = taosDecodeFixedU64(p2, &w2.k2);
|
||||
|
||||
if (w1.k1 < w2.k1) {
|
||||
return -1;
|
||||
} else if (w1.k1 > w2.k1) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (w1.k2 < w2.k2) {
|
||||
return -1;
|
||||
} else if (w1.k2 > w2.k2) {
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
int kvDeserial(KV *kv, char *buf) {
|
||||
char *p1 = (char *)buf;
|
||||
// p1 += 1;
|
||||
p1 = taosDecodeFixedU64(p1, &kv->k1);
|
||||
p1 = taosDecodeFixedU64(p1, &kv->k2);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char const *argv[]) {
|
||||
rocksdb_t * db;
|
||||
rocksdb_t *db;
|
||||
rocksdb_backup_engine_t *be;
|
||||
rocksdb_options_t * options = rocksdb_options_create();
|
||||
rocksdb_options_set_create_if_missing(options, 1);
|
||||
|
||||
// open DB
|
||||
char *err = NULL;
|
||||
db = rocksdb_open(options, DBPath, &err);
|
||||
char *err = NULL;
|
||||
const char *path = "/tmp/db";
|
||||
|
||||
// Write
|
||||
rocksdb_writeoptions_t *writeoptions = rocksdb_writeoptions_create();
|
||||
rocksdb_put(db, writeoptions, "key", 3, "value", 5, &err);
|
||||
rocksdb_options_t *opt = rocksdb_options_create();
|
||||
rocksdb_options_set_create_if_missing(opt, 1);
|
||||
rocksdb_options_set_create_missing_column_families(opt, 1);
|
||||
|
||||
// Read
|
||||
rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
|
||||
rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
|
||||
// rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
|
||||
int len = 1;
|
||||
char buf[256] = {0};
|
||||
size_t vallen = 0;
|
||||
char * val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
|
||||
printf("val:%s\n", val);
|
||||
char *val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
|
||||
snprintf(buf, vallen + 5, "val:%s", val);
|
||||
printf("%ld %ld %s\n", strlen(val), vallen, buf);
|
||||
|
||||
// Update
|
||||
// rocksdb_put(db, writeoptions, "key", 3, "eulav", 5, &err);
|
||||
char **cfName = calloc(len, sizeof(char *));
|
||||
for (int i = 0; i < len; i++) {
|
||||
cfName[i] = "test";
|
||||
}
|
||||
const rocksdb_options_t **cfOpt = malloc(len * sizeof(rocksdb_options_t *));
|
||||
for (int i = 0; i < len; i++) {
|
||||
cfOpt[i] = rocksdb_options_create_copy(opt);
|
||||
if (i != 0) {
|
||||
rocksdb_comparator_t *comp = rocksdb_comparator_create(NULL, NULL, kvDBComp, kvDBName);
|
||||
rocksdb_options_set_comparator((rocksdb_options_t *)cfOpt[i], comp);
|
||||
}
|
||||
}
|
||||
|
||||
// Delete
|
||||
rocksdb_delete(db, writeoptions, "key", 3, &err);
|
||||
rocksdb_column_family_handle_t **cfHandle = malloc(len * sizeof(rocksdb_column_family_handle_t *));
|
||||
db = rocksdb_open_column_families(opt, path, len, (const char *const *)cfName, cfOpt, cfHandle, &err);
|
||||
|
||||
// Read again
|
||||
val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
|
||||
printf("val:%s\n", val);
|
||||
{
|
||||
rocksdb_readoptions_t *rOpt = rocksdb_readoptions_create();
|
||||
size_t vlen = 0;
|
||||
|
||||
char *v = rocksdb_get_cf(db, rOpt, cfHandle[0], "key", strlen("key"), &vlen, &err);
|
||||
printf("Get value %s, and len = %d\n", v, (int)vlen);
|
||||
}
|
||||
|
||||
rocksdb_writeoptions_t *wOpt = rocksdb_writeoptions_create();
|
||||
rocksdb_writebatch_t *wBatch = rocksdb_writebatch_create();
|
||||
rocksdb_writebatch_put_cf(wBatch, cfHandle[0], "key", strlen("key"), "value", strlen("value"));
|
||||
rocksdb_write(db, wOpt, wBatch, &err);
|
||||
|
||||
rocksdb_readoptions_t *rOpt = rocksdb_readoptions_create();
|
||||
size_t vlen = 0;
|
||||
|
||||
{
|
||||
rocksdb_writeoptions_t *wOpt = rocksdb_writeoptions_create();
|
||||
rocksdb_writebatch_t *wBatch = rocksdb_writebatch_create();
|
||||
for (int i = 0; i < 100; i++) {
|
||||
char buf[128] = {0};
|
||||
KV kv = {.k1 = (100 - i) % 26, .k2 = i % 26};
|
||||
kvSerial(&kv, buf);
|
||||
rocksdb_writebatch_put_cf(wBatch, cfHandle[1], buf, sizeof(kv), "value", strlen("value"));
|
||||
}
|
||||
rocksdb_write(db, wOpt, wBatch, &err);
|
||||
}
|
||||
{
|
||||
{
|
||||
char buf[128] = {0};
|
||||
KV kv = {.k1 = 0, .k2 = 0};
|
||||
kvSerial(&kv, buf);
|
||||
char *v = rocksdb_get_cf(db, rOpt, cfHandle[1], buf, sizeof(kv), &vlen, &err);
|
||||
printf("Get value %s, and len = %d, xxxx\n", v, (int)vlen);
|
||||
rocksdb_iterator_t *iter = rocksdb_create_iterator_cf(db, rOpt, cfHandle[1]);
|
||||
rocksdb_iter_seek_to_first(iter);
|
||||
int i = 0;
|
||||
while (rocksdb_iter_valid(iter)) {
|
||||
size_t klen, vlen;
|
||||
const char *key = rocksdb_iter_key(iter, &klen);
|
||||
const char *value = rocksdb_iter_value(iter, &vlen);
|
||||
KV kv;
|
||||
kvDeserial(&kv, (char *)key);
|
||||
printf("kv1: %d\t kv2: %d, len:%d, value = %s\n", (int)(kv.k1), (int)(kv.k2), (int)(klen), value);
|
||||
i++;
|
||||
rocksdb_iter_next(iter);
|
||||
}
|
||||
rocksdb_iter_destroy(iter);
|
||||
}
|
||||
{
|
||||
char buf[128] = {0};
|
||||
KV kv = {.k1 = 0, .k2 = 0};
|
||||
int len = kvSerial(&kv, buf);
|
||||
rocksdb_iterator_t *iter = rocksdb_create_iterator_cf(db, rOpt, cfHandle[1]);
|
||||
rocksdb_iter_seek(iter, buf, len);
|
||||
if (!rocksdb_iter_valid(iter)) {
|
||||
printf("invalid iter");
|
||||
}
|
||||
{
|
||||
char buf[128] = {0};
|
||||
KV kv = {.k1 = 100, .k2 = 0};
|
||||
int len = kvSerial(&kv, buf);
|
||||
|
||||
rocksdb_iterator_t *iter = rocksdb_create_iterator_cf(db, rOpt, cfHandle[1]);
|
||||
rocksdb_iter_seek(iter, buf, len);
|
||||
if (!rocksdb_iter_valid(iter)) {
|
||||
printf("invalid iter\n");
|
||||
rocksdb_iter_seek_for_prev(iter, buf, len);
|
||||
if (!rocksdb_iter_valid(iter)) {
|
||||
printf("stay invalid iter\n");
|
||||
} else {
|
||||
size_t klen = 0, vlen = 0;
|
||||
const char *key = rocksdb_iter_key(iter, &klen);
|
||||
const char *value = rocksdb_iter_value(iter, &vlen);
|
||||
KV kv;
|
||||
kvDeserial(&kv, (char *)key);
|
||||
printf("kv1: %d\t kv2: %d, len:%d, value = %s\n", (int)(kv.k1), (int)(kv.k2), (int)(klen), value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// char *v = rocksdb_get_cf(db, rOpt, cfHandle[0], "key", strlen("key"), &vlen, &err);
|
||||
// printf("Get value %s, and len = %d\n", v, (int)vlen);
|
||||
|
||||
rocksdb_column_family_handle_destroy(cfHandle[0]);
|
||||
rocksdb_column_family_handle_destroy(cfHandle[1]);
|
||||
rocksdb_close(db);
|
||||
|
||||
// {
|
||||
// // rocksdb_options_t *Options = rocksdb_options_create();
|
||||
// db = rocksdb_open(comm, path, &err);
|
||||
// if (db != NULL) {
|
||||
// rocksdb_options_t *cfo = rocksdb_options_create_copy(comm);
|
||||
// rocksdb_comparator_t *cmp1 = rocksdb_comparator_create(NULL, NULL, kvDBComp, kvDBName);
|
||||
// rocksdb_options_set_comparator(cfo, cmp1);
|
||||
|
||||
// rocksdb_column_family_handle_t *handle = rocksdb_create_column_family(db, cfo, "cf1", &err);
|
||||
|
||||
// rocksdb_column_family_handle_destroy(handle);
|
||||
// rocksdb_close(db);
|
||||
// db = NULL;
|
||||
// }
|
||||
// }
|
||||
|
||||
// int ncf = 2;
|
||||
|
||||
// rocksdb_column_family_handle_t **pHandle = malloc(ncf * sizeof(rocksdb_column_family_handle_t *));
|
||||
|
||||
// {
|
||||
// rocksdb_options_t *options = rocksdb_options_create_copy(comm);
|
||||
|
||||
// rocksdb_comparator_t *cmp1 = rocksdb_comparator_create(NULL, NULL, kvDBComp, kvDBName);
|
||||
// rocksdb_options_t *dbOpts1 = rocksdb_options_create_copy(comm);
|
||||
// rocksdb_options_t *dbOpts2 = rocksdb_options_create_copy(comm);
|
||||
// rocksdb_options_set_comparator(dbOpts2, cmp1);
|
||||
// // rocksdb_column_family_handle_t *cf = rocksdb_create_column_family(db, dbOpts1, "cmp1", &err);
|
||||
|
||||
// const char *pName[] = {"default", "cf1"};
|
||||
|
||||
// const rocksdb_options_t **pOpts = malloc(ncf * sizeof(rocksdb_options_t *));
|
||||
// pOpts[0] = dbOpts1;
|
||||
// pOpts[1] = dbOpts2;
|
||||
|
||||
// rocksdb_options_t *allOptions = rocksdb_options_create_copy(comm);
|
||||
// db = rocksdb_open_column_families(allOptions, "test", ncf, pName, pOpts, pHandle, &err);
|
||||
// }
|
||||
|
||||
// // rocksdb_options_t *options = rocksdb_options_create();
|
||||
// // rocksdb_options_set_create_if_missing(options, 1);
|
||||
|
||||
// // //rocksdb_open_column_families(const rocksdb_options_t *options, const char *name, int num_column_families,
|
||||
// // const char *const *column_family_names,
|
||||
// // const rocksdb_options_t *const *column_family_options,
|
||||
// // rocksdb_column_family_handle_t **column_family_handles, char **errptr);
|
||||
|
||||
// for (int i = 0; i < 100; i++) {
|
||||
// char buf[128] = {0};
|
||||
|
||||
// rocksdb_writeoptions_t *wopt = rocksdb_writeoptions_create();
|
||||
// KV kv = {.k1 = i, .k2 = i};
|
||||
// kvSerial(&kv, buf);
|
||||
// rocksdb_put_cf(db, wopt, pHandle[0], buf, strlen(buf), (const char *)&i, sizeof(i), &err);
|
||||
// }
|
||||
|
||||
// rocksdb_close(db);
|
||||
// Write
|
||||
// rocksdb_writeoptions_t *writeoptions = rocksdb_writeoptions_create();
|
||||
// rocksdb_put(db, writeoptions, "key", 3, "value", 5, &err);
|
||||
|
||||
//// Read
|
||||
// rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
|
||||
// rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
|
||||
// size_t vallen = 0;
|
||||
// char *val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
|
||||
// printf("val:%s\n", val);
|
||||
|
||||
//// Update
|
||||
//// rocksdb_put(db, writeoptions, "key", 3, "eulav", 5, &err);
|
||||
|
||||
//// Delete
|
||||
// rocksdb_delete(db, writeoptions, "key", 3, &err);
|
||||
|
||||
//// Read again
|
||||
// val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
|
||||
// printf("val:%s\n", val);
|
||||
|
||||
// rocksdb_close(db);
|
||||
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
|
After Width: | Height: | Size: 13 KiB |
After Width: | Height: | Size: 14 KiB |
|
@ -4,7 +4,7 @@ if(${BUILD_DOCS})
|
|||
find_package(Doxygen)
|
||||
if (DOXYGEN_FOUND)
|
||||
# Build the doc
|
||||
set(DOXYGEN_IN ${TD_SOURCE_DIR}/docs/Doxyfile.in)
|
||||
set(DOXYGEN_IN ${TD_SOURCE_DIR}/docs/doxgen/Doxyfile.in)
|
||||
set(DOXYGEN_OUT ${CMAKE_BINARY_DIR}/Doxyfile)
|
||||
|
||||
configure_file(${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY)
|
||||
|
|
|
@ -5,7 +5,7 @@ description: This website contains the user manuals for TDengine, an open-source
|
|||
slug: /
|
||||
---
|
||||
|
||||
TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It’s written mainly for architects, developers, and system administrators.
|
||||
TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It's written mainly for architects, developers, and system administrators.
|
||||
|
||||
To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.
|
||||
|
||||
|
|
|
@ -44,7 +44,7 @@ For more details on features, please read through the entire documentation.
|
|||
|
||||
## Competitive Advantages
|
||||
|
||||
By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb), with the following advantages.
|
||||
By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb/), with the following advantages.
|
||||
|
||||
- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
|
||||
|
||||
|
@ -57,7 +57,7 @@ By making full use of [characteristics of time series data](https://tdengine.com
|
|||
|
||||
- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
|
||||
|
||||
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
|
||||
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine's core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
|
||||
|
||||
With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced.
|
||||
|
||||
|
@ -109,8 +109,8 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
|
|||
|
||||
| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
|
||||
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
|
||||
| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
|
||||
| Very large total processing capacity | | | √ | TDengine's cluster functions can easily improve processing capacity via multi-server coordination. |
|
||||
| Extremely high-speed data processing | | | √ | TDengine's storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
|
||||
| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
|
||||
|
||||
### System Maintenance Requirements
|
||||
|
@ -123,13 +123,12 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
|
|||
|
||||
## Comparison with other databases
|
||||
|
||||
- [Writing Performance Comparison of TDengine and InfluxDB ](https://tdengine.com/performance-comparison-of-tdengine-and-influxdb/)
|
||||
- [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/query-performance-comparison-test-report-tdengine-vs-influxdb/)
|
||||
- [TDengine vs OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
|
||||
- [TDengine vs Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)
|
||||
- [TDengine vs InfluxDB](https://tdengine.com/performance-tdengine-vs-influxdb/)
|
||||
- [TDengine vs. InfluxDB](https://tdengine.com/tsdb-comparison-influxdb-vs-tdengine/)
|
||||
- [TDengine vs. TimescaleDB](https://tdengine.com/tsdb-comparison-timescaledb-vs-tdengine/)
|
||||
- [TDengine vs. OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
|
||||
- [TDengine vs. Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)
|
||||
|
||||
## More readings
|
||||
- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
|
||||
- [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/)
|
||||
|
||||
|
||||
|
|
|
@ -127,7 +127,7 @@ To make full use of time-series data characteristics, TDengine adopts a strategy
|
|||
|
||||
If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**
|
||||
|
||||
TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won’t build the index on any metrics stored. Column wise storage is used.
|
||||
TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won't build the index on any metrics stored. Column wise storage is used.
|
||||
|
||||
Complex devices, such as connected cars, may have multiple DCPs. In this case, multiple tables are created for a single device, one table per DCP.
|
||||
|
||||
|
|
|
@ -6,7 +6,7 @@ description: This document describes how to install TDengine in a Docker contain
|
|||
|
||||
This document describes how to install TDengine in a Docker container and perform queries and inserts.
|
||||
|
||||
- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
|
||||
- The easiest way to explore TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
|
||||
- To get started with TDengine in a non-containerized environment, see [Quick Install from Package](../../get-started/package).
|
||||
- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ import PkgListV3 from "/components/PkgListV3";
|
|||
|
||||
This document describes how to install TDengine on Linux/Windows/macOS and perform queries and inserts.
|
||||
|
||||
- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
|
||||
- The easiest way to explore TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
|
||||
- To get started with TDengine on Docker, see [Quick Install on Docker](../../get-started/docker).
|
||||
- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
|
||||
|
||||
|
@ -20,6 +20,19 @@ The standard server installation package includes `taos`, `taosd`, `taosAdapter`
|
|||
|
||||
The TDengine Community Edition is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.tz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on x64 Windows and x64/m1 macOS.
|
||||
|
||||
## Operating environment requirements
|
||||
In the Linux system, the minimum requirements for the operating environment are as follows:
|
||||
|
||||
linux core version - 3.10.0-1160.83.1.el7.x86_64;
|
||||
|
||||
glibc version - 2.17;
|
||||
|
||||
If compiling and installing through clone source code, it is also necessary to meet the following requirements:
|
||||
|
||||
cmake version - 3.26.4 or above;
|
||||
|
||||
gcc version - 9.3.1 or above;
|
||||
|
||||
## Installation
|
||||
|
||||
<Tabs>
|
||||
|
@ -102,7 +115,7 @@ sudo apt-get install tdengine
|
|||
|
||||
:::tip
|
||||
This installation method is supported only for Debian and Ubuntu.
|
||||
::::
|
||||
:::
|
||||
</TabItem>
|
||||
<TabItem label="Windows" value="windows">
|
||||
|
||||
|
@ -208,6 +221,8 @@ The following `launchctl` commands can help you manage TDengine service:
|
|||
|
||||
- Check TDengine Server status: `sudo launchctl list | grep taosd`
|
||||
|
||||
- Check TDengine Server status details: `launchctl print system/com.tdengine.taosd`
|
||||
|
||||
:::info
|
||||
- Please use `sudo` to run `launchctl` to manage _com.tdengine.taosd_ with administrator privileges.
|
||||
- The administrator privilege is required for service management to enhance security.
|
||||
|
|
|
@ -12,4 +12,4 @@ When using REST connection, the feature of bulk pulling can be enabled if the si
|
|||
{{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}}
|
||||
```
|
||||
|
||||
More configuration about connection,please refer to [Java Connector](/reference/connector/java)
|
||||
More configuration about connection, please refer to [Java Connector](/reference/connector/java)
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
```php title="原生连接"
|
||||
```php title=""native"
|
||||
{{#include docs/examples/php/connect.php}}
|
||||
```
|
||||
|
|
|
@ -33,7 +33,7 @@ There are two ways for a connector to establish connections to TDengine:
|
|||
|
||||
For REST and native connections, connectors provide similar APIs for performing operations and running SQL statements on your databases. The main difference is the method of establishing the connection, which is not visible to users.
|
||||
|
||||
Key differences:
|
||||
Key differences:
|
||||
|
||||
3. The REST connection is more accessible with cross-platform support, however it results in a 30% performance downgrade.
|
||||
1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc.
|
||||
|
@ -83,7 +83,7 @@ If `maven` is used to manage the projects, what needs to be done is only adding
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.0.0</version>
|
||||
<version>3.2.1</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
@ -198,7 +198,7 @@ The sample code below are based on dotnet6.0, they may need to be adjusted if yo
|
|||
<TabItem label="R" value="r">
|
||||
|
||||
1. Download [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.0.0/).
|
||||
2. Install the dependency package `RJDBC`:
|
||||
2. Install the dependency package `RJDBC`:
|
||||
|
||||
```R
|
||||
install.packages("RJDBC")
|
||||
|
@ -213,7 +213,7 @@ If the client driver (taosc) is already installed, then the C connector is alrea
|
|||
</TabItem>
|
||||
<TabItem label="PHP" value="php">
|
||||
|
||||
**Download Source Code Package and Unzip:**
|
||||
**Download Source Code Package and Unzip: **
|
||||
|
||||
```shell
|
||||
curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
|
||||
|
@ -223,13 +223,13 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
|
|||
|
||||
> Version number `v1.0.2` is only for example, it can be replaced to any newer version, please check available version from [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
|
||||
|
||||
**Non-Swoole Environment:**
|
||||
**Non-Swoole Environment: **
|
||||
|
||||
```shell
|
||||
phpize && ./configure && make -j && make install
|
||||
```
|
||||
|
||||
**Specify TDengine Location:**
|
||||
**Specify TDengine Location: **
|
||||
|
||||
```shell
|
||||
phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
|
||||
|
@ -238,7 +238,7 @@ phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 &&
|
|||
> `--with-tdengine-dir=` is followed by the TDengine installation location.
|
||||
> This way is useful in case TDengine location can't be found automatically or macOS.
|
||||
|
||||
**Swoole Environment:**
|
||||
**Swoole Environment: **
|
||||
|
||||
```shell
|
||||
phpize && ./configure --enable-swoole && make -j && make install
|
||||
|
@ -288,6 +288,6 @@ Prior to establishing connection, please make sure TDengine is already running a
|
|||
</Tabs>
|
||||
|
||||
:::tip
|
||||
If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.tdengine.com/train-faq/faq).
|
||||
If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](../../train-faq/faq).
|
||||
|
||||
:::
|
||||
|
|
|
@ -33,7 +33,7 @@ The below SQL statement is used to insert one row into table "d1001".
|
|||
INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31);
|
||||
```
|
||||
|
||||
`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
|
||||
`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
|
||||
|
||||
### Insert Multiple Rows
|
||||
|
||||
|
@ -43,7 +43,7 @@ Multiple rows can be inserted in a single SQL statement. The example below inser
|
|||
INSERT INTO d1001 VALUES (ts2, 10.2, 220, 0.23) (ts2, 10.3, 218, 0.25);
|
||||
```
|
||||
|
||||
`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
|
||||
`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
|
||||
|
||||
### Insert into Multiple Tables
|
||||
|
||||
|
@ -53,7 +53,7 @@ Data can be inserted into multiple tables in the same SQL statement. The example
|
|||
INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31) (ts2, 12.6, 218, 0.33) d1002 VALUES (ts3, 12.3, 221, 0.31);
|
||||
```
|
||||
|
||||
`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
|
||||
`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
|
||||
|
||||
For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).
|
||||
|
||||
|
|
|
@ -69,7 +69,7 @@ For more details please refer to [InfluxDB Line Protocol](https://docs.influxdat
|
|||
|
||||
## Query Examples
|
||||
|
||||
If you want query the data of `location=California.LosAngeles,groupid=2`,here is the query SQL:
|
||||
If you want to query the data of `location=California.LosAngeles,groupid=2`, here is the query SQL:
|
||||
|
||||
```sql
|
||||
SELECT * FROM meters WHERE location = "California.LosAngeles" AND groupid = 2;
|
||||
|
|
|
@ -84,7 +84,7 @@ Query OK, 4 row(s) in set (0.005399s)
|
|||
|
||||
## Query Examples
|
||||
|
||||
If you want query the data of `location=California.LosAngeles groupid=3`,here is the query SQL:
|
||||
If you want to query the data of `location=California.LosAngeles groupid=3`, here is the query SQL:
|
||||
|
||||
```sql
|
||||
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
|
||||
|
|
|
@ -97,7 +97,7 @@ Query OK, 2 row(s) in set (0.004076s)
|
|||
|
||||
## Query Examples
|
||||
|
||||
If you want query the data of "tags": {"location": "California.LosAngeles", "groupid": 1},here is the query SQL:
|
||||
If you want query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}, here is the query SQL:
|
||||
|
||||
```sql
|
||||
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 1;
|
||||
|
|
|
@ -49,7 +49,7 @@ If the data source is Kafka, then the application program is a consumer of Kafka
|
|||
|
||||
On the server side, the database configuration parameter `vgroups` needs to be set carefully to maximize system performance. If it's set too low, the system capability can't be fully utilized; if it's set too high, unnecessary resource competition may be produced. A common recommendation for the `vgroups` parameter is twice the number of CPU cores. However, depending on the actual system resources, it may still need to be tuned.
|
||||
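For example, on a host with 16 CPU cores the database could be created with 32 vgroups (a sketch; `db_name` is a placeholder):

```sql
CREATE DATABASE db_name VGROUPS 32;
```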
|
||||
For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config)。
|
||||
For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config).
|
||||
|
||||
## Sample Programs
|
||||
|
||||
|
@ -98,7 +98,7 @@ The main Program is responsible for:
|
|||
3. Start reading threads
|
||||
4. Output writing speed every 10 seconds
|
||||
|
||||
The main program provides 4 parameters for tuning:
|
||||
The main program provides 4 parameters for tuning:
|
||||
|
||||
1. The number of reading threads, default value is 1
|
||||
2. The number of writing threads, default value is 2
|
||||
|
@ -192,7 +192,7 @@ TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
|
|||
|
||||
If you want to launch the sample program on a remote server, please follow below steps:
|
||||
|
||||
1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java` :
|
||||
1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java`:
|
||||
```
|
||||
mvn package
|
||||
```
|
||||
|
@ -385,7 +385,7 @@ SQLWriter class encapsulates the logic of composing SQL and writing data. Please
|
|||
pip3 install faster-fifo
|
||||
```
|
||||
|
||||
3. Click the "Copy" in the above sample programs to copy `fast_write_example.py` 、 `sql_writer.py` and `mockdatasource.py`.
|
||||
3. Click the "Copy" in the above sample programs to copy `fast_write_example.py`, `sql_writer.py`, and `mockdatasource.py`.
|
||||
|
||||
4. Execute the program
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
### python Kafka 客户端
|
||||
### Python Kafka client
|
||||
|
||||
For the Python Kafka client, please refer to [kafka client](https://cwiki.apache.org/confluence/display/KAFKA/Clients#Clients-Python). In this document, we use [kafka-python](http://github.com/dpkp/kafka-python).
|
||||
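As a minimal sketch of the consumption loop (the topic name, broker address and consumer group below are placeholder assumptions; the complete programs later in this section show how the consumed records are then written to TDengine):

```python
from kafka import KafkaConsumer

consumer = KafkaConsumer(
    "tdengine-test",                      # hypothetical topic name
    bootstrap_servers="localhost:9092",   # hypothetical broker address
    group_id="tdengine-writers",          # hypothetical consumer group
)
for message in consumer:
    line = message.value.decode("utf-8")  # one record per Kafka message
    # hand `line` over to the TDengine writer here
```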
|
||||
|
@ -88,7 +88,7 @@ In addition to python's built-in multithreading and multiprocessing library, we
|
|||
<details>
|
||||
<summary>kafka_example_consumer</summary>
|
||||
|
||||
`kafka_example_consumer` is `consumer`,which is responsible for consuming data from kafka and writing it to TDengine.
|
||||
`kafka_example_consumer` is `consumer`, which is responsible for consuming data from kafka and writing it to TDengine.
|
||||
|
||||
```py
|
||||
{{#include docs/examples/python/kafka_example_consumer.py}}
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
```rust
|
||||
{{#include docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs}}
|
||||
```
|
|
@ -20,10 +20,10 @@ import CAsync from "./_c_async.mdx";
|
|||
|
||||
## Introduction
|
||||
|
||||
SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine:
|
||||
SQL is used by TDengine as its query language. Application programs can send SQL statements to TDengine through REST API or connectors. TDengine's CLI `taos` can also be used to execute ad hoc SQL queries. Here is the list of major query functionalities supported by TDengine:
|
||||
|
||||
- Query on single column or multiple columns
|
||||
- Filter on tags or data columns:>, <, =, <\>, like
|
||||
- Filter on tags or data columns: >, <, =, <\>, like
|
||||
- Grouping of results: `Group By`
- Sorting of results: `Order By`
- Limit the number of results: `Limit/Offset`
|
||||
- Windowed aggregate queries for time windows (interval), session windows (session), and state windows (state_window)
|
||||
- Arithmetic on columns of numeric types or aggregate results
|
||||
|
@ -160,7 +160,7 @@ In the section describing [Insert](/develop/insert-data/sql-writing), a database
|
|||
:::note
|
||||
|
||||
1. With either REST connection or native connection, the above sample code works well.
|
||||
2. Please note that `use db` can't be used in case of REST connection because it's stateless.
|
||||
2. Please note that `use db` can't be used with a REST connection because it's stateless. You can specify the database name either in the REST endpoint's parameter or by prefixing the table name as `<db_name>.<table_name>` in the SQL command, as shown in the example after this note.
|
||||
|
||||
:::
|
||||
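A minimal sketch of the second option, assuming a database named `power` that contains the table `d1001`:

```sql
SELECT COUNT(*) FROM power.d1001;
```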
|
||||
|
|
|
@ -23,7 +23,7 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i
|
|||
|
||||
To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.
|
||||
|
||||
Tips:The default data subscription is to consume data from the wal. If the wal is deleted, the consumed data will be incomplete. At this time, you can set the parameter experimental.snapshot.enable to true to obtain all data from the tsdb, but in this way, the consumption order of the data cannot be guaranteed. Therefore, it is recommended to set a reasonable retention policy for WAL based on your consumption situation to ensure that you can subscribe all data from WAL.
|
||||
Tip: Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. So you need to set reasonable values for the `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` parameters when creating the database, and make sure your application consumes the data in a timely way so that no data is lost. This behavior is similar to Kafka and other widely used message queue products.
|
||||
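For example, the WAL can be kept for one day when the database is created (a sketch; the retention period is given in seconds and `power` is a placeholder database name):

```sql
CREATE DATABASE power WAL_RETENTION_PERIOD 86400;
```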
|
||||
## Data Schema and API
|
||||
|
||||
|
@ -105,6 +105,12 @@ class Consumer:
|
|||
def poll(self, timeout: float = 1.0):
|
||||
pass
|
||||
|
||||
def assignment(self):
|
||||
pass
|
||||
|
||||
def poll(self, timeout: float = 1.0):
|
||||
pass
|
||||
|
||||
def close(self):
|
||||
pass
|
||||
|
||||
|
@ -222,7 +228,7 @@ A database including one supertable and two subtables is created as follows:
|
|||
|
||||
```sql
|
||||
DROP DATABASE IF EXISTS tmqdb;
|
||||
CREATE DATABASE tmqdb;
|
||||
CREATE DATABASE tmqdb WAL_RETENTION_PERIOD 3600;
|
||||
CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16)) TAGS(t1 INT, t3 VARCHAR(16));
|
||||
CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0");
|
||||
CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");
|
||||
|
@ -238,6 +244,8 @@ The following SQL statement creates a topic in TDengine:
|
|||
CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1;
|
||||
```
|
||||
|
||||
- There is an upper limit to the number of topics that can be created, controlled by the parameter `tmqMaxTopicNum`, with a default of 20.
|
||||
|
||||
Multiple subscription types are supported.
|
||||
|
||||
#### Subscribe to a Column
|
||||
|
@ -259,14 +267,15 @@ You can subscribe to a topic through a SELECT statement. Statements that specify
|
|||
Syntax:
|
||||
|
||||
```sql
|
||||
CREATE TOPIC topic_name AS STABLE stb_name
|
||||
CREATE TOPIC topic_name [with meta] AS STABLE stb_name [where_condition]
|
||||
```
|
||||
|
||||
Creating a topic in this manner differs from a `SELECT * from stbName` statement as follows:
|
||||
|
||||
- The table schema can be modified.
|
||||
- Unstructured data is returned. The format of the data returned changes based on the supertable schema.
|
||||
- A different table schema may exist for every data block to be processed.
|
||||
- The `with meta` parameter is optional. When specified, statements such as those for creating supertables and subtables are also returned; this is mainly used by Taosx to perform supertable migration.
|
||||
- The `where_condition` parameter is optional and is used to filter for and subscribe to only the subtables that meet the criteria. The where condition cannot contain ordinary columns, only tags or `tbname`. Functions can be used in the where condition to filter tags, but aggregate functions cannot be used, because subtable tag values cannot be aggregated. It can also be a constant expression, such as `2 > 1` (subscribe to all subtables) or `false` (subscribe to no subtables); see the example after this list.
|
||||
- The data returned does not include tags.
|
||||
|
||||
### Subscribe to a Database
|
||||
|
@ -274,10 +283,12 @@ Creating a topic in this manner differs from a `SELECT * from stbName` statement
|
|||
Syntax:
|
||||
|
||||
```sql
|
||||
CREATE TOPIC topic_name [WITH META] AS DATABASE db_name;
|
||||
CREATE TOPIC topic_name [with meta] AS DATABASE db_name;
|
||||
```
|
||||
|
||||
This SQL statement creates a subscription to all tables in the database. You can add the `WITH META` parameter to include schema changes in the subscription, including creating and deleting supertables; adding, deleting, and modifying columns; and creating, deleting, and modifying the tags of subtables. Consumers can determine the message type from the API. Note that this differs from Kafka.
|
||||
This SQL statement creates a subscription to all tables in the database.
|
||||
|
||||
- The `with meta` parameter is optional. When specified, statements for creating all supertables and subtables in the database are also returned; this is mainly used for Taosx database migration.
|
||||
|
||||
## Create a Consumer
|
||||
|
||||
|
@ -285,16 +296,15 @@ You configure the following parameters when creating a consumer:
|
|||
|
||||
| Parameter | Type | Description | Remarks |
|
||||
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
|
||||
| `td.connect.ip` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
|
||||
| `td.connect.user` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
|
||||
| `td.connect.pass` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
|
||||
| `td.connect.port` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
|
||||
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. |
|
||||
| `td.connect.ip` | string | IP address of the server side | |
|
||||
| `td.connect.user` | string | User Name | |
|
||||
| `td.connect.pass` | string | Password | |
|
||||
| `td.connect.port` | string | Port of the server side | |
|
||||
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. Each topic can create up to 100 consumer groups. |
|
||||
| `client.id` | string | Client ID | Maximum length: 192. |
|
||||
| `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
|
||||
| `enable.auto.commit` | boolean | Commit automatically; true: user application doesn't need to explicitly commit; false: user application need to handle commit by itself | Default value is true |
|
||||
| `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds |
|
||||
| `experimental.snapshot.enable` | boolean | Specify whether to consume data in TSDB; true: both data in WAL and in TSDB can be consumed; false: only data in WAL can be consumed | default value: false |
|
||||
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages | default value: false
|
||||
|
||||
The method of specifying these parameters depends on the language used:
|
||||
|
@ -312,7 +322,6 @@ tmq_conf_set(conf, "group.id", "cgrpName");
|
|||
tmq_conf_set(conf, "td.connect.user", "root");
|
||||
tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
||||
tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
||||
tmq_conf_set(conf, "experimental.snapshot.enable", "true");
|
||||
tmq_conf_set(conf, "msg.with.table.name", "true");
|
||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||
|
||||
|
@ -327,6 +336,7 @@ Java programs use the following parameters:
|
|||
|
||||
| Parameter | Type | Description | Remarks |
|
||||
| ----------------------------- | ------ | ------------------------------------------------------------------------------------------------------------------------------- | ------- |
|
||||
| `td.connect.type` | string | connection type: "jni" means native connection, "ws" means websocket connection, the default is "jni" |
|
||||
| `bootstrap.servers` | string |Connection address, such as `localhost:6030` |
|
||||
| `value.deserializer` | string | Value deserializer; to use this method, implement the `com.taosdata.jdbc.tmq.Deserializer` interface or inherit the `com.taosdata.jdbc.tmq.ReferenceDeserializer` type |
|
||||
| `value.deserializer.encoding` | string | Specify the encoding for string deserialization | |
|
||||
|
@ -368,7 +378,6 @@ conf := &tmq.ConfigMap{
|
|||
"td.connect.port": "6030",
|
||||
"client.id": "test_tmq_c",
|
||||
"enable.auto.commit": "false",
|
||||
"experimental.snapshot.enable": "true",
|
||||
"msg.with.table.name": "true",
|
||||
}
|
||||
consumer, err := NewConsumer(conf)
|
||||
|
@ -402,23 +411,6 @@ from taos.tmq import Consumer
|
|||
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
||||
```
|
||||
|
||||
Python programs use the following parameters:
|
||||
|
||||
| Parameter | Type | Description | Remarks |
|
||||
|:---------:|:----:|:-----------:|:-------:|
|
||||
| `td.connect.ip` | string | Used in establishing a connection||
|
||||
| `td.connect.user` | string | Used in establishing a connection||
|
||||
| `td.connect.pass` | string | Used in establishing a connection||
|
||||
| `td.connect.port` | string | Used in establishing a connection||
|
||||
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192 |
|
||||
| `client.id` | string | Client ID | Maximum length: 192 |
|
||||
| `msg.with.table.name` | string | Specify whether to deserialize table names from messages | pecify `true` or `false` |
|
||||
| `enable.auto.commit` | string | Commit automatically | pecify `true` or `false` |
|
||||
| `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | |
|
||||
| `auto.offset.reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
|
||||
| `experimental.snapshot.enable` | string | Specify whether it's allowed to consume messages from the WAL or from TSDB | Specify `true` or `false` |
|
||||
| `enable.heartbeat.background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false` |
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="Node.JS" value="Node.JS">
|
||||
|
|
|
@ -10,10 +10,10 @@ TDengine uses various kinds of caching techniques to efficiently write and query
|
|||
|
||||
TDengine uses an insert-driven cache management policy, known as first in, first out (FIFO). This policy differs from read-driven "least recently used (LRU)" cache management. A FIFO policy stores the latest data in cache and flushes the oldest data from cache to disk when the cache usage reaches a threshold. In IoT use cases, the most recent data or the current state is most important. The cache policy in TDengine, like much of the design and architecture of TDengine, is based on the nature of IoT data.
|
||||
|
||||
When you create a database, you can configure the size of the write cache on each vnode. The **vgroups** parameter determines the number of vgroups that process data in the database, and the **buffer** parameter determines the size of the write cache for each vnode.
|
||||
When you create a database, you can configure the size of the write cache on each vnode. The **vgroups** parameter determines the number of vgroups that process data in the database, and the **buffer** parameter determines the size of the write cache for each vnode. The unit of buffer is MB.
|
||||
|
||||
```sql
|
||||
create database db0 vgroups 100 buffer 16MB
|
||||
create database db0 vgroups 100 buffer 16
|
||||
```
|
||||
|
||||
In theory, larger cache sizes are always better. However, at a certain point, it becomes impossible to improve performance by increasing cache size. In most scenarios, you can retain the default cache settings.
|
||||
|
@ -28,10 +28,10 @@ When you create a database, you can configure whether the latest data from every
|
|||
|
||||
## Metadata Cache
|
||||
|
||||
To improve query and write performance, each vnode caches the metadata that it receives. When you create a database, you can configure the size of the metadata cache through the *pages* and *pagesize* parameters.
|
||||
To improve query and write performance, each vnode caches the metadata that it receives. When you create a database, you can configure the size of the metadata cache through the *pages* and *pagesize* parameters. The unit of pagesize is KB.
|
||||
|
||||
```sql
|
||||
create database db0 pages 128 pagesize 16kb
|
||||
create database db0 pages 128 pagesize 16
|
||||
```
|
||||
|
||||
The preceding SQL statement creates 128 pages on each vnode in the `db0` database. Each page has a 16 KB metadata cache.
|
||||
|
|
|
@ -6,18 +6,20 @@ description: This document describes how to create user-defined functions (UDF),
|
|||
|
||||
The built-in functions of TDengine may not be sufficient for the use cases of every application. In this case, you can define custom functions for use in TDengine queries. These are known as user-defined functions (UDF). A user-defined function takes one column of data or the result of a subquery as its input.
|
||||
|
||||
TDengine supports user-defined functions written in C or C++. This document describes the usage of user-defined functions.
|
||||
|
||||
User-defined functions can be scalar functions or aggregate functions. Scalar functions, such as `abs`, `sin`, and `concat`, output a value for every row of data. Aggregate functions, such as `avg` and `max`, output one value for multiple rows of data.
|
||||
|
||||
TDengine supports user-defined functions written in C or Python. This document describes the usage of user-defined functions.
|
||||
|
||||
## Implement a UDF in C
|
||||
|
||||
When you create a user-defined function, you must implement standard interface functions:
|
||||
- For scalar functions, implement the `scalarfn` interface function.
|
||||
- For aggregate functions, implement the `aggfn_start`, `aggfn`, and `aggfn_finish` interface functions.
|
||||
- To initialize your function, implement the `udf_init` function. To terminate your function, implement the `udf_destroy` function.
|
||||
|
||||
There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name\>_start, <udf-name\>_finish, <udf-name\>_init, and <udf-name\>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
|
||||
There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be the UDF name followed by `_start`, `_finish`, `_init`, and `_destroy`, respectively. Replace `scalarfn`, `aggfn`, and `udf` below with the name of your user-defined function.
|
||||
|
||||
## Implementing a Scalar Function
|
||||
### Implementing a Scalar Function in C
|
||||
The implementation of a scalar function is described as follows:
|
||||
```c
|
||||
#include "taos.h"
|
||||
|
@ -49,7 +51,7 @@ int32_t scalarfn_destroy() {
|
|||
```
|
||||
Replace `scalarfn` with the name of your function.
|
||||
|
||||
## Implementing an Aggregate Function
|
||||
### Implementing an Aggregate Function in C
|
||||
|
||||
The implementation of an aggregate function is described as follows:
|
||||
```c
|
||||
|
@ -100,7 +102,7 @@ int32_t aggfn_destroy() {
|
|||
```
|
||||
Replace `aggfn` with the name of your function.
|
||||
|
||||
## Interface Functions
|
||||
### UDF Interface Definition in C
|
||||
|
||||
There are strict naming conventions for interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name\>_start, <udf-name\>_finish, <udf-name\>_init, and <udf-name\>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
|
||||
|
||||
|
@ -108,17 +110,16 @@ Interface functions return a value that indicates whether the operation was succ
|
|||
|
||||
For information about the parameters for interface functions, see Data Model
|
||||
|
||||
### Interfaces for Scalar Functions
|
||||
#### Scalar Interface
|
||||
`int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`
|
||||
|
||||
`int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`
|
||||
|
||||
Replace `scalarfn` with the name of your function. This function performs scalar calculations on data blocks. You can configure a value through the parameters in the `resultColumn` structure.
|
||||
|
||||
The parameters in the function are defined as follows:
|
||||
- inputDataBlock: The data block to input.
|
||||
- resultColumn: The column to output. The column to output.
|
||||
- resultColumn: The column to output.
|
||||
|
||||
### Interfaces for Aggregate Functions
|
||||
#### Aggregate Interface
|
||||
|
||||
`int32_t aggfn_start(SUdfInterBuf *interBuf)`
|
||||
|
||||
|
@ -126,7 +127,7 @@ The parameters in the function are defined as follows:
|
|||
|
||||
`int32_t aggfn_finish(SUdfInterBuf* interBuf, SUdfInterBuf *result)`
|
||||
|
||||
Replace `aggfn` with the name of your function. In the function, aggfn_start is called to generate a result buffer. Data is then divided between multiple blocks, and aggfn is called on each block to update the result. Finally, aggfn_finish is called to generate final results from the intermediate results. The final result contains only one or zero data points.
|
||||
Replace `aggfn` with the name of your function. In the function, aggfn_start is called to generate a result buffer. Data is then divided between multiple blocks, and the `aggfn` function is called on each block to update the result. Finally, aggfn_finish is called to generate the final results from the intermediate results. The final result contains only one or zero data points.
|
||||
|
||||
The parameters in the function are defined as follows:
|
||||
- interBuf: The intermediate result buffer.
|
||||
|
@ -135,15 +136,15 @@ The parameters in the function are defined as follows:
|
|||
- result: The final result.
|
||||
|
||||
|
||||
### Initializing and Terminating User-Defined Functions
|
||||
#### Initialization and Cleanup Interface
|
||||
`int32_t udf_init()`
|
||||
|
||||
`int32_t udf_destroy()`
|
||||
|
||||
Replace `udf`with the name of your function. udf_init initializes the function. udf_destroy terminates the function. If it is not necessary to initialize your function, udf_init is not required. If it is not necessary to terminate your function, udf_destroy is not required.
|
||||
Replace `udf` with the name of your function. udf_init initializes the function. udf_destroy terminates the function. If it is not necessary to initialize your function, udf_init is not required. If it is not necessary to terminate your function, udf_destroy is not required.
|
||||
|
||||
|
||||
## Data Structure of User-Defined Functions
|
||||
### Data Structures for UDF in C
|
||||
```c
|
||||
typedef struct SUdfColumnMeta {
|
||||
int16_t type;
|
||||
|
@ -193,17 +194,17 @@ typedef struct SUdfInterBuf {
|
|||
```
|
||||
The data structure is described as follows:
|
||||
|
||||
- The SUdfDataBlock block includes the number of rows (numOfRows) and number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn.
|
||||
- The SUdfDataBlock block includes the number of rows (numOfRows) and the number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn.
|
||||
- SUdfColumn includes the definition of the data type of the column (colMeta) and the data in the column (colData).
|
||||
- The member definitions of SUdfColumnMeta are the same as the data type definitions in `taos.h`.
|
||||
- The data in SUdfColumnData can become longer. varLenCol indicates variable-length data, and fixLenCol indicates fixed-length data.
|
||||
- The data in SUdfColumnData can be of variable length: varLenCol indicates variable-length data, and fixLenCol indicates fixed-length data.
|
||||
- SUdfInterBuf defines the intermediate structure `buffer` and the number of results in the buffer `numOfResult`.
|
||||
|
||||
Additional functions are defined in `taosudf.h` to make it easier to work with these structures.
|
||||
|
||||
## Compile UDF
|
||||
### Compiling C UDF
|
||||
|
||||
To use your user-defined function in TDengine, first compile it to a dynamically linked library (DLL).
|
||||
To use your user-defined function in TDengine, first compile it to a shared library.
|
||||
|
||||
For example, the sample UDF `bit_and.c` can be compiled into a shared library as follows:
|
||||
|
||||
|
@ -213,12 +214,9 @@ gcc -g -O0 -fPIC -shared bit_and.c -o libbitand.so
|
|||
|
||||
The generated shared library file `libbitand.so` can now be used to implement your function. Note: GCC 7.5 or later is required.
|
||||
|
||||
## Manage and Use User-Defined Functions
|
||||
After compiling your function into a DLL, you add it to TDengine. For more information, see [User-Defined Functions](../12-taos-sql/26-udf.md).
|
||||
### UDF Sample Code in C
|
||||
|
||||
## Sample Code
|
||||
|
||||
### Sample scalar function: [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)
|
||||
#### Scalar function: [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)
|
||||
|
||||
The bit_and function implements a bitwise AND operation across multiple columns. If there is only one column, the column is returned. The bit_and function ignores null values.
|
||||
|
||||
|
@ -231,7 +229,7 @@ The bit_and function implements bitwise addition for multiple columns. If there
|
|||
|
||||
</details>
|
||||
|
||||
### Sample aggregate function: [l2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/l2norm.c)
|
||||
#### Aggregate function 1: [l2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/l2norm.c)
|
||||
|
||||
The l2norm function finds the second-order norm for all data in the input column. This squares the values, takes a cumulative sum, and finds the square root.
|
||||
|
||||
|
@ -243,3 +241,650 @@ The l2norm function finds the second-order norm for all data in the input column
|
|||
```
|
||||
|
||||
</details>
|
||||
|
||||
#### Aggregate function 2: [max_vol](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/max_vol.c)
|
||||
|
||||
Given several voltage columns as input, the max_vol function returns a string that concatenates the deviceId column, the row and column numbers of the maximum voltage, and the maximum voltage itself.
|
||||
|
||||
Create Table:
|
||||
```sql
|
||||
create table battery(ts timestamp, vol1 float, vol2 float, vol3 float, deviceId varchar(16));
|
||||
```
|
||||
Create the UDF:
|
||||
```sql
|
||||
create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C';
|
||||
```
|
||||
Use the UDF in the query:
|
||||
```sql
|
||||
select max_vol(vol1,vol2,vol3,deviceid) from battery;
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary>max_vol.c</summary>
|
||||
|
||||
```c
|
||||
{{#include tests/script/sh/max_vol.c}}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
## Implement a UDF in Python
|
||||
|
||||
### Prepare Environment
|
||||
|
||||
1. Prepare Python Environment
|
||||
|
||||
Please follow the standard procedure for preparing a Python environment.
|
||||
|
||||
2. Install Python package `taospyudf`
|
||||
|
||||
```shell
|
||||
pip3 install taospyudf
|
||||
```
|
||||
|
||||
During this process, some C++ code is compiled, so `cmake` and `gcc` are required on your system. The compiled `libtaospyudf.so` is automatically copied to the `/usr/local/lib` path. If you are not the root user, please use `sudo`. After installation is done, please verify it with the command below.
|
||||
|
||||
```shell
|
||||
root@slave11 ~/udf $ ls -l /usr/local/lib/libtaos*
|
||||
-rw-r--r-- 1 root root 671344 May 24 22:54 /usr/local/lib/libtaospyudf.so
|
||||
```
|
||||
|
||||
Then execute the command below.
|
||||
|
||||
```shell
|
||||
ldconfig
|
||||
```
|
||||
|
||||
3. If you want to utilize some 3rd party python packages in your Python UDF, please set configuration parameter `UdfdLdLibPath` to the value of `PYTHONPATH` before starting `taosd`.
|
||||
|
||||
4. Launch `taosd` service
|
||||
|
||||
Please refer to [Get Started](../../get-started)
|
||||
|
||||
### Interface definition
|
||||
|
||||
#### Introduction to Interface
|
||||
|
||||
Implement the specified interface functions when implementing a UDF in Python.
|
||||
- implement `process` function for the scalar UDF.
|
||||
- implement `start`, `reduce`, `finish` for the aggregate UDF.
|
||||
- implement `init` for initialization and `destroy` for termination.
|
||||
|
||||
#### Scalar UDF Interface
|
||||
|
||||
The implementation of a scalar UDF is described as follows:
|
||||
|
||||
```Python
|
||||
def process(input: datablock) -> tuple[output_type]:
|
||||
```
|
||||
|
||||
Description: this function processes a data block, which is the input; you can use datablock.data(row, col) to access the Python object at location (row, col); the output is a tuple object consisting of objects of type output_type.
|
||||
|
||||
#### Aggregate UDF Interface
|
||||
|
||||
The implementation of an aggregate function is described as follows:
|
||||
|
||||
```Python
|
||||
def start() -> bytes:
|
||||
def reduce(inputs: datablock, buf: bytes) -> bytes:
|
||||
def finish(buf: bytes) -> output_type:
|
||||
```
|
||||
|
||||
Description: first, start() is invoked to generate the initial result buffer; then the input data is divided into multiple row blocks, and reduce() is invoked for each block `inputs` together with the current intermediate result `buf`; finally, finish() is invoked to generate the final result from the intermediate `buf`. The final result can contain only 0 or 1 data points.
|
||||
|
||||
#### Initialization and Cleanup Interface
|
||||
|
||||
```python
|
||||
def init()
|
||||
def destroy()
|
||||
```
|
||||
|
||||
Description: init() does the work of initialization before processing any data; destroy() does the work of cleanup after the data is processed.
|
||||
|
||||
### Python UDF Template
|
||||
|
||||
#### Scalar Template
|
||||
|
||||
```Python
|
||||
def init():
|
||||
# initialization
|
||||
def destroy():
|
||||
# destroy
|
||||
def process(input: datablock) -> tuple[output_type]:
|
||||
# process input datablock,
|
||||
# datablock.data(row, col) is to access the python object in location(row,col)
|
||||
# return tuple object consisted of object of type outputtype
|
||||
```
|
||||
|
||||
Note: process() must be implemented; init() and destroy() must also be defined, but they can be empty.
|
||||
|
||||
#### Aggregate Template
|
||||
|
||||
```Python
|
||||
def init():
|
||||
#initialization
|
||||
def destroy():
|
||||
#destroy
|
||||
def start() -> bytes:
|
||||
#return serialize(init_state)
|
||||
def reduce(inputs: datablock, buf: bytes) -> bytes:
|
||||
# deserialize buf to state
|
||||
# reduce the inputs and state into new_state.
|
||||
# use inputs.data(i,j) to access python object of location(i,j)
|
||||
# serialize new_state into new_state_bytes
|
||||
return new_state_bytes
|
||||
def finish(buf: bytes) -> output_type:
|
||||
#return obj of type outputtype
|
||||
```
|
||||
|
||||
Note: aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be implemented. start() generates the initial result in buffer, then the input data is divided into multiple row data blocks, reduce() is invoked for each data block `inputs` and intermediate `buf`, finally finish() is invoked to generate final result from the intermediate result `buf`.
|
||||
|
||||
### Data Mapping between TDengine SQL and Python UDF
|
||||
|
||||
The following table describes the mapping between TDengine SQL data type and Python UDF Data Type. The `NULL` value of all TDengine SQL types is mapped to the `None` value in Python.
|
||||
|
||||
| **TDengine SQL Data Type** | **Python Data Type** |
|
||||
| :-----------------------: | ------------ |
|
||||
|TINYINT / SMALLINT / INT / BIGINT | int |
|
||||
|TINYINT UNSIGNED / SMALLINT UNSIGNED / INT UNSIGNED / BIGINT UNSIGNED | int |
|
||||
|FLOAT / DOUBLE | float |
|
||||
|BOOL | bool |
|
||||
|BINARY / VARCHAR / NCHAR | bytes|
|
||||
|TIMESTAMP | int |
|
||||
|JSON and other types | Not Supported |
|
||||
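As a small illustration of this mapping, the sketch below assumes a scalar UDF that receives a single VARCHAR column: each value arrives as `bytes` (or `None` for NULL) and the function returns its character length, which maps back to an int output type:

```python
def init():
    pass

def destroy():
    pass

def process(block):
    rows, _ = block.shape()
    out = []
    for i in range(rows):
        v = block.data(i, 0)  # bytes for VARCHAR input, None for NULL
        out.append(None if v is None else len(v.decode("utf-8")))
    return out
```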
|
||||
### Development Guide
|
||||
|
||||
In this section we will demonstrate 5 examples of developing UDFs in Python. In this guide, you will learn development skills from easy cases to hard cases; the examples include:
|
||||
1. A scalar function which accepts only one integer as input and outputs ln(n^2 + 1).
|
||||
2. A scalar function which accepts n integers, like (x1, x2, ..., xn), and outputs the sum of the product of each input value and its sequence number, i.e. x1 + 2 * x2 + ... + n * xn.
|
||||
3. A scalar function which accepts a timestamp and outputs the next closest Sunday after the timestamp. In this case, we will demonstrate how to use the 3rd party library `moment`.
|
||||
4. An aggregate function which calculates the difference between the maximum and the minimum of a specific column, i.e. the same functionality as the built-in spread().
|
||||
|
||||
In the guide, some debugging skills of using Python UDF will be explained too.
|
||||
|
||||
We assume you are using a Linux system and already have TDengine 3.0.4.0+ and Python 3.x installed.
|
||||
|
||||
Note: **You can't use the print() function to output logs inside a UDF; you have to write the log to a specific file or use Python's logging module.**
|
||||
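For example, a minimal sketch that routes UDF logs through Python's logging module (the log file path is a hypothetical choice and must be writable by the account running `taosd`):

```python
import logging

logging.basicConfig(
    filename="/var/log/taos/myudf.log",  # hypothetical path, adjust as needed
    level=logging.DEBUG,
    format="%(asctime)s %(levelname)s %(message)s",
)

def init():
    logging.info("myudf initialized")

def destroy():
    logging.info("myudf destroyed")
```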
|
||||
#### Sample 1: Simplest UDF
|
||||
|
||||
This scalar UDF accepts an integer as input and outputs ln(n^2 + 1).
|
||||
|
||||
First, create a Python source code file on your system and save it, e.g. as `/root/udf/myfun.py`; the code is shown below.
|
||||
|
||||
```python
|
||||
from math import log
|
||||
|
||||
def init():
|
||||
pass
|
||||
|
||||
def destroy():
|
||||
pass
|
||||
|
||||
def process(block):
|
||||
rows, _ = block.shape()
|
||||
return [log(block.data(i, 0) ** 2 + 1) for i in range(rows)]
|
||||
```
|
||||
|
||||
This program consists of 3 functions. init() and destroy() do nothing, but they have to be defined even though there is nothing to do in them, because they are required parts of a Python UDF. The most important function is process(), which accepts a data block; the data block object has two methods:
|
||||
1. shape() returns the number of rows and the number of columns of the data block
|
||||
2. data(i, j) returns the value at (i,j) in the block
|
||||
|
||||
The process() function of a scalar UDF returns exactly as many values as the number of input rows. In this example we ignore the number of columns because we only want to compute on the first column.
|
||||
|
||||
Then, we create the UDF using the SQL command below.
|
||||
|
||||
```sql
|
||||
create function myfun as '/root/udf/myfun.py' outputtype double language 'Python'
|
||||
```
|
||||
|
||||
Here is an example of the output; it may differ slightly depending on the version you are using.
|
||||
|
||||
```shell
|
||||
taos> create function myfun as '/root/udf/myfun.py' outputtype double language 'Python';
|
||||
Create OK, 0 row(s) affected (0.005202s)
|
||||
```
|
||||
|
||||
Then, we use the `show functions` command to verify that the UDF was created successfully.
|
||||
|
||||
```text
|
||||
taos> show functions;
|
||||
name |
|
||||
=================================
|
||||
myfun |
|
||||
Query OK, 1 row(s) in set (0.005767s)
|
||||
```
|
||||
|
||||
Next, we can try to test the function. Before executing the UDF, we need to prepare some data using the command below in TDengine CLI.
|
||||
|
||||
```sql
|
||||
create database test;
|
||||
create table t(ts timestamp, v1 int, v2 int, v3 int);
|
||||
insert into t values('2023-05-01 12:13:14', 1, 2, 3);
|
||||
insert into t values('2023-05-03 08:09:10', 2, 3, 4);
|
||||
insert into t values('2023-05-10 07:06:05', 3, 4, 5);
|
||||
```
|
||||
|
||||
Execute the UDF to test it:
|
||||
|
||||
```sql
|
||||
taos> select myfun(v1, v2) from t;
|
||||
|
||||
DB error: udf function execution failure (0.011088s)
|
||||
```
|
||||
|
||||
Unfortunately, the UDF execution failed. We need to check the log of the `udfd` daemon to find out why.
|
||||
|
||||
```shell
|
||||
tail -10 /var/log/taos/udfd.log
|
||||
```
|
||||
|
||||
Below is the output.
|
||||
|
||||
```text
|
||||
05/24 22:46:28.733545 01665799 UDF ERROR can not load library libtaospyudf.so. error: operation not permitted
|
||||
05/24 22:46:28.733561 01665799 UDF ERROR can not load python plugin. lib path libtaospyudf.so
|
||||
```
|
||||
|
||||
From the error message we can find out that `libtaospyudf.so` was not loaded successfully. Please refer to the [Prepare Environment] section.
|
||||
|
||||
After correcting environment issues, execute the UDF:
|
||||
|
||||
```sql
|
||||
taos> select myfun(v1) from t;
|
||||
myfun(v1) |
|
||||
============================
|
||||
0.693147181 |
|
||||
1.609437912 |
|
||||
2.302585093 |
|
||||
```
|
||||
|
||||
Now, we have finished our first UDF in Python and learned some basic debugging skills.
|
||||
|
||||
#### Sample 2: Exception Handling
|
||||
|
||||
The `myfun` UDF example in sample 1 has passed the test, but it has two drawbacks.
|
||||
|
||||
1. The program accepts only one column of data as input, but it doesn't throw an exception if you pass multiple columns.
|
||||
|
||||
```sql
|
||||
taos> select myfun(v1, v2) from t;
|
||||
myfun(v1, v2) |
|
||||
============================
|
||||
0.693147181 |
|
||||
1.609437912 |
|
||||
2.302585093 |
|
||||
```
|
||||
|
||||
2. `null` values are not processed. We expect the program to throw an exception and terminate if `null` is passed as input.
|
||||
|
||||
So, we try to optimize the process() function as below.
|
||||
|
||||
```python
|
||||
def process(block):
|
||||
rows, cols = block.shape()
|
||||
if cols > 1:
|
||||
raise Exception(f"require 1 parameter but given {cols}")
|
||||
return [ None if block.data(i, 0) is None else log(block.data(i, 0) ** 2 + 1) for i in range(rows)]
|
||||
```
|
||||
|
||||
Then update the UDF with the command below.
|
||||
|
||||
```sql
|
||||
create or replace function myfun as '/root/udf/myfun.py' outputtype double language 'Python';
|
||||
```
|
||||
|
||||
At this time, if we pass two arguments to `myfun`, the execution would fail.
|
||||
|
||||
```sql
|
||||
taos> select myfun(v1, v2) from t;
|
||||
|
||||
DB error: udf function execution failure (0.014643s)
|
||||
```
|
||||
|
||||
However, the exception is not shown to the end user; it is written to the log file `/var/log/taos/taospyudf.log`:
|
||||
|
||||
```text
|
||||
2023-05-24 23:21:06.790 ERROR [1666188] [doPyUdfScalarProc@507] call pyUdfScalar proc function. context 0x7faade26d180. error: Exception: require 1 parameter but given 2
|
||||
|
||||
At:
|
||||
/var/lib/taos//.udf/myfun_3_1884e1281d9.py(12): process
|
||||
|
||||
```
|
||||
|
||||
Now, we have learned how to update a UDF and check the log of a UDF.
|
||||
|
||||
Note: Prior to TDengine 3.0.5.0 (excluding), updating a UDF requires restarting the `taosd` service. Since 3.0.5.0, restarting is no longer required.
|
||||
|
||||
#### Sample 3: UDF with n arguments
|
||||
|
||||
A UDF which accepts n integers, like (x1, x2, ..., xn), and outputs the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is a `null` in the input, then the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and processes each column. Assume the program is written in /root/udf/nsum.py:
|
||||
|
||||
```python
|
||||
def init():
|
||||
pass
|
||||
|
||||
|
||||
def destroy():
|
||||
pass
|
||||
|
||||
|
||||
def process(block):
|
||||
rows, cols = block.shape()
|
||||
result = []
|
||||
for i in range(rows):
|
||||
total = 0
|
||||
for j in range(cols):
|
||||
v = block.data(i, j)
|
||||
if v is None:
|
||||
total = None
|
||||
break
|
||||
total += (j + 1) * block.data(i, j)
|
||||
result.append(total)
|
||||
return result
|
||||
```
|
||||
|
||||
Create and test the UDF:
|
||||
|
||||
```sql
|
||||
create function nsum as '/root/udf/nsum.py' outputtype double language 'Python';
|
||||
```
|
||||
|
||||
```sql
|
||||
taos> insert into t values('2023-05-25 09:09:15', 6, null, 8);
|
||||
Insert OK, 1 row(s) affected (0.003675s)
|
||||
|
||||
taos> select ts, v1, v2, v3, nsum(v1, v2, v3) from t;
|
||||
ts | v1 | v2 | v3 | nsum(v1, v2, v3) |
|
||||
================================================================================================
|
||||
2023-05-01 12:13:14.000 | 1 | 2 | 3 | 14.000000000 |
|
||||
2023-05-03 08:09:10.000 | 2 | 3 | 4 | 20.000000000 |
|
||||
2023-05-10 07:06:05.000 | 3 | 4 | 5 | 26.000000000 |
|
||||
2023-05-25 09:09:15.000 | 6 | NULL | 8 | NULL |
|
||||
Query OK, 4 row(s) in set (0.010653s)
|
||||
```
|
||||
|
||||
#### Sample 4: Utilize 3rd party package
|
||||
|
||||
A UDF which accepts a timestamp and outputs the next closest Sunday. This sample requires the third-party package `moment`; you need to install it first.
|
||||
|
||||
```shell
|
||||
pip3 install moment
|
||||
```
|
||||
|
||||
Then compose the Python code in /root/udf/nextsunday.py
|
||||
|
||||
```python
|
||||
import moment
|
||||
|
||||
|
||||
def init():
|
||||
pass
|
||||
|
||||
|
||||
def destroy():
|
||||
pass
|
||||
|
||||
|
||||
def process(block):
|
||||
rows, cols = block.shape()
|
||||
if cols > 1:
|
||||
raise Exception("require only 1 parameter")
|
||||
if not type(block.data(0, 0)) is int:
|
||||
raise Exception("type error")
|
||||
return [moment.unix(block.data(i, 0)).replace(weekday=7).format('YYYY-MM-DD')
|
||||
for i in range(rows)]
|
||||
```
|
||||
|
||||
The UDF framework maps the TDengine timestamp to the Python int type, so this function only accepts an integer representing milliseconds. process() first validates the parameters, then uses `moment` to move the time to the coming Sunday, formats the result, and returns it.
|
||||
|
||||
Create and test the UDF.
|
||||
|
||||
```sql
|
||||
create function nextsunday as '/root/udf/nextsunday.py' outputtype binary(10) language 'Python';
|
||||
```
|
||||
|
||||
If your `taosd` is started using `systemd`, you may encounter the error below. Next we will show how to debug.
|
||||
|
||||
```sql
|
||||
taos> select ts, nextsunday(ts) from t;
|
||||
|
||||
DB error: udf function execution failure (1.123615s)
|
||||
```
|
||||
|
||||
```shell
|
||||
tail -20 taospyudf.log
|
||||
2023-05-25 11:42:34.541 ERROR [1679419] [PyUdf::PyUdf@217] py udf load module failure. error ModuleNotFoundError: No module named 'moment'
|
||||
```
|
||||
|
||||
This is because `moment` doesn't exist in the default library search path of the Python UDF plugin. You can confirm this by checking the log file `taospyudf.log`.
|
||||
|
||||
```shell
|
||||
grep 'sys path' taospyudf.log | tail -1
|
||||
```
|
||||
|
||||
```text
|
||||
2023-05-25 10:58:48.554 INFO [1679419] [doPyOpen@592] python sys path: ['', '/lib/python38.zip', '/lib/python3.8', '/lib/python3.8/lib-dynload', '/lib/python3/dist-packages', '/var/lib/taos//.udf']
|
||||
```
|
||||
|
||||
You may find that the default library search path is `/lib/python3/dist-packages` (just for example, it may be different in your system), but `moment` is installed to `/usr/local/lib/python3.8/dist-packages` (for example, it may be different in your system). Then we change the library search path of python UDF.
|
||||
|
||||
Check `sys.path`, which must include the location of the packages you installed with the pip3 command previously, as shown below:
|
||||
|
||||
```python
|
||||
>>> import sys
|
||||
>>> ":".join(sys.path)
|
||||
'/usr/lib/python3.8:/usr/lib/python3.8/lib-dynload:/usr/local/lib/python3.8/dist-packages:/usr/lib/python3/dist-packages'
|
||||
```
|
||||
|
||||
Copy the output and edit /var/taos/taos.cfg to add the configuration parameter below.
|
||||
|
||||
```shell
|
||||
UdfdLdLibPath /usr/lib/python3.8:/usr/lib/python3.8/lib-dynload:/usr/local/lib/python3.8/dist-packages:/usr/lib/python3/dist-packages
|
||||
```
|
||||
|
||||
Save it, then restart `taosd` using `systemctl restart taosd`, and test again; it will succeed this time.
|
||||
|
||||
Note: If your cluster consists of multiple `taosd` instances, you have to repeat the same process on each of them.
|
||||
|
||||
```sql
|
||||
taos> select ts, nextsunday(ts) from t;
|
||||
ts | nextsunday(ts) |
|
||||
===========================================
|
||||
2023-05-01 12:13:14.000 | 2023-05-07 |
|
||||
2023-05-03 08:09:10.000 | 2023-05-07 |
|
||||
2023-05-10 07:06:05.000 | 2023-05-14 |
|
||||
2023-05-25 09:09:15.000 | 2023-05-28 |
|
||||
Query OK, 4 row(s) in set (1.011474s)
|
||||
```
|
||||
|
||||
#### Sample 5: Aggregate Function
|
||||
|
||||
An aggregate function which calculates the difference between the maximum and the minimum in a column. An aggregate function takes multiple rows as input and outputs only one value. The execution process of an aggregate UDF is like map-reduce: the framework divides the input into multiple parts, each mapper processes one block, and the reducer aggregates the results of the mappers. The reduce() of a Python UDF has the functionality of both map() and reduce(). reduce() takes two arguments: the data to be processed and the result of another task executing reduce(). For example, assume the code is in `/root/udf/myspread.py`.
|
||||
|
||||
```python
|
||||
import io
|
||||
import math
|
||||
import pickle
|
||||
|
||||
LOG_FILE: io.TextIOBase = None
|
||||
|
||||
|
||||
def init():
|
||||
global LOG_FILE
|
||||
LOG_FILE = open("/var/log/taos/spread.log", "wt")
|
||||
log("init function myspead success")
|
||||
|
||||
|
||||
def log(o):
|
||||
LOG_FILE.write(str(o) + '\n')
|
||||
|
||||
|
||||
def destroy():
|
||||
log("close log file: spread.log")
|
||||
LOG_FILE.close()
|
||||
|
||||
|
||||
def start():
|
||||
return pickle.dumps((-math.inf, math.inf))
|
||||
|
||||
|
||||
def reduce(block, buf):
|
||||
max_number, min_number = pickle.loads(buf)
|
||||
log(f"initial max_number={max_number}, min_number={min_number}")
|
||||
rows, _ = block.shape()
|
||||
for i in range(rows):
|
||||
v = block.data(i, 0)
|
||||
if v > max_number:
|
||||
log(f"max_number={v}")
|
||||
max_number = v
|
||||
if v < min_number:
|
||||
log(f"min_number={v}")
|
||||
min_number = v
|
||||
return pickle.dumps((max_number, min_number))
|
||||
|
||||
|
||||
def finish(buf):
|
||||
max_number, min_number = pickle.loads(buf)
|
||||
return max_number - min_number
|
||||
```
|
||||
|
||||
In this example, we implemented an aggregate function, and added some logging.
|
||||
1. init() opens a file for logging
|
||||
2. log() is the function for logging; it converts the input object to a string and appends a newline
|
||||
3. destroy() closes the log file
|
||||
4. start() returns the initial buffer for storing the intermediate result
|
||||
5. reduce() processes each data block and aggregates the result
|
||||
6. finish() converts the final buffer to the final result
|
||||
|
||||
Create the UDF.
|
||||
|
||||
```sql
|
||||
create or replace aggregate function myspread as '/root/udf/myspread.py' outputtype double bufsize 128 language 'Python';
|
||||
```
|
||||
|
||||
This SQL command has two important differences from the command creating a scalar UDF.
|
||||
1. keyword `aggregate` is used
|
||||
2. keyword `bufsize` is used to specify the memory size for storing the intermediate result. In this example, the result is 32 bytes, but we specified 128 bytes for `bufsize`. You can use the `python` CLI to print the actual size.
|
||||
|
||||
```python
|
||||
>>> len(pickle.dumps((12345.6789, 23456789.9877)))
|
||||
32
|
||||
```
|
||||
|
||||
Test this function; you can see the result is the same as that of the built-in spread() function.
|
||||
|
||||
```sql
|
||||
taos> select myspread(v1) from t;
|
||||
myspread(v1) |
|
||||
============================
|
||||
5.000000000 |
|
||||
Query OK, 1 row(s) in set (0.013486s)
|
||||
|
||||
taos> select spread(v1) from t;
|
||||
spread(v1) |
|
||||
============================
|
||||
5.000000000 |
|
||||
Query OK, 1 row(s) in set (0.005501s)
|
||||
```
|
||||
|
||||
Finally, check the log file. We can see that the reduce() function was executed 3 times; the max value was updated 4 times and the min value was updated only once.
|
||||
|
||||
```shell
|
||||
root@slave11 /var/log/taos $ cat spread.log
|
||||
init function myspead success
|
||||
initial max_number=-inf, min_number=inf
|
||||
max_number=1
|
||||
min_number=1
|
||||
initial max_number=1, min_number=1
|
||||
max_number=2
|
||||
max_number=3
|
||||
initial max_number=3, min_number=1
|
||||
max_number=6
|
||||
close log file: spread.log
|
||||
```
|
||||
|
||||
### SQL Commands
|
||||
|
||||
1. Create Scalar UDF
|
||||
|
||||
```sql
|
||||
CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
|
||||
```
|
||||
|
||||
2. Create Aggregate UDF
|
||||
|
||||
```sql
|
||||
CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type BUFSIZE buf_size LANGUAGE 'Python';
|
||||
```
|
||||
|
||||
3. Update Scalar UDF
|
||||
|
||||
```sql
|
||||
CREATE OR REPLACE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
|
||||
```
|
||||
|
||||
4. Update Aggregate UDF
|
||||
|
||||
```sql
|
||||
CREATE OR REPLACE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type BUFSIZE buf_size LANGUAGE 'Python';
|
||||
```
|
||||
|
||||
Note: If the keyword `AGGREGATE` is used, the UDF will be treated as an aggregate UDF regardless of what it was before; similarly, if the keyword `AGGREGATE` is absent, the UDF will be treated as a scalar function regardless of what it was before.
|
||||
|
||||
5. Show the UDF
|
||||
|
||||
The version of a UDF is increased by one every time it's updated.
|
||||
|
||||
```sql
|
||||
select * from ins_functions \G;
|
||||
```
|
||||
|
||||
6. Show and Drop existing UDF
|
||||
|
||||
```sql
|
||||
SHOW functions;
|
||||
DROP FUNCTION function_name;
|
||||
```
|
||||
|
||||
### More Python UDF Samples
|
||||
|
||||
#### Scalar Function [pybitand](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pybitand.py)
|
||||
|
||||
The `pybitand` function implements a bitwise AND operation across multiple columns. If there is only one column, the column is returned. The `pybitand` function ignores null values.
|
||||
|
||||
<details>
|
||||
<summary>pybitand.py</summary>
|
||||
|
||||
```Python
|
||||
{{#include tests/script/sh/pybitand.py}}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
#### Aggregate Function [pyl2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pyl2norm.py)
|
||||
|
||||
The `pyl2norm` function finds the second-order norm for all data in the input columns. This squares the values, takes a cumulative sum, and finds the square root.
|
||||
<details>
|
||||
<summary>pyl2norm.py</summary>
|
||||
|
||||
```Python
|
||||
{{#include tests/script/sh/pyl2norm.py}}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
#### Aggregate Function [pycumsum](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pycumsum.py)
|
||||
|
||||
The `pycumsum` function finds the cumulative sum for all data in the input columns.
|
||||
<details>
|
||||
<summary>pycumsum.py</summary>
|
||||
|
||||
```Python
|
||||
{{#include tests/script/sh/pycumsum.py}}
|
||||
```
|
||||
|
||||
</details>
|
||||
## Manage and Use UDF
|
||||
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../12-taos-sql/26-udf.md).
|
||||
|
|
|
@ -11,7 +11,7 @@ When using TDengine to store and query data, the most important part of the data
|
|||
- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`.
|
||||
- Internal function `NOW` can be used to get the current timestamp on the client side.
|
||||
- The current timestamp of the client side is applied when `NOW` is used to insert data.
|
||||
- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00.
|
||||
- Epoch Time: timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00.
|
||||
- Add/subtract operations can be carried out on timestamps. For example `NOW-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `SELECT * FROM t1 WHERE ts > NOW-2w AND ts <= NOW-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
|
||||
|
||||
Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanoseconds.
|
||||
|
@ -24,29 +24,28 @@ CREATE DATABASE db_name PRECISION 'ns';
|
|||
|
||||
In TDengine, the data types below can be used when specifying a column or tag.
|
||||
|
||||
| # | **type** | **Bytes** | **Description** |
|
||||
| --- | :--------------: | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported. |
|
||||
| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1]. |
|
||||
| 3 | INT UNSIGNED | 4 | Unsigned integer, the value range is [0, 2^32-1]. |
|
||||
| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1]. |
|
||||
| 5 | BIGINT UNSIGNED | 8 | unsigned long integer, the value range is [0, 2^64-1]. |
|
||||
| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38]. |
|
||||
| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308]. |
|
||||
| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
|
||||
| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767]. |
|
||||
| 10 | INT UNSIGNED | 2 | unsigned integer, the value range is [0, 65535]. |
|
||||
| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127]. |
|
||||
| 12 | TINYINT UNSIGNED | 1 | unsigned single-byte integer, the value range is [0, 255]. |
|
||||
| 13 | BOOL | 1 | Bool, the value range is {true, false}. |
|
||||
| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
|
||||
| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type. |
|
||||
| 16 | VARCHAR | User-defined | Alias of BINARY |
|
||||
|
||||
| # | **type** | **Bytes** | **Description** |
|
||||
| --- | :---------------: | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported. |
|
||||
| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1]. |
|
||||
| 3 | INT UNSIGNED | 4 | Unsigned integer, the value range is [0, 2^32-1]. |
|
||||
| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1]. |
|
||||
| 5 | BIGINT UNSIGNED | 8 | unsigned long integer, the value range is [0, 2^64-1]. |
|
||||
| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38]. |
|
||||
| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308]. |
|
||||
| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
|
||||
| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767]. |
|
||||
| 10 | SMALLINT UNSIGNED | 2 | unsigned integer, the value range is [0, 65535]. |
|
||||
| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127]. |
|
||||
| 12 | TINYINT UNSIGNED | 1 | unsigned single-byte integer, the value range is [0, 255]. |
|
||||
| 13 | BOOL | 1 | Bool, the value range is {true, false}. |
|
||||
| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
|
||||
| 15 | JSON | | JSON type can only be used on tags. If a tag of JSON type is used, it must be the only tag of the table and cannot be combined with tags of any other type. |
|
||||
| 16 | VARCHAR | User-defined | Alias of BINARY |
|
||||
:::note
|
||||
|
||||
- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
|
||||
- The length of BINARY can be up to 16,374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'`
|
||||
- The length of BINARY can be up to 16,374(data column is 65,517 and tag column is 16,382 since version 3.0.5.0) bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'`
|
||||
- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
|
||||
|
||||
:::
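
As a purely illustrative sketch (the table, column, and tag names below are hypothetical, not part of the reference above), several of these types might be combined in one super table definition:

```sql
-- Hypothetical super table combining several of the types listed above.
CREATE STABLE sensor_data (
  ts    TIMESTAMP,       -- first column must be TIMESTAMP and is the primary key
  val   DOUBLE,
  ok    BOOL,
  note  BINARY(64),      -- up to 64 bytes of ASCII text; the length must be declared
  label NCHAR(16)        -- up to 16 multi-byte characters (64 bytes of fixed storage)
) TAGS (device_id INT, location VARCHAR(24));
```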
|
||||
|
|
|
@ -72,8 +72,8 @@ database_option: {
|
|||
- 0: The database can contain multiple supertables.
|
||||
- 1: The database can contain only one supertable.
|
||||
- STT_TRIGGER: specifies the number of file merges triggered by flushed files. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value for this parameter; For multi-table low-frequency scenarios, it is recommended to configure this parameter with a larger value.
|
||||
- TABLE_PREFIX:The prefix length in the table name that is ignored when distributing table to vnode based on table name.
|
||||
- TABLE_SUFFIX:The suffix length in the table name that is ignored when distributing table to vnode based on table name.
|
||||
- TABLE_PREFIX: When set to a positive number, a prefix of that length in the table name is ignored when distributing a table to a vgroup; when set to a negative number, only a prefix of that length is used. The default value is 0. For example, for the table name v30001, "0001" is used if TSDB_PREFIX is set to 2 but "v3" is used if TSDB_PREFIX is set to -2. It can help you to control the distribution of tables.
|
||||
- TABLE_SUFFIX: When set to a positive number, a suffix of that length in the table name is ignored when distributing a table to a vgroup; when set to a negative number, only a suffix of that length is used. The default value is 0. For example, for the table name v30001, "v300" is used if TSDB_SUFFIX is set to 2 but "01" is used if TSDB_SUFFIX is set to -2. It can help you to control the distribution of tables.
|
||||
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
|
||||
- WAL_RETENTION_PERIOD: specifies the maximum time of which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value 0. A value of 0 indicates that WAL files are not required to keep for consumption. Alter it with a proper value at first to create topics.
|
||||
- WAL_RETENTION_SIZE: specifies the maximum total size of which WAL files are to be kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files to keep for consumption has no upper limit.
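
Putting several of the options above together, a minimal, hedged sketch (the database name and parameter values are arbitrary illustrations, not recommendations):

```sql
-- Hypothetical database: millisecond precision, default STT trigger, table-name
-- prefix handling as described above, and WAL kept for one hour for subscriptions.
CREATE DATABASE power_db
  PRECISION 'ms'
  STT_TRIGGER 8
  TABLE_PREFIX 2
  TSDB_PAGESIZE 4
  WAL_RETENTION_PERIOD 3600
  WAL_RETENTION_SIZE 0;
```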
|
||||
|
|
|
@ -45,7 +45,7 @@ table_option: {
|
|||
|
||||
1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key.
|
||||
2. The maximum length of the table name is 192 bytes.
|
||||
3. The maximum length of each row is 48k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
|
||||
3. The maximum length of each row is 48k(64k since version 3.0.5.0) bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
|
||||
4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive.
|
||||
5. The maximum length in bytes must be specified when using BINARY or NCHAR types.
|
||||
6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive.
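
A small hedged sketch of rule 6 (the table name and columns are made up for illustration):

```sql
-- `table` would normally clash with a reserved keyword, so it is escaped with backquotes.
CREATE TABLE `table` (ts TIMESTAMP, v INT);
INSERT INTO `table` VALUES (NOW, 1);
```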
|
||||
|
|
|
@ -33,7 +33,7 @@ column_definition:
|
|||
SHOW STABLES [LIKE tb_name_wildcard];
|
||||
```
|
||||
|
||||
The preceding SQL statement shows all supertables in the current TDengine database, including the name, creation time, number of columns, number of tags, and number of subtables for each supertable.
|
||||
The preceding SQL statement shows all supertables in the current TDengine database.
|
||||
|
||||
### View the CREATE Statement for a Supertable
|
||||
|
||||
|
|
|
@ -82,7 +82,7 @@ One or multiple rows can be inserted into multiple tables in a single SQL statem
|
|||
|
||||
```sql
|
||||
INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
|
||||
d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
|
||||
d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
|
||||
```
|
||||
|
||||
## Automatically Create Table When Inserting
|
||||
|
|
|
@ -55,7 +55,7 @@ window_clause: {
|
|||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
||||
|
||||
interp_clause:
|
||||
RANGE(ts_val, ts_val), EVERY(every_val), FILL(fill_mod_and_val)
|
||||
RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
|
||||
|
||||
partition_by_clause:
|
||||
PARTITION BY expr [, expr] ...
|
||||
|
@ -373,7 +373,7 @@ FROM temp_stable t1, temp_stable t2
|
|||
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
|
||||
```
|
||||
|
||||
For sub-table and super table:
|
||||
For sub-table and super table:
|
||||
|
||||
```sql
|
||||
SELECT *
|
||||
|
|
|
@ -6,14 +6,14 @@ description: Use Tag Index to Improve Query Performance
|
|||
|
||||
## Introduction
|
||||
|
||||
Prior to TDengine 3.0.3.0 (excluded),only one index is created by default on the first tag of each super talbe, but it's not allowed to dynamically create index on any other tags. From version 3.0.30, you can dynamically create index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use properly.
|
||||
Prior to TDengine 3.0.3.0, only one index was created by default on the first tag of each super table, and indexes could not be created dynamically on other tags. From version 3.0.3.0, you can dynamically create an index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use them properly.
|
||||
|
||||
## Syntax
|
||||
|
||||
1. The syntax of creating an index
|
||||
|
||||
```sql
|
||||
CREATE INDEX index_name ON tbl_name (tagColName)
|
||||
CREATE INDEX index_name ON tbl_name (tagColName)
|
||||
```
|
||||
|
||||
In the above statement, `index_name` is the name of the index, `tbl_name` is the name of the super table, and `tagColName` is the name of the tag on which the index is being created. `tagColName` can be of any type supported by TDengine.
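
For instance, assuming a super table `meters` with a tag column `location` (hypothetical names used only for illustration):

```sql
-- Create an index on the tag column `location` of the super table `meters`,
-- then drop it when it is no longer needed.
CREATE INDEX idx_location ON meters (location);
DROP INDEX idx_location;
```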
|
||||
|
@ -48,4 +48,4 @@ You can also add filter conditions to limit the results.
|
|||
|
||||
6. You can't create an index on a normal table or a child table.
|
||||
|
||||
7. If the unique values of a tag column are too few, it's better not to create index on such tag columns, the benefit would be very small.
|
||||
7. If the unique values of a tag column are too few, it's better not to create index on such tag columns, the benefit would be very small.
|
||||
|
|
|
@ -5,9 +5,9 @@ description: This document describes the standard SQL functions available in TDe
|
|||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
## Single Row Functions
|
||||
## Scalar Functions
|
||||
|
||||
Single row functions return a result for each row.
|
||||
Scalar functions return one result for each row.
|
||||
|
||||
### Mathematical Functions
|
||||
|
||||
|
@ -434,7 +434,7 @@ TO_ISO8601(expr [, timezone])
|
|||
|
||||
**More explanations**:
|
||||
|
||||
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。 For example, TO_ISO8601(1, "+00:00").
|
||||
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]. For example, TO_ISO8601(1, "+00:00").
|
||||
- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp
|
||||
- If the input is a column of TIMESTAMP type, the precision of the returned value is same as the precision set for the current data base in use
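
A brief, hedged illustration (the literal values and the table name `d1001` are arbitrary):

```sql
-- Render a Unix timestamp as ISO8601 text in the +00:00 timezone.
SELECT TO_ISO8601(1, "+00:00");
-- Convert a TIMESTAMP column; the precision follows the database precision.
SELECT TO_ISO8601(ts) FROM d1001;
```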
|
||||
|
||||
|
@ -459,12 +459,17 @@ TO_JSON(str_literal)
|
|||
#### TO_UNIXTIMESTAMP
|
||||
|
||||
```sql
|
||||
TO_UNIXTIMESTAMP(expr)
|
||||
TO_UNIXTIMESTAMP(expr [, return_timestamp])
|
||||
|
||||
return_timestamp: {
|
||||
0
|
||||
| 1
|
||||
}
|
||||
```
|
||||
|
||||
**Description**: UNIX timestamp converted from a string of date/time format
|
||||
|
||||
**Return value type**: BIGINT
|
||||
**Return value type**: BIGINT, TIMESTAMP
|
||||
|
||||
**Applicable column types**: VARCHAR and NCHAR
|
||||
|
||||
|
@ -476,6 +481,7 @@ TO_UNIXTIMESTAMP(expr)
|
|||
|
||||
- The input string must be compatible with ISO8601/RFC3339 standard, NULL will be returned if the string can't be converted
|
||||
- The precision of the returned timestamp is same as the precision set for the current data base in use
|
||||
- return_timestamp indicates whether the returned value type is TIMESTAMP or not. If this parameter is set to 1, the function returns TIMESTAMP type; otherwise it returns BIGINT type. If the parameter is omitted, the default return value type is BIGINT.
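
A hedged sketch of the two return modes (the timestamp literal is arbitrary):

```sql
-- Default: returns a BIGINT epoch value.
SELECT TO_UNIXTIMESTAMP('2023-08-12T18:25:58.128+08:00');
-- With return_timestamp = 1: returns a TIMESTAMP value instead.
SELECT TO_UNIXTIMESTAMP('2023-08-12T18:25:58.128+08:00', 1);
```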
|
||||
|
||||
|
||||
### Time and Date Functions
|
||||
|
@ -620,7 +626,7 @@ algo_type: {
|
|||
|
||||
**Applicable table types**: standard tables and supertables
|
||||
|
||||
**Explanations**:
|
||||
**Explanations**:
|
||||
- _p_ is in range [0,100], when _p_ is 0, the result is same as using function MIN; when _p_ is 100, the result is same as function MAX.
|
||||
- `algo_type` can only be input as `default` or `t-digest` Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
|
||||
- The approximation result of `t-digest` algorithm is sensitive to input data order. For example, when querying STable with different input data order there might be minor differences in calculated results.
|
||||
|
@ -666,7 +672,7 @@ If you input a specific column, the number of non-null values in the column is r
|
|||
ELAPSED(ts_primary_key [, time_unit])
|
||||
```
|
||||
|
||||
**Description**:`elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
|
||||
**Description**: `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
|
||||
|
||||
**Return value type**: Double if the input value is not NULL;
|
||||
|
||||
|
@ -674,7 +680,7 @@ ELAPSED(ts_primary_key [, time_unit])
|
|||
|
||||
**Applicable tables**: table, STable, outer in nested query
|
||||
|
||||
**Explanations**:
|
||||
**Explanations**:
|
||||
- `ts_primary_key` parameter can only be the first column of a table, i.e. timestamp primary key.
|
||||
- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit. Time unit specified by `time_unit` can be:
|
||||
1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
|
||||
|
@ -752,7 +758,7 @@ SUM(expr)
|
|||
HYPERLOGLOG(expr)
|
||||
```
|
||||
|
||||
**Description**:
|
||||
**Description**:
|
||||
The cardinal number of a specific column is returned by using hyperloglog algorithm. The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge.
|
||||
However, when the data volume is very small, the result may be not accurate, it's recommended to use `select count(data) from (select unique(col) as data from table)` in this case.
|
||||
|
||||
|
@ -766,10 +772,10 @@ HYPERLOGLOG(expr)
|
|||
### HISTOGRAM
|
||||
|
||||
```sql
|
||||
HISTOGRAM(expr,bin_type, bin_description, normalized)
|
||||
HISTOGRAM(expr, bin_type, bin_description, normalized)
|
||||
```
|
||||
|
||||
**Description**:Returns count of data points in user-specified ranges.
|
||||
**Description**: Returns count of data points in user-specified ranges.
|
||||
|
||||
**Return value type** If normalized is set to 1, a DOUBLE is returned; otherwise a BIGINT is returned
|
||||
|
||||
|
@ -777,18 +783,18 @@ HISTOGRAM(expr,bin_type, bin_description, normalized)
|
|||
|
||||
**Applicable table types**: table, STable
|
||||
|
||||
**Explanations**:
|
||||
- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"。
|
||||
- bin_description: parameter to describe how to generate buckets,can be in the following JSON formats for each bin_type respectively:
|
||||
**Explanations**:
|
||||
- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin".
|
||||
- bin_description: parameter to describe how to generate buckets; it can be in the following JSON formats for each bin_type respectively:
|
||||
- "user_input": "[1, 3, 5, 7]":
|
||||
User specified bin values.
|
||||
|
||||
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
|
||||
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated set of bins.
|
||||
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated set of bins.
|
||||
The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
|
||||
|
||||
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
|
||||
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins.
|
||||
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add (-inf, inf) as start/end point in generated range of bins.
|
||||
The above "linear_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
|
||||
- normalized: setting to 1/0 to turn on/off result normalization. Valid values are 0 or 1.
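
A hedged example using the `linear_bin` descriptor shown above (the table and column names are hypothetical):

```sql
-- Count `voltage` values of the hypothetical super table `meters` into 5 linear bins
-- of width 5 starting at 0, with (-inf, +inf) added and without normalization.
SELECT HISTOGRAM(voltage, 'linear_bin',
                 '{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}', 0)
FROM meters;
```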
|
||||
|
||||
|
@ -861,10 +867,16 @@ FIRST(expr)
|
|||
### INTERP
|
||||
|
||||
```sql
|
||||
INTERP(expr)
|
||||
INTERP(expr [, ignore_null_values])
|
||||
|
||||
ignore_null_values: {
|
||||
0
|
||||
| 1
|
||||
}
|
||||
```
|
||||
|
||||
**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned.
|
||||
**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned. The value of `ignore_null_values` can be 0 or 1, 1 means null values are ignored. The default value of this parameter is 0.
|
||||
|
||||
|
||||
**Return value type**: Same as the column being operated upon
|
||||
|
||||
|
@ -877,10 +889,11 @@ INTERP(expr)
|
|||
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
|
||||
- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
|
||||
- `INTERP` must be used along with `RANGE`, `EVERY`, `FILL` keywords.
|
||||
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
|
||||
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range. timestamp2 is the ending point of the output time range.
|
||||
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
|
||||
- Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
|
||||
- `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable.
|
||||
- When only one timestamp value is specified in `RANGE` clause, `INTERP` is used to generate interpolation at this point in time. In this case, `EVERY` clause can be omitted. For example, SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
|
||||
- `INTERP` can be applied to supertable by interpolating primary key sorted data of all its childtables. It can also be used with `partition by tbname` when applied to supertable to generate interpolation on each single timeline.
|
||||
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported after version 3.0.2.0).
|
||||
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported after version 3.0.3.0).
|
||||
|
||||
|
@ -890,7 +903,7 @@ INTERP(expr)
|
|||
- We want to downsample every 1 hour and use a linear fill for missing values. Note the order in which the "partition by" clause and the "range", "every" and "fill" parameters are used.
|
||||
|
||||
```sql
|
||||
SELECT _irowts,INTERP(current) FROM test.meters PARTITION BY TBNAME RANGE('2017-07-22 00:00:00','2017-07-24 12:25:00') EVERY(1h) FILL(LINEAR)
|
||||
SELECT _irowts,INTERP(current) FROM test.meters PARTITION BY TBNAME RANGE('2017-07-22 00:00:00','2017-07-24 12:25:00') EVERY(1h) FILL(LINEAR)
|
||||
```
|
||||
|
||||
### LAST
|
||||
|
@ -986,19 +999,14 @@ SAMPLE(expr, k)
|
|||
|
||||
**Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,1000].
|
||||
|
||||
**Return value type**: Same as the column being operated plus the associated timestamp
|
||||
**Return value type**: Same as the column being operated
|
||||
|
||||
**Applicable data types**: Any data type except for tags of STable
|
||||
**Applicable data types**: Any data type
|
||||
|
||||
**Applicable nested query**: Inner query and Outer query
|
||||
|
||||
**Applicable table types**: standard tables and supertables
|
||||
|
||||
**More explanations**:
|
||||
|
||||
This function cannot be used in expression calculation.
|
||||
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
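
A minimal sketch, assuming a super table `meters` with a column `current` (hypothetical names):

```sql
-- Take 5 random sample rows of `current` from each child table of `meters`.
SELECT SAMPLE(current, 5) FROM meters PARTITION BY tbname;
```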
|
||||
|
||||
|
||||
### TAIL
|
||||
|
||||
|
@ -1043,11 +1051,11 @@ TOP(expr, k)
|
|||
UNIQUE(expr)
|
||||
```
|
||||
|
||||
**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp. The first occurrence of a timestamp or tag is used.
|
||||
**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword.
|
||||
|
||||
**Return value type**:Same as the data type of the column being operated upon
|
||||
|
||||
**Applicable column types**: Any data types except for timestamp
|
||||
**Applicable column types**: Any data types
|
||||
|
||||
**Applicable table types**: table, STable
|
||||
|
||||
|
@ -1076,7 +1084,6 @@ CSUM(expr)
|
|||
|
||||
- Arithmetic operation can't be performed on the result of `csum` function
|
||||
- Can only be used with aggregate functions. This function can be used with supertables and standard tables.
|
||||
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
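
A minimal sketch, assuming the same hypothetical `meters` super table:

```sql
-- Cumulative sum of `current`, computed per child table (single timeline).
SELECT CSUM(current) FROM meters PARTITION BY tbname;
```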
|
||||
|
||||
|
||||
### DERIVATIVE
|
||||
|
@ -1100,8 +1107,7 @@ ignore_negative: {
|
|||
|
||||
**More explanation**:
|
||||
|
||||
- It can be used together with `PARTITION BY tbname` against a STable.
|
||||
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from。
|
||||
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from.
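
Since the inline example above is left incomplete, here is a hedged, fuller sketch (table and column names are hypothetical):

```sql
-- Rate of change of `current` per 10-minute unit, ignoring negative results,
-- computed separately for each child table of `meters`.
SELECT _rowts, DERIVATIVE(current, 10m, 1) FROM meters PARTITION BY tbname;
```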
|
||||
|
||||
### DIFF
|
||||
|
||||
|
@ -1125,7 +1131,7 @@ ignore_negative: {
|
|||
**More explanation**:
|
||||
|
||||
- The number of result rows is the number of rows subtracted by one, no output for the first row
|
||||
- It can be used together with a selected column. For example: select \_rowts, DIFF() from。
|
||||
- It can be used together with a selected column. For example: select \_rowts, DIFF() from.
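
Similarly, a hedged sketch with hypothetical names:

```sql
-- Difference between consecutive `current` values of the child table d1001.
SELECT _rowts, DIFF(current) FROM d1001;
```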
|
||||
|
||||
|
||||
### IRATE
|
||||
|
@ -1163,7 +1169,6 @@ MAVG(expr, k)
|
|||
|
||||
- Arithmetic operation can't be performed on the result of `MAVG`.
|
||||
- Can only be used with data columns, can't be used with tags.
- Can't be used with aggregate functions.
|
||||
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
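
A minimal sketch under the same hypothetical schema:

```sql
-- 10-point moving average of `current`, computed per child table of `meters`.
SELECT MAVG(current, 10) FROM meters PARTITION BY tbname;
```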
|
||||
|
||||
|
||||
### STATECOUNT
|
||||
|
@ -1177,7 +1182,7 @@ STATECOUNT(expr, oper, val)
|
|||
**Applicable parameter values**:
|
||||
|
||||
- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes.
|
||||
- val : Numeric types
|
||||
- val: Numeric types
|
||||
|
||||
**Return value type**: Integer
|
||||
|
||||
|
@ -1189,7 +1194,6 @@ STATECOUNT(expr, oper, val)
|
|||
|
||||
**More explanations**:
|
||||
|
||||
- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline
|
||||
- Can't be used with window operation, like interval/state_window/session_window
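
A hedged sketch (the child table `d1001`, column `voltage`, and threshold are hypothetical):

```sql
-- Count how many consecutive rows of d1001 have voltage >= 205.
SELECT STATECOUNT(voltage, 'GE', 205) FROM d1001;
```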
|
||||
|
||||
|
||||
|
@ -1204,7 +1208,7 @@ STATEDURATION(expr, oper, val, unit)
|
|||
**Applicable parameter values**:
|
||||
|
||||
- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes.
|
||||
- val : Numeric types
|
||||
- val: Numeric types
|
||||
- unit: The unit of time interval. Enter one of the following options: 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) If you do not enter a unit of time, the precision of the current database is used by default.
|
||||
|
||||
**Return value type**: Integer
|
||||
|
@ -1217,7 +1221,6 @@ STATEDURATION(expr, oper, val, unit)
|
|||
|
||||
**More explanations**:
|
||||
|
||||
- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline
|
||||
- Can't be used with window operation, like interval/state_window/session_window
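
A hedged sketch with the same hypothetical names:

```sql
-- For each row of d1001, how long (in minutes) voltage has continuously stayed >= 205.
SELECT STATEDURATION(voltage, 'GE', 205, 1m) FROM d1001;
```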
|
||||
|
||||
|
||||
|
@ -1235,7 +1238,6 @@ TWA(expr)
|
|||
|
||||
**Applicable table types**: standard tables and supertables
|
||||
|
||||
- Must be used together with `PARTITION BY tbname` to force the result into each single timeline.
|
||||
|
||||
|
||||
## System Information Functions
|
||||
|
|
|
@ -21,7 +21,7 @@ part_list can be any scalar expression, such as a column, constant, scalar funct
|
|||
A PARTITION BY clause is processed as follows:
|
||||
|
||||
- The PARTITION BY clause must occur after the WHERE clause
|
||||
- The PARTITION BY caluse partitions the data according to the specified dimensions, then perform computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause.
|
||||
- The PARTITION BY clause partitions the data according to the specified dimensions, then performs computation on each partition. The computation performed is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause.
|
||||
- The PARTITION BY clause can be used together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value:
|
||||
|
||||
```sql
|
||||
|
@ -69,19 +69,20 @@ These pseudocolumns occur after the aggregation clause.
|
|||
`FILL` clause is used to specify how to fill when there is data missing in any window, including:
|
||||
|
||||
1. NONE: No fill (the default fill mode)
|
||||
2. VALUE:Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)` Note: The value filled depends on the data type. For example, if you run FILL(VALUE 1.23) on an integer column, the value 1 is filled.
|
||||
3. PREV:Fill with the previous non-NULL value, `FILL(PREV)`
|
||||
4. NULL:Fill with NULL, `FILL(NULL)`
|
||||
5. LINEAR:Fill with the closest non-NULL value, `FILL(LINEAR)`
|
||||
6. NEXT:Fill with the next non-NULL value, `FILL(NEXT)`
|
||||
2. VALUE: Fill with a fixed value, which should be specified together, for example `FILL(VALUE, 1.23)`. Note: The value filled depends on the data type. For example, if you run FILL(VALUE, 1.23) on an integer column, the value 1 is filled.
|
||||
3. PREV: Fill with the previous non-NULL value, `FILL(PREV)`
|
||||
4. NULL: Fill with NULL, `FILL(NULL)`
|
||||
5. LINEAR: Fill with the closest non-NULL value, `FILL(LINEAR)`
|
||||
6. NEXT: Fill with the next non-NULL value, `FILL(NEXT)`
|
||||
|
||||
In the above filling modes, except for `NONE` mode, the `fill` clause will be ignored if there is no data in the defined time range, i.e. no data would be filled and the query result would be empty. This behavior is reasonable when the filling mode is `PREV`, `NEXT` or `LINEAR`, because filling can't be performed if there is no data at all. For filling modes `NULL` and `VALUE`, however, filling can be performed even when there is no data; whether to fill or not depends on the user's application. To meet this need for forced filling without breaking the behavior of the existing filling modes, TDengine has added two new filling modes since version 3.0.3.0.
|
||||
|
||||
1. NULL_F: Fill `NULL` by force
|
||||
2. VALUE_F: Fill `VALUE` by force
|
||||
|
||||
The detailed beaviors of `NULL`, `NULL_F`, `VALUE`, and VALUE_F are described below:
|
||||
- When used with `INTERVAL`: `NULL_F` and `VALUE_F` are filling by force;`NULL` and `VALUE` don't fill by force. The behavior of each filling mode is exactly same as what the name suggests.
|
||||
The detailed behaviors of `NULL`, `NULL_F`, `VALUE`, and `VALUE_F` are described below:
|
||||
|
||||
- When used with `INTERVAL`: `NULL_F` and `VALUE_F` are filling by force; `NULL` and `VALUE` don't fill by force. The behavior of each filling mode is exactly same as what the name suggests.
|
||||
- When used with `INTERVAL` in stream processing: `NULL_F` and `NULL` are the same, i.e. don't fill by force; `VALUE_F` and `VALUE` are the same, i.e. don't fill by force. It's suggested that there is no filling by force in stream processing.
|
||||
- When used with `INTERP`: `NULL` and `NULL_F` are the same, i.e. filling by force; `VALUE` and `VALUE_F` are the same, i.e. filling by force. It's suggested that there is always filling by force when used with `INTERP`.
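
A hedged sketch contrasting `VALUE` and `VALUE_F` on a hypothetical `meters` table and an arbitrary time range:

```sql
-- VALUE fills only windows that fall inside the existing data range.
SELECT _wstart, AVG(current) FROM meters
  WHERE ts >= '2023-01-01 00:00:00' AND ts < '2023-01-01 01:00:00'
  INTERVAL(10m) FILL(VALUE, 0);
-- VALUE_F also forces filling when the queried range contains no data at all.
SELECT _wstart, AVG(current) FROM meters
  WHERE ts >= '2023-01-01 00:00:00' AND ts < '2023-01-01 01:00:00'
  INTERVAL(10m) FILL(VALUE_F, 0);
```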
|
||||
|
||||
|
@ -97,7 +98,7 @@ The detailed beaviors of `NULL`, `NULL_F`, `VALUE`, and VALUE_F are described be
|
|||
|
||||
There are two kinds of time windows: sliding window and flip time/tumbling window.
|
||||
|
||||
The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] ,[t1s , t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.
|
||||
The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure below, [t0s, t0e], [t1s, t1e], [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which the time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is the same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.
|
||||
|
||||

|
||||
|
||||
|
@ -121,7 +122,7 @@ Please note that the `timezone` parameter should be configured to be the same va
|
|||
|
||||
### State Window
|
||||
|
||||
In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two state windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12].
|
||||
In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two state windows according to status, [2019-04-28 14:22:07, 2019-04-28 14:22:10] and [2019-04-28 14:22:11, 2019-04-28 14:22:12].
|
||||
|
||||

|
||||
|
||||
|
@ -145,7 +146,7 @@ SELECT tbname, _wstart, CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE
|
|||
|
||||
### Session Window
|
||||
|
||||
The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30] because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
|
||||
The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitute 2 time windows, [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
|
||||
|
||||

|
||||
|
||||
|
@ -178,7 +179,7 @@ select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c
|
|||
|
||||
### Examples
|
||||
|
||||
A table of intelligent meters can be created by the SQL statement below:
|
||||
A table of intelligent meters can be created by the SQL statement below:
|
||||
|
||||
```
|
||||
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
|
||||
|
|
|
@ -13,8 +13,11 @@ Because stream processing is built in to TDengine, you are no longer reliant on
|
|||
```sql
|
||||
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name SUBTABLE(expression) AS subquery
|
||||
stream_options: {
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
|
||||
WATERMARK time
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
|
||||
WATERMARK time
|
||||
IGNORE EXPIRED [0|1]
|
||||
DELETE_MARK time
|
||||
FILL_HISTORY [0|1]
|
||||
}
|
||||
|
||||
```
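
As a hedged illustration of the syntax above (the stream, target table, and source table names are hypothetical):

```sql
-- Average voltage per meter over 1-minute windows, written into the super table
-- avg_vol only when each window closes.
CREATE STREAM avg_vol_stream TRIGGER WINDOW_CLOSE
  INTO avg_vol AS
    SELECT _wstart, AVG(voltage) FROM meters
    PARTITION BY tbname INTERVAL(1m);
```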
|
||||
|
@ -109,7 +112,7 @@ SHOW STREAMS;
|
|||
|
||||
When you create a stream, you can use the TRIGGER parameter to specify triggering conditions for it.
|
||||
|
||||
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering,the default value is AT_ONCE:
|
||||
For non-windowed processing, triggering occurs in real time. For windowed processing, there are three methods of triggering, the default value is AT_ONCE:
|
||||
|
||||
1. AT_ONCE: triggers on write
|
||||
|
||||
|
@ -141,3 +144,27 @@ The data in expired windows is tagged as expired. TDengine stream processing pro
|
|||
2. Recalculate the data. In this method, all data in the window is reobtained from the database and recalculated. The latest results are then returned.
|
||||
|
||||
In both of these methods, configuring the watermark is essential for obtaining accurate results (if expired data is dropped) and avoiding repeated triggers that affect system performance (if expired data is recalculated).
|
||||
|
||||
## Supported functions
|
||||
|
||||
All [scalar functions](../function/#scalar-functions) are available in stream processing. All [Aggregate functions](../function/#aggregate-functions) and [Selection functions](../function/#selection-functions) are available in stream processing, except the following:
|
||||
- [leastsquares](../function/#leastsquares)
|
||||
- [percentile](../function/#percentile)
|
||||
- [top](../function/#top)
|
||||
- [bottom](../function/#bottom)
|
||||
- [elapsed](../function/#elapsed)
|
||||
- [interp](../function/#interp)
|
||||
- [derivative](../function/#derivative)
|
||||
- [irate](../function/#irate)
|
||||
- [twa](../function/#twa)
|
||||
- [histogram](../function/#histogram)
|
||||
- [diff](../function/#diff)
|
||||
- [statecount](../function/#statecount)
|
||||
- [stateduration](../function/#stateduration)
|
||||
- [csum](../function/#csum)
|
||||
- [mavg](../function/#mavg)
|
||||
- [sample](../function/#sample)
|
||||
- [tail](../function/#tail)
|
||||
- [unique](../function/#unique)
|
||||
- [mode](../function/#mode)
|
||||
|
||||
|
|
|
@ -67,7 +67,7 @@ description: This document describes the JSON data type in TDengine.
|
|||
|
||||
- The maximum length of keys in JSON is 256 bytes, and key must be printable ASCII characters. The maximum total length of a JSON is 4,096 bytes.
|
||||
|
||||
- JSON format:
|
||||
- JSON format:
|
||||
|
||||
- The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array.
|
||||
- object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so.
|
||||
|
|
|
@ -20,7 +20,7 @@ description: This document describes the usage of escape characters in TDengine.
|
|||
|
||||
1. If there are escape characters in identifiers (database name, table name, column name)
|
||||
- Identifier without ``: Error will be returned because identifier must be constituted of digits, ASCII characters or underscore and can't be started with digits
|
||||
- Identifier quoted with ``: Original content is kept, no escaping
|
||||
- Identifier quoted with ``: Original content is kept, no escaping
|
||||
2. If there are escape characters in values
|
||||
- The escape characters will be escaped as the above table. If the escape character doesn't match any supported one, the escape character "\" will be ignored.
|
||||
- "%" and "\_" are used as wildcards in `like`. `\%` and `\_` should be used to represent literal "%" and "\_" in `like`,. If `\%` and `\_` are used out of `like` context, the evaluation result is "`\%`"and "`\_`", instead of "%" and "\_".
|
||||
|
|
|
@ -26,7 +26,7 @@ The following characters cannot occur in a password: single quotation marks ('),
|
|||
|
||||
- Maximum length of database name is 64 bytes
|
||||
- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator.
|
||||
- Maximum length of each data row is 48K bytes. Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
|
||||
- Maximum length of each data row is 48K(64K since version 3.0.5.0) bytes. Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
|
||||
- The maximum length of a column name is 64 bytes.
|
||||
- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
|
||||
- The maximum length of a tag name is 64 bytes
|
||||
|
|
|
@ -81,7 +81,7 @@ Provides information about user-created databases. Similar to SHOW DATABASES.
|
|||
| 3 | ntables | INT | Number of standard tables and subtables (not including supertables) |
|
||||
| 4 | vgroups | INT | Number of vgroups. It should be noted that `vnodes` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 6 | replica | INT | Number of replicas. It should be noted that `replica` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 7 | strict | BINARY(3) | Strong consistency. It should be noted that `strict` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 7 | strict | BINARY(4) | Obsoleted |
|
||||
| 8 | duration | INT | Duration for storage of single files. It should be noted that `duration` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 9 | keep | INT | Data retention period. It should be noted that `keep` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
| 10 | buffer | INT | Write cache size per vnode, in MB. It should be noted that `buffer` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
|
||||
|
@ -120,6 +120,9 @@ Provides information about user-defined functions.
|
|||
| 5 | create_time | TIMESTAMP | Creation time |
|
||||
| 6 | code_len | INT | Length of the source code |
|
||||
| 7 | bufsize | INT | Buffer size |
|
||||
| 8 | func_language | BINARY(31) | UDF programming language |
|
||||
| 9 | func_body | BINARY(16384) | UDF function body |
|
||||
| 10 | func_version | INT | UDF function version. starting from 0. Increasing by 1 each time it is updated|
|
||||
|
||||
## INS_INDEXES
|
||||
|
||||
|
@ -181,7 +184,7 @@ Provides information about standard tables and subtables.
|
|||
|
||||
## INS_COLUMNS
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| # | **Column** | **Data Type** | **Description** |
|
||||
| --- | :---------: | ------------- | ---------------------- |
|
||||
| 1 | table_name | BINARY(192) | Table name |
|
||||
| 2 | db_name | BINARY(64) | Database name |
|
||||
|
|
|
@ -69,7 +69,7 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
|
|||
| 1 | consumer_id | BIGINT | Consumer ID |
|
||||
| 2 | consumer_group | BINARY(192) | Consumer group |
|
||||
| 3 | client_id | BINARY(192) | Client ID (user-defined) |
|
||||
| 4 | status | BINARY(20) | Consumer status |
|
||||
| 4 | status | BINARY(20) | Consumer status. All possible statuses include: ready (consumer is in normal state), lost (the connection between consumer and mnode is broken), rebalance (the redistribution of vgroups that belong to the current consumer is now in progress), unknown (consumer is in an invalid state) |
|
||||
| 5 | topics | BINARY(204) | Subscribed topic. Returns one row for each topic. |
|
||||
| 6 | up_time | TIMESTAMP | Time of first connection to TDengine Server |
|
||||
| 7 | subscribe_time | TIMESTAMP | Time of first subscription |
|
||||
|
|
|
@ -4,7 +4,7 @@ sidebar_label: SHOW Statement
|
|||
description: This document describes how to use the SHOW statement in TDengine.
|
||||
---
|
||||
|
||||
`SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
|
||||
`SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
|
||||
|
||||
## SHOW APPS
|
||||
|
||||
|
@ -36,7 +36,7 @@ Shows information about connections to the system.
|
|||
SHOW CONSUMERS;
|
||||
```
|
||||
|
||||
Shows information about all active consumers in the system.
|
||||
Shows information about all consumers in the system.
|
||||
|
||||
## SHOW CREATE DATABASE
|
||||
|
||||
|
@ -129,6 +129,14 @@ SHOW QNODES;
|
|||
|
||||
Shows information about qnodes in the system.
|
||||
|
||||
## SHOW QUERIES
|
||||
|
||||
```sql
|
||||
SHOW QUERIES;
|
||||
```
|
||||
|
||||
Shows the queries in progress in the system.
|
||||
|
||||
## SHOW SCORES
|
||||
|
||||
```sql
|
||||
|
@ -179,10 +187,10 @@ SHOW TABLE DISTRIBUTED table_name;
|
|||
|
||||
Shows how table data is distributed.
|
||||
|
||||
Examples: Below is an example of this command to display the block distribution of table `d0` in detailed format.
|
||||
Examples: Below is an example of this command to display the block distribution of table `d0` in detailed format.
|
||||
|
||||
```sql
|
||||
show table distributed d0\G;
|
||||
show table distributed d0\G;
|
||||
```
|
||||
|
||||
<details>
|
||||
|
@ -193,31 +201,31 @@ _block_dist: Total_Blocks=[5] Total_Size=[93.65 KB] Average_size=[18.73 KB] Comp
|
|||
|
||||
Total_Blocks : Table `d0` contains total 5 blocks
|
||||
|
||||
Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB
|
||||
Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB
|
||||
|
||||
Average_size: The average size of each block is 18.73 KB
|
||||
|
||||
Compression_Ratio: The data compression rate is 23.98%
|
||||
|
||||
|
||||
*************************** 2.row ***************************
|
||||
_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]
|
||||
|
||||
Total_Rows: Table `d0` contains 20,000 rows
|
||||
|
||||
Inmem_Rows: The rows still in memory, i.e. not committed in disk, is 0, i.e. none such rows
|
||||
Inmem_Rows: The rows still in memory, i.e. not committed in disk, is 0, i.e. none such rows
|
||||
|
||||
MinRows: The minimum number of rows in a block is 3,616
|
||||
MinRows: The minimum number of rows in a block is 3,616
|
||||
|
||||
MaxRows: The maximum number of rows in a block is 4,096B
|
||||
MaxRows: The maximum number of rows in a block is 4,096
|
||||
|
||||
Average_Rows: The average number of rows in a block is 4,000
|
||||
Average_Rows: The average number of rows in a block is 4,000
|
||||
|
||||
*************************** 3.row ***************************
|
||||
_block_dist: Total_Tables=[1] Total_Files=[2]
|
||||
|
||||
Total_Tables: The number of child tables, 1 in this example
|
||||
Total_Tables: The number of child tables, 1 in this example
|
||||
|
||||
Total_Files: The number of files storing the table's data, 2 in this example
|
||||
Total_Files: The number of files storing the table's data, 2 in this example
|
||||
|
||||
*************************** 4.row ***************************
|
||||
|
||||
|
@ -353,7 +361,7 @@ SHOW VARIABLES;
|
|||
SHOW DNODE dnode_id VARIABLES;
|
||||
```
|
||||
|
||||
Shows the working configuration of the parameters that must be the same on each node. You can also specify a dnode to show the working configuration for that node.
|
||||
Shows the working configuration of the parameters that must be the same on each node. You can also specify a dnode to show the working configuration for that node.
|
||||
|
||||
## SHOW VGROUPS
|
||||
|
||||
|
@ -361,7 +369,7 @@ Shows the working configuration of the parameters that must be the same on each
|
|||
SHOW [db_name.]VGROUPS;
|
||||
```
|
||||
|
||||
Shows information about all vgroups in the current database.
|
||||
Shows information about all vgroups in the current database.
|
||||
|
||||
## SHOW VNODES
|
||||
|
||||
|
|
|
@ -7,17 +7,18 @@ description: This document describes the SQL statements related to user-defined
|
|||
You can create user-defined functions and import them into TDengine.
|
||||
## Create UDF
|
||||
|
||||
SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDF are stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted.
|
||||
SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDF is stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted.
|
||||
|
||||
When creating UDF, the type of UDF, i.e. a scalar function or aggregate function must be specified. If the specified type is wrong, the SQL statements using the function would fail with errors. The input data type and output data type must be consistent with the UDF definition.
|
||||
|
||||
- Create Scalar Function
|
||||
```sql
|
||||
CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type;
|
||||
CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_type [LANGUAGE 'C|Python'];
|
||||
```
|
||||
|
||||
- function_name: The scalar function name to be used in SQL statement which must be consistent with the UDF name and is also the name of the compiled DLL (.so file).
|
||||
- library_path: The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes.
|
||||
- OR REPLACE: if the UDF exists, the UDF properties are modified
|
||||
- function_name: The scalar function name to be used in the SQL statement
|
||||
- LANGUAGE 'C|Python': the programming language of UDF. Now C or Python (v3.7+) is supported. If this clause is omitted, C is assumed as the programming language.
|
||||
- library_path: For the C programming language, the absolute path of the DLL file including the name of the shared object file (.so). For the Python programming language, the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
|
||||
- output_type: The data type of the results of the UDF.
|
||||
|
||||
For example, the following SQL statement can be used to create a UDF from `libbitand.so`.
|
||||
|
@ -25,14 +26,20 @@ CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type;
|
|||
```sql
|
||||
CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT;
|
||||
```
|
||||
For Example, the following SQL statement can be used to modify the existing function `bit_and`. The OUTPUT type is changed to BIGINT and the programming language is changed to Python.
|
||||
|
||||
```sql
|
||||
CREATE OR REPLACE FUNCTION bit_and AS "/home/taos/udf_example/bit_and.py" OUTPUTTYPE BIGINT LANGUAGE 'Python';
|
||||
```
|
||||
|
||||
- Create Aggregate Function
|
||||
```sql
|
||||
CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ];
|
||||
```
|
||||
|
||||
- function_name: The aggregate function name to be used in SQL statement which must be consistent with the udfNormalFunc name and is also the name of the compiled DLL (.so file).
|
||||
- library_path: The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes.
|
||||
- OR REPLACE: if the UDF exists, the UDF properties are modified
|
||||
- function_name: The aggregate function name to be used in the SQL statement
|
||||
- LANGUAGE 'C|Python': the programming language of the UDF. Now C or Python is supported. If this clause is omitted, C is assumed as the programming language.
|
||||
- library_path: For the C programming language, the absolute path of the DLL file including the name of the shared object file (.so). For the Python programming language, the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
|
||||
- output_type: The output data type, the value is the literal string of the supported TDengine data type.
|
||||
- buffer_size: The size of the intermediate buffer in bytes. This parameter is optional.
|
||||
|
||||
|
@ -41,6 +48,11 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [
|
|||
```sql
|
||||
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8;
|
||||
```
|
||||
For example, the following SQL statement modifies the buffer size of existing UDF `l2norm` to 64
|
||||
```sql
|
||||
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 64;
|
||||
```
|
||||
|
||||
For more information about user-defined functions, see [User-Defined Functions](/develop/udf).
|
||||
|
||||
## Manage UDF
|
||||
|
@ -61,9 +73,9 @@ SHOW FUNCTIONS;
|
|||
|
||||
## Call UDF
|
||||
|
||||
The function name specified when creating UDF can be used directly in SQL statements, just like builtin functions. For example:
|
||||
The function name specified when creating UDF can be used directly in SQL statements, just like built-in functions. For example:
|
||||
```sql
|
||||
SELECT bit_and(c1,c2) FROM table;
|
||||
```
|
||||
|
||||
The above SQL statement invokes function X for column c1 and c2 on table. You can use query keywords like WHERE with user-defined functions.
|
||||
The above SQL statement invokes the `bit_and` function for columns c1 and c2 on the table. You can use query keywords like WHERE with user-defined functions.
|
||||
|
|
|
@ -27,7 +27,7 @@ The following data types can be used in the schema for standard tables.
|
|||
| - | :------- | :-------- | :------- |
|
||||
| 1 | ALTER ACCOUNT | Deprecated | This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
|
||||
| 2 | ALTER ALL DNODES | Added | Modifies the configuration of all dnodes.
|
||||
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. TDengine 3.0 provides strict consistency by default and doesn't allow to change to weak consitency. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
|
||||
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. TDengine 3.0 provides strict consistency by default and doesn't allow to change to weak consistency. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
|
||||
| 4 | ALTER STABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a supertable. </li></ul>
|
||||
| 5 | ALTER TABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a standard table. </li><li>TTL: Specifies the time-to-live for a standard table. </li></ul>
|
||||
| 6 | ALTER USER | Modified | Deprecated<ul><li>PRIVILEGE: Specified user permissions. Replaced by GRANT and REVOKE. <br/>Added</li><li>ENABLE: Enables or disables a user. </li><li>SYSINFO: Specifies whether a user can query system information. </li></ul>
|
||||
|
|
|
@ -13,7 +13,7 @@ Syntax Specifications used in this chapter:
|
|||
- Information that you input is given in lowercase.
|
||||
- \[ \] means optional input, excluding [] itself.
|
||||
- | means one of a few options, excluding | itself.
|
||||
- … means the item prior to it can be repeated multiple times.
|
||||
- ... means the item prior to it can be repeated multiple times.
|
||||
|
||||
To better demonstrate the syntax, usage, and rules of TDengine SQL, it is hereinafter assumed that there is a data set collected from electric meters. Each meter collects 3 measurements: current, voltage, and phase. The data model is shown below:
|
||||
|
||||
|
|
|
@ -22,11 +22,11 @@ wget https://github.com/taosdata/grafanaplugin/raw/master/dashboards/TDinsight.s
|
|||
chmod +x TDinsight.sh
|
||||
```
|
||||
|
||||
Prepare:
|
||||
Prepare:
|
||||
|
||||
1. TDengine Server
|
||||
|
||||
- The URL of REST service:for example `http://localhost:6041` if TDengine is deployed locally
|
||||
- The URL of REST service: for example `http://localhost:6041` if TDengine is deployed locally
|
||||
- User name and password
|
||||
|
||||
2. Grafana Alert Notification
|
||||
|
@ -36,9 +36,310 @@ You can use below command to setup Grafana alert notification.
|
|||
An existing Grafana Notification Channel can be specified with the `-E` parameter; the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq`
|
||||
|
||||
```bash
|
||||
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
||||
./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
||||
```
|
||||
|
||||
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
|
||||
|
||||
For more use cases and restrictions please refer to [TDinsight](/reference/tdinsight/).
|
||||
|
||||
## log database
|
||||
|
||||
The data of the TDinsight dashboard is stored in the `log` database (the default; you can change it in taoskeeper's config file. For more information, please refer to the [taoskeeper document](/reference/taosKeeper)). taoskeeper creates the `log` database on startup.
|
||||
|
||||
### cluster\_info table
|
||||
|
||||
`cluster_info` table contains cluster information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|first\_ep|VARCHAR||first ep of cluster|
|
||||
|first\_ep\_dnode\_id|INT||dnode id of first\_ep|
|
||||
|version|VARCHAR||tdengine version. such as: 3.0.4.0|
|
||||
|master\_uptime|FLOAT||days of master's uptime|
|
||||
|monitor\_interval|INT||monitor interval in seconds|
|
||||
|dbs\_total|INT||total number of databases in cluster|
|
||||
|tbs\_total|BIGINT||total number of tables in cluster|
|
||||
|stbs\_total|INT||total number of stables in cluster|
|
||||
|dnodes\_total|INT||total number of dnodes in cluster|
|
||||
|dnodes\_alive|INT||total number of dnodes in ready state|
|
||||
|mnodes\_total|INT||total number of mnodes in cluster|
|
||||
|mnodes\_alive|INT||total number of mnodes in ready state|
|
||||
|vgroups\_total|INT||total number of vgroups in cluster|
|
||||
|vgroups\_alive|INT||total number of vgroups in ready state|
|
||||
|vnodes\_total|INT||total number of vnode in cluster|
|
||||
|vnodes\_alive|INT||total number of vnode in ready state|
|
||||
|connections\_total|INT||total number of connections to cluster|
|
||||
|topics\_total|INT||total number of topics in cluster|
|
||||
|streams\_total|INT||total number of streams in cluster|
|
||||
|protocol|INT||protocol version|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
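Since these are ordinary TDengine tables, they can be queried with regular SQL or through any connector. As a minimal sketch (assuming the default `log` database and the JDBC REST connector described later in this document; host and credentials are placeholders), the latest cluster snapshot can be read like this:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ClusterInfoQuery {
    public static void main(String[] args) throws Exception {
        // REST connection through taosAdapter; adjust host and credentials as needed
        String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             // newest row of the cluster_info table described above
             ResultSet rs = stmt.executeQuery(
                     "SELECT ts, dnodes_alive, dnodes_total, tbs_total FROM log.cluster_info ORDER BY ts DESC LIMIT 1")) {
            while (rs.next()) {
                System.out.printf("ts=%s dnodes=%d/%d tables=%d%n",
                        rs.getTimestamp("ts"), rs.getInt("dnodes_alive"),
                        rs.getInt("dnodes_total"), rs.getLong("tbs_total"));
            }
        }
    }
}
```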
|
||||
### d\_info table
|
||||
|
||||
`d_info` table contains dnodes information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|status|VARCHAR||dnode status|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### m\_info table
|
||||
|
||||
`m_info` table contains mnode information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|role|VARCHAR||the role of mnode. leader or follower|
|
||||
|mnode\_id|INT|TAG|master node id|
|
||||
|mnode\_ep|NCHAR|TAG|master node endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### dnodes\_info table
|
||||
|
||||
`dnodes_info` table contains dnodes information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|uptime|FLOAT||dnode uptime|
|
||||
|cpu\_engine|FLOAT||cpu usage of tdengine. read from `/proc/<taosd_pid>/stat`|
|
||||
|cpu\_system|FLOAT||cpu usage of server. read from `/proc/stat`|
|
||||
|cpu\_cores|FLOAT||cpu cores of server|
|
||||
|mem\_engine|INT||memory usage of tdengine. read from `/proc/<taosd_pid>/status`|
|
||||
|mem\_system|INT||available memory on the server|
|
||||
|mem\_total|INT||total memory of server in `KB`|
|
||||
|disk\_engine|INT|||
|
||||
|disk\_used|BIGINT||usage of data dir in `bytes`|
|
||||
|disk\_total|BIGINT||the capacity of data dir in `bytes`|
|
||||
|net\_in|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`|
|
||||
|net\_out|FLOAT||network throughput rate in kb/s. read from `/proc/net/dev`|
|
||||
|io\_read|FLOAT||io throughput rate in kb/s. read from `/proc/<taosd_pid>/io`|
|
||||
|io\_write|FLOAT||io throughput rate in kb/s. read from `/proc/<taosd_pid>/io`|
|
||||
|io\_read\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc/<taosd_pid>/io`|
|
||||
|io\_write\_disk|FLOAT||io throughput rate of disk in kb/s. read from `/proc/<taosd_pid>/io`|
|
||||
|req\_select|INT||number of select queries received per dnode|
|
||||
|req\_select\_rate|FLOAT||number of select queries received per dnode divided by monitor interval.|
|
||||
|req\_insert|INT||number of insert queries received per dnode|
|
||||
|req\_insert\_success|INT||number of successfully insert queries received per dnode|
|
||||
|req\_insert\_rate|FLOAT||number of insert queries received per dnode divided by monitor interval|
|
||||
|req\_insert\_batch|INT||number of batch insertions|
|
||||
|req\_insert\_batch\_success|INT||number of successful batch insertions|
|
||||
|req\_insert\_batch\_rate|FLOAT||number of batch insertions divided by monitor interval|
|
||||
|errors|INT||dnode errors|
|
||||
|vnodes\_num|INT||number of vnodes per dnode|
|
||||
|masters|INT||number of master vnodes|
|
||||
|has\_mnode|INT||if the dnode has mnode|
|
||||
|has\_qnode|INT||if the dnode has qnode|
|
||||
|has\_snode|INT||if the dnode has snode|
|
||||
|has\_bnode|INT||if the dnode has bnode|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### data\_dir table
|
||||
|
||||
`data_dir` table contains data directory information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|name|NCHAR||data directory. default is `/var/lib/taos`|
|
||||
|level|INT||level for multi-level storage|
|
||||
|avail|BIGINT||available space for data directory|
|
||||
|used|BIGINT||used space for data directory|
|
||||
|total|BIGINT||total space for data directory|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### log\_dir table
|
||||
|
||||
`log_dir` table contains log directory information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|name|NCHAR||log directory. default is `/var/log/taos/`|
|
||||
|avail|BIGINT||available space for log directory|
|
||||
|used|BIGINT||used space for log directory|
|
||||
|total|BIGINT||total space for log directory|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### temp\_dir table
|
||||
|
||||
`temp_dir` table contains temp dir information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|name|NCHAR||temp directory. default is `/tmp/`|
|
||||
|avail|BIGINT||available space for temp directory|
|
||||
|used|BIGINT||used space for temp directory|
|
||||
|total|BIGINT||total space for temp directory|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### vgroups\_info table
|
||||
|
||||
`vgroups_info` table contains vgroups information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|vgroup\_id|INT||vgroup id|
|
||||
|database\_name|VARCHAR||database for the vgroup|
|
||||
|tables\_num|BIGINT||number of tables per vgroup|
|
||||
|status|VARCHAR||status|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### vnodes\_role table
|
||||
|
||||
`vnodes_role` table contains vnode role information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|vnode\_role|VARCHAR||role. leader or follower|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### logs table
|
||||
|
||||
`logs` table contains log information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|level|VARCHAR||log level|
|
||||
|content|NCHAR||log content|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### log\_summary table
|
||||
|
||||
`log_summary` table contains log summary information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|error|INT||error count|
|
||||
|info|INT||info count|
|
||||
|debug|INT||debug count|
|
||||
|trace|INT||trace count|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### grants\_info table
|
||||
|
||||
`grants_info` table contains grants information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|expire\_time|BIGINT||time until grants expire in seconds|
|
||||
|timeseries\_used|BIGINT||timeseries used|
|
||||
|timeseries\_total|BIGINT||total timeseries|
|
||||
|dnode\_id|INT|TAG|dnode id|
|
||||
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|
||||
|cluster\_id|NCHAR|TAG|cluster id|
|
||||
|
||||
### keeper\_monitor table
|
||||
|
||||
`keeper_monitor` table contains keeper monitor information records.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|ts|TIMESTAMP||timestamp|
|
||||
|cpu|FLOAT||cpu usage|
|
||||
|mem|FLOAT||memory usage|
|
||||
|identify|NCHAR|TAG||
|
||||
|
||||
### taosadapter\_restful\_http\_request\_total table
|
||||
|
||||
`taosadapter_restful_http_request_total` table contains taosadapter rest request information record. The timestamp column of this table is `_ts`.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|\_ts|TIMESTAMP||timestamp|
|
||||
|gauge|DOUBLE||metric value|
|
||||
|client\_ip|NCHAR|TAG|client ip|
|
||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||
|request\_method|NCHAR|TAG|request method|
|
||||
|request\_uri|NCHAR|TAG|request uri|
|
||||
|status\_code|NCHAR|TAG|status code|
|
||||
|
||||
### taosadapter\_restful\_http\_request\_fail table
|
||||
|
||||
`taosadapter_restful_http_request_fail` table contains taosadapter failed rest request information record. The timestamp column of this table is `_ts`.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|\_ts|TIMESTAMP||timestamp|
|
||||
|gauge|DOUBLE||metric value|
|
||||
|client\_ip|NCHAR|TAG|client ip|
|
||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||
|request\_method|NCHAR|TAG|request method|
|
||||
|request\_uri|NCHAR|TAG|request uri|
|
||||
|status\_code|NCHAR|TAG|status code|
|
||||
|
||||
### taosadapter\_restful\_http\_request\_in\_flight table
|
||||
|
||||
`taosadapter_restful_http_request_in_flight` table contains taosadapter rest request information record in real time. The timestamp column of this table is `_ts`.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|\_ts|TIMESTAMP||timestamp|
|
||||
|gauge|DOUBLE||metric value|
|
||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||
|
||||
### taosadapter\_restful\_http\_request\_summary\_milliseconds table
|
||||
|
||||
`taosadapter_restful_http_request_summary_milliseconds` table contains the summary of REST request information records. The timestamp column of this table is `_ts`.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|\_ts|TIMESTAMP||timestamp|
|
||||
|count|DOUBLE|||
|
||||
|sum|DOUBLE|||
|
||||
|0.5|DOUBLE|||
|
||||
|0.9|DOUBLE|||
|
||||
|0.99|DOUBLE|||
|
||||
|0.1|DOUBLE|||
|
||||
|0.2|DOUBLE|||
|
||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||
|request\_method|NCHAR|TAG|request method|
|
||||
|request\_uri|NCHAR|TAG|request uri|
|
||||
|
||||
### taosadapter\_system\_mem\_percent table
|
||||
|
||||
`taosadapter_system_mem_percent` table contains taosadapter memory usage information. The timestamp of this table is `_ts`.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|\_ts|TIMESTAMP||timestamp|
|
||||
|gauge|DOUBLE||metric value|
|
||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||
|
||||
### taosadapter\_system\_cpu\_percent table
|
||||
|
||||
`taosadapter_system_cpu_percent` table contains taosadapter cpu usage information. The timestamp of this table is `_ts`.
|
||||
|
||||
|field|type|is\_tag|comment|
|
||||
|:----|:---|:-----|:------|
|
||||
|\_ts|TIMESTAMP||timestamp|
|
||||
|gauge|DOUBLE||metric value|
|
||||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||
|
||||
|
|
|
@ -9,13 +9,13 @@ When a TDengine client is unable to access a TDengine server, the network connec
|
|||
|
||||
Diagnostics for network connections can be executed between Linux/Windows/macOS.
|
||||
|
||||
Diagnostic steps:
|
||||
Diagnostic steps:
|
||||
|
||||
1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd`.
|
||||
2. On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server".
|
||||
3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send a testing package to the specified server and port.
|
||||
|
||||
-l <pktlen\>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
|
||||
-l <pktlen\>: The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
|
||||
Please note that the package length must be the same in the above 2 commands executed on the server side and client side respectively.
|
||||
|
||||
Output of the server side for the example is below:
|
||||
|
|
|
@ -83,13 +83,13 @@ For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:60
|
|||
|
||||
TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
|
||||
|
||||
- authentication information is shown below:
|
||||
- Custom authentication information is shown below:
|
||||
|
||||
```text
|
||||
Authorization: Taosd <TOKEN>
|
||||
```
|
||||
|
||||
- Basic authentication information is shown below:
|
||||
- Basic authentication information is shown below:
|
||||
|
||||
```text
|
||||
Authorization: Basic <TOKEN>
|
||||
|
|
|
@ -12,9 +12,9 @@ C/C++ developers can use TDengine's client driver and the C/C++ connector, to de
|
|||
|
||||
After TDengine server or client installation, `taos.h` is located at
|
||||
|
||||
- Linux:`/usr/local/taos/include`
|
||||
- Windows:`C:\TDengine\include`
|
||||
- macOS:`/usr/local/include`
|
||||
- Linux: `/usr/local/taos/include`
|
||||
- Windows: `C:\TDengine\include`
|
||||
- macOS: `/usr/local/include`
|
||||
|
||||
The dynamic libraries for the TDengine client driver are located in:
|
||||
|
||||
|
@ -412,7 +412,8 @@ In addition to writing data using the SQL method or the parameter binding API, w
|
|||
Note that the timestamp resolution parameter only takes effect when the protocol type is `SML_LINE_PROTOCOL`.
|
||||
For OpenTSDB's text protocol, timestamp resolution follows its official resolution rules: time precision is determined by the number of characters contained in the timestamp.
|
||||
|
||||
Other schemaless-related interfaces
|
||||
schemaless interfaces:
|
||||
|
||||
- `TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int64_t reqid)`
|
||||
- `TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision)`
|
||||
- `TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int64_t reqid)`
|
||||
|
@ -423,6 +424,6 @@ In addition to writing data using the SQL method or the parameter binding API, w
|
|||
|
||||
**Description**
|
||||
- The above seven interfaces are extension interfaces, which are mainly used to pass ttl and reqid parameters, and can be used as needed.
|
||||
- Withing _raw interfaces represent data through the passed parameters lines and len. In order to solve the problem that the original interface data contains '\0' and is truncated. The totalRows pointer returns the number of parsed data rows.
|
||||
- Withing _ttl interfaces can pass the ttl parameter to control the ttl expiration time of the table.
|
||||
- Withing _reqid interfaces can track the entire call chain by passing the reqid parameter.
|
||||
- The _raw interfaces pass data through the parameters lines and len, in order to solve the problem that data containing '\0' is truncated by the original interface. The totalRows pointer returns the number of parsed data rows.
|
||||
- The _ttl interfaces can pass the ttl parameter to control the TTL expiration time of the table.
|
||||
- The _reqid interfaces can track the entire call chain by passing the reqid parameter.
|
||||
|
|
|
@ -32,27 +32,114 @@ TDengine's JDBC driver implementation is as consistent as possible with the rela
|
|||
Native connections are supported on the same platforms as the TDengine client driver.
|
||||
REST connection supports all platforms that can run Java.
|
||||
|
||||
## Version support
|
||||
## Recent update logs
|
||||
|
||||
Please refer to [version support list](/reference/connector#version-support)
|
||||
| taos-jdbcdriver version | major changes | TDengine version |
|
||||
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
|
||||
| 3.2.3 | Fixed resultSet data parsing failure in some cases | 3.0.5.0 or later |
|
||||
| 3.2.2 | subscription add seek function | 3.0.5.0 or later |
|
||||
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later |
|
||||
| 3.2.0 | This version has been deprecated | - |
|
||||
| 3.1.0 | JDBC REST connection supports subscription over WebSocket | - |
|
||||
| 3.0.1 - 3.0.4 | Fixed result set data sometimes being parsed incorrectly. 3.0.1 is compiled on JDK 11; you are advised to use another version in a JDK 8 environment | - |
|
||||
| 3.0.0 | Support for TDengine 3.0 | 3.0.0.0 or later |
|
||||
| 2.0.42 | fix wasNull interface return value in WebSocket connection | - |
|
||||
| 2.0.41 | fix decode method of username and password in REST connection | - |
|
||||
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters | - |
|
||||
| 2.0.38 | JDBC REST connections add bulk pull function | - |
|
||||
| 2.0.37 | Support json tags | - |
|
||||
| 2.0.36 | Support schemaless writing | - |
|
||||
|
||||
**Note**: adding `batchfetch` to the REST connection and setting it to true will enable the WebSocket connection.
|
||||
|
||||
### Handling exceptions
|
||||
|
||||
After an error is reported, the error message and error code can be obtained through SQLException.
|
||||
|
||||
```java
|
||||
try (Statement statement = connection.createStatement()) {
|
||||
// executeQuery
|
||||
ResultSet resultSet = statement.executeQuery(sql);
|
||||
// print result
|
||||
printResult(resultSet);
|
||||
} catch (SQLException e) {
|
||||
System.out.println("ERROR Message: " + e.getMessage());
|
||||
System.out.println("ERROR Code: " + e.getErrorCode());
|
||||
e.printStackTrace();
|
||||
}
|
||||
```
|
||||
|
||||
There are four types of error codes that the JDBC connector can report:
|
||||
|
||||
- Error code of the JDBC driver itself (error code between 0x2301 and 0x2350),
|
||||
- Error code of the native connection method (error code between 0x2351 and 0x2360)
|
||||
- Error code of the consumer method (error code between 0x2371 and 0x2380)
|
||||
- Error code of other TDengine function modules.
|
||||
|
||||
For specific error codes, please refer to the table below.
|
||||
|
||||
| Error Code | Description | Suggested Actions |
|
||||
| ---------- | --------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 0x2301 | connection already closed | The connection has been closed, check the connection status, or recreate the connection to execute the relevant instructions. |
|
||||
| 0x2302 | this operation is NOT supported currently! | The current interface does not support the connection. You can use another connection mode. |
|
||||
| 0x2303 | invalid variables | The parameter is invalid. Check the interface specification and adjust the parameter type and size. |
|
||||
| 0x2304 | statement is closed | The statement is closed. Check whether the statement is closed and used again, or whether the connection is normal. |
|
||||
| 0x2305 | resultSet is closed | The result set has been released. Check whether the result set is released and then used again. |
|
||||
| 0x2306 | Batch is empty! | Add parameters to the prepared statement and then execute batch. |
|
||||
| 0x2307 | Can not issue data manipulation statements with executeQuery() | The update operation should use executeUpdate(), not executeQuery(). |
|
||||
| 0x2308 | Can not issue SELECT via executeUpdate() | The query operation should use executeQuery(), not executeUpdate(). |
|
||||
| 0x230d | parameter index out of range | The parameter is out of bounds. Check the proper range of the parameter. |
|
||||
| 0x230e | connection already closed | The connection has been closed. Please check whether the connection is closed and used again, or whether the connection is normal. |
|
||||
| 0x230f | unknown sql type in tdengine | Check the data type supported by TDengine. |
|
||||
| 0x2310 | can't register JDBC-JNI driver | The native driver cannot be registered. Please check whether the url is correct. |
|
||||
| 0x2312 | url is not set | Check whether the REST connection url is correct. |
|
||||
| 0x2314 | numeric value out of range | Check that the correct interface is used for the numeric types in the obtained result set. |
|
||||
| 0x2315 | unknown taos type in tdengine | Whether the correct TDengine data type is specified when converting the TDengine data type to the JDBC data type. |
|
||||
| 0x2317 | | wrong request type was used in the REST connection. |
|
||||
| 0x2318 | | data transmission exception occurred during the REST connection. Please check the network status and try again. |
|
||||
| 0x2319 | user is required | The user name information is missing when creating the connection |
|
||||
| 0x231a | password is required | Password information is missing when creating a connection |
|
||||
| 0x231c | httpEntity is null, sql: | Execution exception occurred during the REST connection |
|
||||
| 0x231d | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
|
||||
| 0x231e | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
|
||||
| 0x2350 | unknown error | Unknown exception, please report it to the developer on GitHub. |
|
||||
| 0x2352 | Unsupported encoding | An unsupported character encoding set is specified under the native Connection. |
|
||||
| 0x2353 | internal error of database, please see taoslog for more details | An error occurs when the prepare statement is executed on the native connection. Check the taos log to locate the fault. |
|
||||
| 0x2354 | JNI connection is NULL | When the command is executed, the native Connection is closed. Check the connection to TDengine. |
|
||||
| 0x2355 | JNI result set is NULL | The result set is abnormal. Please check the connection status and try again. |
|
||||
| 0x2356 | invalid num of fields | The meta information of the result set obtained by the native connection does not match. |
|
||||
| 0x2357 | empty sql string | Fill in the correct SQL for execution. |
|
||||
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation for the native connection failed. Check the taos log to locate the problem. |
|
||||
| 0x2371 | consumer properties must not be null! | The parameter is empty when you create a subscription. Please fill in the correct parameter. |
|
||||
| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains a null value. Please enter the correct parameter. |
|
||||
| 0x2373 | failed to set consumer property, | The parameter value contains a null value. Please enter the correct parameter. |
|
||||
| 0x2375 | topic reference has been destroyed | The topic reference is released during the creation of the data subscription. Check the connection to TDengine. |
|
||||
| 0x2376 | failed to set consumer topic, topic name is empty | During data subscription creation, the subscription topic name is empty. Check that the specified topic name is correct. |
|
||||
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Please check the connection to TDengine. |
|
||||
| 0x2378 | consumer create error | Failed to create a data subscription. Check the taos log according to the error message to locate the fault. |
|
||||
| 0x2379 | seek offset must not be a negative number | The seek interface parameter cannot be negative. Use the correct parameter |
|
||||
| 0x237a | vGroup not found in result set | subscription is not bound to the VGroup due to the rebalance mechanism |
|
||||
|
||||
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||
|
||||
## TDengine DataType vs. Java DataType
|
||||
|
||||
TDengine currently supports timestamp, number, character, Boolean type, and the corresponding type conversion with Java is as follows:
|
||||
|
||||
| TDengine DataType | JDBCType |
|
||||
| ----------------- | ---------------------------------- |
|
||||
| TIMESTAMP | java.sql.Timestamp |
|
||||
| INT | java.lang.Integer |
|
||||
| BIGINT | java.lang.Long |
|
||||
| FLOAT | java.lang.Float |
|
||||
| DOUBLE | java.lang.Double |
|
||||
| SMALLINT | java.lang.Short |
|
||||
| TINYINT | java.lang.Byte |
|
||||
| BOOL | java.lang.Boolean |
|
||||
| BINARY | byte array |
|
||||
| NCHAR | java.lang.String |
|
||||
| JSON | java.lang.String |
|
||||
| TDengine DataType | JDBCType |
|
||||
| ----------------- | ------------------ |
|
||||
| TIMESTAMP | java.sql.Timestamp |
|
||||
| INT | java.lang.Integer |
|
||||
| BIGINT | java.lang.Long |
|
||||
| FLOAT | java.lang.Float |
|
||||
| DOUBLE | java.lang.Double |
|
||||
| SMALLINT | java.lang.Short |
|
||||
| TINYINT | java.lang.Byte |
|
||||
| BOOL | java.lang.Boolean |
|
||||
| BINARY | byte array |
|
||||
| NCHAR | java.lang.String |
|
||||
| JSON | java.lang.String |
|
||||
|
||||
**Note**: Only tags support the JSON type
|
||||
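As an illustration of how these mappings are typically consumed (a sketch only; the `test.meters` table and its column names are hypothetical), the standard JDBC getters line up with the types above:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.sql.Timestamp;

public class TypeMappingExample {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                     "SELECT ts, current, voltage, location FROM test.meters LIMIT 1")) {
            while (rs.next()) {
                Timestamp ts = rs.getTimestamp("ts");       // TIMESTAMP -> java.sql.Timestamp
                float current = rs.getFloat("current");     // FLOAT     -> java.lang.Float
                int voltage = rs.getInt("voltage");         // INT       -> java.lang.Integer
                String location = rs.getString("location"); // BINARY maps to byte array; getString also works for text content
                System.out.printf("%s %.2f %d %s%n", ts, current, voltage, location);
            }
        }
    }
}
```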
|
||||
|
@ -82,7 +169,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.0.0</version>
|
||||
<version>3.2.2</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
@ -97,7 +184,7 @@ cd taos-connector-jdbc
|
|||
mvn clean install -Dmaven.test.skip=true
|
||||
```
|
||||
|
||||
After you have compiled taos-jdbcdriver, the `taos-jdbcdriver-3.0.*-dist.jar` file is created in the target directory. The compiled JAR file is automatically stored in your local Maven repository.
|
||||
After you have compiled taos-jdbcdriver, the `taos-jdbcdriver-3.2.*-dist.jar` file is created in the target directory. The compiled JAR file is automatically stored in your local Maven repository.
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
@ -198,10 +285,11 @@ The configuration parameters in the URL are as follows:
|
|||
- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is: false. batchfetch uses HTTP for data transfer. JDBC REST supports batch pulls. taos-jdbcdriver and TDengine transfer data via WebSocket connection. Compared with HTTP, WebSocket enables JDBC REST connection to support large data volume querying and improve query performance.
|
||||
- charset: specifies the charset used to parse strings. This parameter is valid only when batchfetch is set to true.
|
||||
- batchErrorIgnore: true: when executing executeBatch of Statement, if one SQL statement fails in the middle, the remaining statements are still executed. false: no further statements are executed after the failed SQL. The default value is false.
|
||||
- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 5000 ms.
|
||||
- httpSocketTimeout: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when batchfetch is false.
|
||||
- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when batchfetch is true.
|
||||
- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 60000 ms.
|
||||
- httpSocketTimeout: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is false.
|
||||
- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is true.
|
||||
- useSSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection.
|
||||
- httpPoolSize: size of REST concurrent requests. The default value is 20.
|
||||
|
||||
**Note**: Some configuration items (e.g., locale, timezone) do not work in the REST connection.
|
||||
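As a quick sketch of how several of the parameters above combine in practice (host, port, and credentials are placeholders), a REST connection URL might look like this:

```java
import java.sql.Connection;
import java.sql.DriverManager;

public class RestUrlExample {
    public static void main(String[] args) throws Exception {
        // batchfetch=true switches the REST connection to WebSocket-based batch pulling;
        // the timeout values below are in milliseconds, as described above
        String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata"
                + "&batchfetch=true&httpConnectTimeout=60000&messageWaitTimeout=60000";
        try (Connection conn = DriverManager.getConnection(url)) {
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}
```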
|
||||
|
@ -227,7 +315,7 @@ In addition to getting the connection from the specified URL, you can use Proper
|
|||
Note:
|
||||
|
||||
- The client parameter set in the application is process-level. If you want to update the parameters of the client, you need to restart the application. This is because the client parameter is a global parameter that takes effect only the first time the application is set.
|
||||
- The following sample code is based on taos-jdbcdriver-3.0.0.
|
||||
- The following sample code is based on taos-jdbcdriver-3.1.0.
|
||||
|
||||
```java
|
||||
public Connection getConn() throws Exception{
|
||||
|
@ -265,10 +353,11 @@ The configuration parameters in properties are as follows.
|
|||
- TSDBDriver.PROPERTY_KEY_CHARSET: In the character set used by the client, the default value is the system character set.
|
||||
- TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale.
|
||||
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. In the time zone used by the client, the default value is the system's current time zone.
|
||||
- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection.
|
||||
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection and batchfetch is false.
|
||||
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when using JDBC REST connection and batchfetch is true.
|
||||
- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection.
|
||||
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is false.
|
||||
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true.
|
||||
- TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. It only takes effect when using JDBC REST connection.
|
||||
- TSDBDriver.HTTP_POOL_SIZE: size of REST concurrent requests. The default value is 20.
|
||||
For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only).
|
||||
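As a sketch of how a few of the keys above can be combined (the URL, host, and property values are placeholders), a Properties-based native connection could be configured like this:

```java
import com.taosdata.jdbc.TSDBDriver;

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class PropertiesConnectExample {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:TAOS://localhost:6030/?user=root&password=taosdata";
        Properties props = new Properties();
        // charset, locale and timezone only take effect on native connections, as noted above
        props.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        props.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
        props.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
        try (Connection conn = DriverManager.getConnection(url, props)) {
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}
```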
|
||||
### Priority of configuration parameters
|
||||
|
@ -333,30 +422,19 @@ while(resultSet.next()){
|
|||
|
||||
> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.
|
||||
|
||||
### Handling exceptions
|
||||
### execute SQL with reqId
|
||||
|
||||
After an error is reported, the error message and error code can be obtained through SQLException.
|
||||
This reqId can be used to request link tracing.
|
||||
|
||||
```java
|
||||
try (Statement statement = connection.createStatement()) {
|
||||
// executeQuery
|
||||
ResultSet resultSet = statement.executeQuery(sql);
|
||||
// print result
|
||||
printResult(resultSet);
|
||||
} catch (SQLException e) {
|
||||
System.out.println("ERROR Message: " + e.getMessage());
|
||||
System.out.println("ERROR Code: " + e.getErrorCode());
|
||||
e.printStackTrace();
|
||||
AbstractStatement aStmt = (AbstractStatement) connection.createStatement();
|
||||
aStmt.execute("create database if not exists db", 1L);
|
||||
aStmt.executeUpdate("use db", 2L);
|
||||
try (ResultSet rs = aStmt.executeQuery("select * from tb", 3L)) {
|
||||
Timestamp ts = rs.getTimestamp(1);
|
||||
}
|
||||
```
|
||||
|
||||
There are three types of error codes that the JDBC connector can report: the error code of the JDBC driver itself (between 0x2301 and 0x2350), the error code of the native connection method (between 0x2351 and 0x2400), and error codes of other TDengine function modules.
|
||||
|
||||
For specific error codes, please refer to.
|
||||
|
||||
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||
|
||||
### Writing data via parameter binding
|
||||
|
||||
TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.
|
||||
|
@ -364,9 +442,12 @@ TDengine has significantly improved the bind APIs to support data writing (INSER
|
|||
**Note:**
|
||||
|
||||
- JDBC REST connections do not currently support the bind interface
|
||||
- The following sample code is based on taos-jdbcdriver-3.0.0
|
||||
- The following sample code is based on taos-jdbcdriver-3.2.1
|
||||
- The setString method should be called for binary type data, and the setNString method should be called for nchar type data
|
||||
- both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition
|
||||
- Do not use `db.?` in prepareStatement when specifying the database with the table name; use `?` directly and then specify the database in setTableName, for example: `prepareStatement.setTableName("db.t1")`.
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
```java
|
||||
public class ParameterBindingDemo {
|
||||
|
@ -594,21 +675,7 @@ public class ParameterBindingDemo {
|
|||
}
|
||||
```
|
||||
|
||||
The methods to set TAGS values:
|
||||
|
||||
```java
|
||||
public void setTagNull(int index, int type)
|
||||
public void setTagBoolean(int index, boolean value)
|
||||
public void setTagInt(int index, int value)
|
||||
public void setTagByte(int index, byte value)
|
||||
public void setTagShort(int index, short value)
|
||||
public void setTagLong(int index, long value)
|
||||
public void setTagTimestamp(int index, long value)
|
||||
public void setTagFloat(int index, float value)
|
||||
public void setTagDouble(int index, double value)
|
||||
public void setTagString(int index, String value)
|
||||
public void setTagNString(int index, String value)
|
||||
```
|
||||
**Note**: both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition
|
||||
|
||||
The methods to set VALUES columns:
|
||||
|
||||
|
@ -625,17 +692,203 @@ public void setString(int columnIndex, ArrayList<String> list, int size) throws
|
|||
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="ws" label="WebSocket connection">
|
||||
|
||||
```java
|
||||
public class ParameterBindingDemo {
|
||||
private static final String host = "127.0.0.1";
|
||||
private static final Random random = new Random(System.currentTimeMillis());
|
||||
private static final int BINARY_COLUMN_SIZE = 30;
|
||||
private static final String[] schemaList = {
|
||||
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
|
||||
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
|
||||
"create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
|
||||
"create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
|
||||
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
|
||||
};
|
||||
private static final int numOfSubTable = 10, numOfRow = 10;
|
||||
|
||||
public static void main(String[] args) throws SQLException {
|
||||
|
||||
String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true";
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");
|
||||
|
||||
init(conn);
|
||||
|
||||
bindInteger(conn);
|
||||
|
||||
bindFloat(conn);
|
||||
|
||||
bindBoolean(conn);
|
||||
|
||||
bindBytes(conn);
|
||||
|
||||
bindString(conn);
|
||||
|
||||
conn.close();
|
||||
}
|
||||
|
||||
private static void init(Connection conn) throws SQLException {
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
stmt.execute("drop database if exists test_ws_parabind");
|
||||
stmt.execute("create database if not exists test_ws_parabind");
|
||||
stmt.execute("use test_ws_parabind");
|
||||
for (int i = 0; i < schemaList.length; i++) {
|
||||
stmt.execute(schemaList[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindInteger(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t1_" + i);
|
||||
// set tags
|
||||
pstmt.setTagByte(1, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
|
||||
pstmt.setTagShort(2, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
|
||||
pstmt.setTagInt(3, random.nextInt(Integer.MAX_VALUE));
|
||||
pstmt.setTagLong(4, random.nextLong());
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setByte(2, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
|
||||
pstmt.setShort(3, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
|
||||
pstmt.setInt(4, random.nextInt(Integer.MAX_VALUE));
|
||||
pstmt.setLong(5, random.nextLong());
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindFloat(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";
|
||||
|
||||
try(TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t2_" + i);
|
||||
// set tags
|
||||
pstmt.setTagFloat(1, random.nextFloat());
|
||||
pstmt.setTagDouble(2, random.nextDouble());
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setFloat(2, random.nextFloat());
|
||||
pstmt.setDouble(3, random.nextDouble());
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindBoolean(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable3 tags(?) values(?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t3_" + i);
|
||||
// set tags
|
||||
pstmt.setTagBoolean(1, random.nextBoolean());
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setBoolean(2, random.nextBoolean());
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindBytes(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable4 tags(?) values(?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t4_" + i);
|
||||
// set tags
|
||||
pstmt.setTagString(1, new String("abc"));
|
||||
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setString(2, "abc");
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindString(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable5 tags(?) values(?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t5_" + i);
|
||||
// set tags
|
||||
pstmt.setTagNString(1, "California.SanFrancisco");
|
||||
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setNString(2, "California.SanFrancisco");
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
The methods to set TAGS values:
|
||||
|
||||
```java
|
||||
public void setTagNull(int index, int type)
|
||||
public void setTagBoolean(int index, boolean value)
|
||||
public void setTagInt(int index, int value)
|
||||
public void setTagByte(int index, byte value)
|
||||
public void setTagShort(int index, short value)
|
||||
public void setTagLong(int index, long value)
|
||||
public void setTagTimestamp(int index, long value)
|
||||
public void setTagFloat(int index, float value)
|
||||
public void setTagDouble(int index, double value)
|
||||
public void setTagString(int index, String value)
|
||||
public void setTagNString(int index, String value)
|
||||
```
|
||||
|
||||
### Schemaless Writing
|
||||
|
||||
TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless).
|
||||
|
||||
Note:
|
||||
|
||||
- JDBC REST connections do not currently support schemaless writes
|
||||
- The following sample code is based on taos-jdbcdriver-3.0.0
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
```java
|
||||
public class SchemalessInsertTest {
|
||||
public class SchemalessJniTest {
|
||||
private static final String host = "127.0.0.1";
|
||||
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
|
||||
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
|
||||
|
@ -663,6 +916,50 @@ public class SchemalessInsertTest {
|
|||
}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="ws" label="WebSocket connection">
|
||||
|
||||
```java
|
||||
public class SchemalessWsTest {
|
||||
private static final String host = "127.0.0.1";
|
||||
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
|
||||
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
|
||||
private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
|
||||
|
||||
public static void main(String[] args) throws SQLException {
|
||||
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
|
||||
try(Connection connection = DriverManager.getConnection(url)){
|
||||
init(connection);
|
||||
|
||||
try(SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless")){
|
||||
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
|
||||
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
|
||||
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void init(Connection connection) throws SQLException {
|
||||
try (Statement stmt = connection.createStatement()) {
|
||||
stmt.executeUpdate("drop database if exists test_ws_schemaless");
|
||||
stmt.executeUpdate("create database if not exists test_ws_schemaless keep 36500");
|
||||
stmt.executeUpdate("use test_ws_schemaless");
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Schemaless with reqId
|
||||
|
||||
This reqId can be used to request link tracing.
|
||||
|
||||
```java
|
||||
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS, 1L);
|
||||
```
|
||||
|
||||
### Data Subscription
|
||||
|
||||
The TDengine Java Connector supports subscription functionality with the following application API.
|
||||
|
@ -686,6 +983,7 @@ The preceding example uses the SQL statement `select ts, speed from speed_table`
|
|||
|
||||
```java
|
||||
Properties config = new Properties();
|
||||
config.setProperty("bootstrap.servers", "localhost:6030");
|
||||
config.setProperty("enable.auto.commit", "true");
|
||||
config.setProperty("group.id", "group1");
|
||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");
|
||||
|
@ -693,12 +991,14 @@ config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.Res
|
|||
TaosConsumer consumer = new TaosConsumer<>(config);
|
||||
```
|
||||
|
||||
- bootstrap.servers: `ip:port` where the TDengine server is located, or `ip:port` where the taosAdapter is located if WebSocket connection is used.
|
||||
- enable.auto.commit: Specifies whether to commit automatically.
|
||||
- group.id: Specifies the group that the consumer is in.
|
||||
- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set. A minimal sketch is shown after this list.
|
||||
- td.connect.type: Specifies the connection type to TDengine, `jni` or `WebSocket`. The default is `jni`.
|
||||
- httpConnectTimeout:WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
|
||||
- messageWaitTimeout:socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
|
||||
- httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
|
||||
- messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
|
||||
- httpPoolSize: Maximum number of concurrent requests on a connection. It only takes effect when using WebSocket type.
|
||||
- For more information, see [Consumer Parameters](../../../develop/tmq).
|
||||
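A minimal sketch of the `value.deserializer` setting mentioned above: the bean's fields are assumed to match the columns of the subscribed topic (here `ts` and `speed`, as in the `select ts, speed from speed_table` example), and the deserializer only needs to name the bean type.

```java
import com.taosdata.jdbc.tmq.ReferenceDeserializer;

import java.sql.Timestamp;

public class ResultBean {
    private Timestamp ts; // assumed column: ts
    private int speed;    // assumed column: speed

    public Timestamp getTs() { return ts; }
    public void setTs(Timestamp ts) { this.ts = ts; }
    public int getSpeed() { return speed; }
    public void setSpeed(int speed) { this.speed = speed; }
}

// ReferenceDeserializer maps result columns to bean fields by name
class ResultDeserializer extends ReferenceDeserializer<ResultBean> {
}
```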
|
||||
#### Subscribe to consume data
|
||||
|
@ -706,14 +1006,49 @@ TaosConsumer consumer = new TaosConsumer<>(config);
|
|||
```java
|
||||
while(true) {
|
||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
||||
for (ResultBean record : records) {
|
||||
process(record);
|
||||
for (ConsumerRecord<ResultBean> record : records) {
|
||||
ResultBean bean = record.value();
|
||||
process(bean);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
`poll` obtains one message each time it is run.
|
||||
|
||||
#### Assignment subscription Offset
|
||||
|
||||
```java
|
||||
long position(TopicPartition partition) throws SQLException;
|
||||
Map<TopicPartition, Long> position(String topic) throws SQLException;
|
||||
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
|
||||
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
|
||||
|
||||
void seek(TopicPartition partition, long offset) throws SQLException;
|
||||
```
|
||||
|
||||
Example usage is as follows.
|
||||
|
||||
```java
|
||||
String topic = "offset_seek_test";
|
||||
Map<TopicPartition, Long> offset = null;
|
||||
try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
|
||||
consumer.subscribe(Collections.singletonList(topic));
|
||||
for (int i = 0; i < 10; i++) {
|
||||
if (i == 3) {
|
||||
// Saving consumption position
|
||||
offset = consumer.position(topic);
|
||||
}
|
||||
if (i == 5) {
|
||||
// reset consumption to the previously saved position
|
||||
for (Map.Entry<TopicPartition, Long> entry : offset.entrySet()) {
|
||||
consumer.seek(entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(500));
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Close subscriptions
|
||||
|
||||
```java
|
||||
|
@ -741,10 +1076,20 @@ public abstract class ConsumerLoop {
|
|||
|
||||
public ConsumerLoop() throws SQLException {
|
||||
Properties config = new Properties();
|
||||
config.setProperty("td.connect.type", "jni");
|
||||
config.setProperty("bootstrap.servers", "localhost:6030");
|
||||
config.setProperty("td.connect.user", "root");
|
||||
config.setProperty("td.connect.pass", "taosdata");
|
||||
config.setProperty("auto.offset.reset", "earliest");
|
||||
config.setProperty("msg.with.table.name", "true");
|
||||
config.setProperty("enable.auto.commit", "true");
|
||||
config.setProperty("auto.commit.interval.ms", "1000");
|
||||
config.setProperty("group.id", "group1");
|
||||
config.setProperty("client.id", "1");
|
||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
||||
config.setProperty("value.deserializer.encoding", "UTF-8");
|
||||
config.setProperty("experimental.snapshot.enable", "true");
|
||||
|
||||
|
||||
this.consumer = new TaosConsumer<>(config);
|
||||
this.topics = Collections.singletonList("topic_speed");
|
||||
|
@ -760,8 +1105,9 @@ public abstract class ConsumerLoop {
|
|||
|
||||
while (!shutdown.get()) {
|
||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
||||
for (ResultBean record : records) {
|
||||
process(record);
|
||||
for (ConsumerRecord<ResultBean> record : records) {
|
||||
ResultBean bean = record.value();
|
||||
process(bean);
|
||||
}
|
||||
}
|
||||
consumer.unsubscribe();
|
||||
|
@ -815,12 +1161,19 @@ public abstract class ConsumerLoop {
|
|||
|
||||
public ConsumerLoop() throws SQLException {
|
||||
Properties config = new Properties();
|
||||
config.setProperty("bootstrap.servers", "localhost:6041");
|
||||
config.setProperty("td.connect.type", "ws");
|
||||
config.setProperty("bootstrap.servers", "localhost:6041");
|
||||
config.setProperty("td.connect.user", "root");
|
||||
config.setProperty("td.connect.pass", "taosdata");
|
||||
config.setProperty("auto.offset.reset", "earliest");
|
||||
config.setProperty("msg.with.table.name", "true");
|
||||
config.setProperty("enable.auto.commit", "true");
|
||||
config.setProperty("auto.commit.interval.ms", "1000");
|
||||
config.setProperty("group.id", "group2");
|
||||
config.setProperty("client.id", "1");
|
||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
||||
config.setProperty("value.deserializer.encoding", "UTF-8");
|
||||
config.setProperty("experimental.snapshot.enable", "true");
|
||||
|
||||
this.consumer = new TaosConsumer<>(config);
|
||||
this.topics = Collections.singletonList("topic_speed");
|
||||
|
@ -836,8 +1189,9 @@ public abstract class ConsumerLoop {
|
|||
|
||||
while (!shutdown.get()) {
|
||||
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
|
||||
for (ResultBean record : records) {
|
||||
process(record);
|
||||
for (ConsumerRecord<ResultBean> record : records) {
|
||||
ResultBean bean = record.value();
|
||||
process(bean);
|
||||
}
|
||||
}
|
||||
consumer.unsubscribe();
|
||||
|
@ -960,23 +1314,10 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
|
|||
- connectionPools: using taos-jdbcdriver in connection pools such as HikariCP, Druid, dbcp, c3p0, etc.
|
||||
- SpringJdbcTemplate: using taos-jdbcdriver in Spring JdbcTemplate.
|
||||
- mybatisplus-demo: using taos-jdbcdriver in Springboot + Mybatis.
|
||||
- consumer-demo: consumer TDengine data example, the consumption rate can be controlled by parameters.
|
||||
|
||||
[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
|
||||
|
||||
## Recent update logs
|
||||
|
||||
| taos-jdbcdriver version | major changes |
|
||||
| :---------------------: | :--------------------------------------------: |
|
||||
| 3.1.0 | JDBC REST connection supports subscription over WebSocket |
|
||||
| 3.0.1 - 3.0.4 | fix the resultSet data being parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11; you are advised to use other versions in a JDK 8 environment |
|
||||
| 3.0.0 | Support for TDengine 3.0 |
|
||||
| 2.0.42 | fix wasNull interface return value in WebSocket connection |
|
||||
| 2.0.41 | fix decode method of username and password in REST connection |
|
||||
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
|
||||
| 2.0.38 | JDBC REST connections add bulk pull function |
|
||||
| 2.0.37 | Support json tags |
|
||||
| 2.0.36 | Support schemaless writing |
|
||||
|
||||
## Frequently Asked Questions
|
||||
|
||||
1. Why is there no performance improvement when using Statement's `addBatch()` and `executeBatch()` to perform `batch data writing/update`?
|
||||
|
@ -999,18 +1340,22 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
|
|||
|
||||
4. java.lang.NoSuchMethodError: setByteArray
|
||||
|
||||
**Cause**: taos-jbdcdriver 3.* only supports TDengine 3.0 and later.
|
||||
**Cause**: taos-jdbcdriver 3.\* only supports TDengine 3.0 and later.
|
||||
|
||||
**Solution**: Use taos-jdbcdriver 2.* with your TDengine 2.* deployment.
|
||||
**Solution**: Use taos-jdbcdriver 2.\* with your TDengine 2.\* deployment.
|
||||
|
||||
5. java.lang.NoSuchMethodError: java.nio.ByteBuffer.position(I)Ljava/nio/ByteBuffer; ... taos-jdbcdriver-3.0.1.jar
|
||||
|
||||
**Cause**:taos-jdbcdriver 3.0.1 is compiled on JDK 11.
|
||||
**Cause**: taos-jdbcdriver 3.0.1 is compiled on JDK 11.
|
||||
|
||||
**Solution**: Use taos-jdbcdriver 3.0.2.
|
||||
**Solution**: Use taos-jdbcdriver 3.0.2.
|
||||
|
||||
For additional troubleshooting, see [FAQ](../../../train-faq/faq).
|
||||
|
||||
## API Reference
|
||||
|
||||
[taos-jdbcdriver doc](https://docs.taosdata.com/api/taos-jdbcdriver)
|
||||
|
||||
|
||||
|
|
|
@ -11,6 +11,7 @@ import TabItem from '@theme/TabItem';
|
|||
import Preparition from "./_preparation.mdx"
|
||||
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
|
||||
import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
|
||||
import RustSml from "../../07-develop/03-insert-data/_rust_schemaless.mdx"
|
||||
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
|
||||
|
||||
[](https://crates.io/crates/taos)  [](https://docs.rs/taos)
|
||||
|
@ -26,20 +27,61 @@ The source code for the Rust connectors is located on [GitHub](https://github.co
|
|||
Native connections are supported on the same platforms as the TDengine client driver.
|
||||
Websocket connections are supported on all platforms that can run Rust.
|
||||
|
||||
## Version support
|
||||
## Version history
|
||||
|
||||
Please refer to [version support list](/reference/connector#version-support)
|
||||
| connector-rust version | TDengine version | major features |
|
||||
| :----------------: | :--------------: | :--------------------------------------------------: |
|
||||
| v0.8.12 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. |
|
||||
| v0.8.0 | 3.0.4.0 | Support schemaless insert. |
|
||||
| v0.7.6 | 3.0.3.0 | Support req_id in query. |
|
||||
| v0.6.0 | 3.0.0.0 | Base features. |
|
||||
|
||||
The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues.
|
||||
|
||||
## Installation
|
||||
## Handling exceptions
|
||||
|
||||
When an error occurs, you can obtain detailed information about it as follows:
|
||||
|
||||
```rust
|
||||
match conn.exec(sql) {
|
||||
Ok(_) => {
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("ERROR: {:?}", e);
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## TDengine DataType vs. Rust DataType
|
||||
|
||||
TDengine currently supports timestamp, number, character, and Boolean types. The corresponding type conversions with Rust are as follows:
|
||||
|
||||
| TDengine DataType | Rust DataType |
|
||||
| ----------------- | ----------------- |
|
||||
| TIMESTAMP | Timestamp |
|
||||
| INT | i32 |
|
||||
| BIGINT | i64 |
|
||||
| FLOAT | f32 |
|
||||
| DOUBLE | f64 |
|
||||
| SMALLINT | i16 |
|
||||
| TINYINT | i8 |
|
||||
| BOOL | bool |
|
||||
| BINARY | Vec<u8\> |
|
||||
| NCHAR | String |
|
||||
| JSON | serde_json::Value |
|
||||
|
||||
Note: Only tag columns support the JSON type.
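
To see the mapping in practice, query results can be deserialized into a struct whose field types follow the table above. The sketch below makes some assumptions for illustration: a `meters` table with `ts`, `current`, and `voltage` columns, an already-built `Taos` handle, and the stream-collection traits re-exported by the `taos` prelude.

```rust
use serde::Deserialize;
use taos::*;

// Field types follow the mapping table above:
// TIMESTAMP -> String (chrono types also work), FLOAT -> f32, INT -> i32.
#[derive(Debug, Deserialize)]
struct Meter {
    ts: String,
    current: f32,
    voltage: i32,
}

// Assumes `taos` was built from a DSN as shown later in this chapter.
async fn fetch_meters(taos: &Taos) -> anyhow::Result<Vec<Meter>> {
    let meters: Vec<Meter> = taos
        .query("SELECT ts, current, voltage FROM meters LIMIT 10")
        .await?
        .deserialize()
        .try_collect()
        .await?;
    Ok(meters)
}
```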
|
||||
|
||||
## Installation Steps
|
||||
|
||||
### Pre-installation preparation
|
||||
|
||||
* Install the Rust development toolchain
|
||||
* If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver)
|
||||
|
||||
### Add taos dependency
|
||||
### Install the connectors
|
||||
|
||||
Depending on the connection method, add the [taos][taos] dependency in your Rust project as follows:
|
||||
|
||||
|
@ -120,7 +162,7 @@ The parameters are described as follows:
|
|||
- **username/password**: Username and password used to create connections.
|
||||
- **host/port**: Specifies the server and port to establish a connection. If you do not specify a hostname or port, native connections default to `localhost:6030` and Websocket connections default to `localhost:6041`.
|
||||
- **database**: Specify the default database to connect to. It's optional.
|
||||
- **params**:Optional parameters.
|
||||
- **params**: Optional parameters.
|
||||
|
||||
A sample DSN description string is as follows:
|
||||
|
||||
|
@ -140,7 +182,8 @@ let builder = TaosBuilder::from_dsn("taos://localhost:6030")?;
|
|||
let conn1 = builder.build();
|
||||
|
||||
// use websocket protocol.
|
||||
let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?;
|
||||
let builder2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?;
|
||||
let conn2 = builder2.build();
|
||||
```
|
||||
|
||||
After the connection is established, you can perform operations on your database.
|
||||
|
@ -222,37 +265,191 @@ There are two ways to query data: Using built-in types or the [serde](https://se
|
|||
|
||||
## Usage examples
|
||||
|
||||
### Write data
|
||||
### Create database and tables
|
||||
|
||||
#### SQL Write
|
||||
```rust
|
||||
use taos::*;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let dsn = "taos://localhost:6030";
|
||||
let builder = TaosBuilder::from_dsn(dsn)?;
|
||||
|
||||
let taos = builder.build()?;
|
||||
|
||||
let db = "query";
|
||||
|
||||
// create database
|
||||
taos.exec_many([
|
||||
format!("DROP DATABASE IF EXISTS `{db}`"),
|
||||
format!("CREATE DATABASE `{db}`"),
|
||||
format!("USE `{db}`"),
|
||||
])
|
||||
.await?;
|
||||
|
||||
// create table
|
||||
taos.exec_many([
|
||||
// create super table
|
||||
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
|
||||
TAGS (`groupid` INT, `location` BINARY(16))",
|
||||
// create child table
|
||||
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
|
||||
]).await?;
|
||||
    Ok(())
}
|
||||
```
|
||||
|
||||
> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.
|
||||
|
||||
### Insert data
|
||||
|
||||
<RustInsert />
|
||||
|
||||
#### STMT Write
|
||||
|
||||
<RustBind />
|
||||
|
||||
### Query data
|
||||
|
||||
<RustQuery />
|
||||
|
||||
## API Reference
|
||||
### execute SQL with req_id
|
||||
|
||||
### Connector Constructor
|
||||
|
||||
You create a connector constructor by using a DSN.
|
||||
This req_id can be used to request link tracing.
|
||||
|
||||
```rust
|
||||
let cfg = TaosBuilder::default().build()?;
|
||||
let rs = taos.query_with_req_id("select * from stable where tag1 is null", 1)?;
|
||||
```
|
||||
|
||||
You use the builder object to create multiple connections.
|
||||
### Writing data via parameter binding
|
||||
|
||||
TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.
|
||||
|
||||
For details on parameter binding, see the [API Reference](#stmt-api).
|
||||
|
||||
<RustBind />
|
||||
|
||||
### Schemaless Writing
|
||||
|
||||
TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless).
|
||||
|
||||
<RustSml />
|
||||
|
||||
### Schemaless with req_id
|
||||
|
||||
This req_id can be used to request link tracing.
|
||||
|
||||
```rust
|
||||
let client: Taos = cfg.build()?;
|
||||
let sml_data = SmlDataBuilder::default()
|
||||
.protocol(SchemalessProtocol::Line)
|
||||
.data(data)
|
||||
.req_id(100u64)
|
||||
.build()?;
|
||||
|
||||
client.put(&sml_data)?
|
||||
```
|
||||
|
||||
### Connection pooling
|
||||
### Data Subscription
|
||||
|
||||
TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/).
|
||||
|
||||
#### Create a Topic
|
||||
|
||||
```rust
|
||||
taos.exec_many([
|
||||
// create topic for subscription
|
||||
format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
|
||||
])
|
||||
.await?;
|
||||
```
|
||||
|
||||
#### Create a Consumer
|
||||
|
||||
You create a TMQ connector by using a DSN.
|
||||
|
||||
```rust
|
||||
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
|
||||
```
|
||||
|
||||
Create a consumer:
|
||||
|
||||
```rust
|
||||
let mut consumer = tmq.build()?;
|
||||
```
|
||||
|
||||
#### Subscribe to consume data
|
||||
|
||||
A single consumer can subscribe to one or more topics.
|
||||
|
||||
```rust
|
||||
consumer.subscribe(["tmq_meters"]).await?;
|
||||
```
|
||||
|
||||
The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed.
|
||||
|
||||
```rust
|
||||
{
|
||||
let mut stream = consumer.stream();
|
||||
|
||||
while let Some((offset, message)) = stream.try_next().await? {
|
||||
// get information from offset
|
||||
|
||||
// the topic
|
||||
let topic = offset.topic();
|
||||
// the vgroup id, like partition id in kafka.
|
||||
let vgroup_id = offset.vgroup_id();
|
||||
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
|
||||
|
||||
if let Some(data) = message.into_data() {
|
||||
while let Some(block) = data.fetch_raw_block().await? {
|
||||
// one block for one table, get table name if needed
|
||||
let name = block.table_name();
|
||||
let records: Vec<Record> = block.deserialize().try_collect()?;
|
||||
println!(
|
||||
"** table: {}, got {} records: {:#?}\n",
|
||||
name.unwrap(),
|
||||
records.len(),
|
||||
records
|
||||
);
|
||||
}
|
||||
}
|
||||
consumer.commit(offset).await?;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Get assignments:
|
||||
|
||||
Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0.
|
||||
|
||||
```rust
|
||||
let assignments = consumer.assignments().await.unwrap();
|
||||
```
|
||||
|
||||
#### Assignment subscription Offset
|
||||
|
||||
Seek offset:
|
||||
|
||||
Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0.
|
||||
|
||||
```rust
|
||||
consumer.offset_seek(topic, vgroup_id, offset).await;
|
||||
```
|
||||
|
||||
#### Close subscriptions
|
||||
|
||||
```rust
|
||||
consumer.unsubscribe().await;
|
||||
```
|
||||
|
||||
The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory. A sketch of a DSN that sets these parameters follows the list.
|
||||
|
||||
- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis.
|
||||
- `client.id`: Subscriber client ID.
|
||||
- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group.
|
||||
- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
|
||||
- `auto.commit.interval.ms`: Interval for automatic commits.
|
||||
|
||||
#### Full Sample Code
|
||||
|
||||
For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
|
||||
|
||||
### Use with connection pool
|
||||
|
||||
In complex applications, we recommend enabling connection pools. [taos] implements connection pools based on [r2d2].
|
||||
|
||||
|
@ -282,7 +479,17 @@ In the application code, use `pool.get()? ` to get a connection object [Taos].
|
|||
let taos = pool.get()?;
|
||||
```
|
||||
|
||||
### Connectors
|
||||
### More sample programs
|
||||
|
||||
The source code of the sample application is under `TDengine/examples/rust`:
|
||||
|
||||
[rust example](https://github.com/taosdata/TDengine/tree/3.0/examples/rust)
|
||||
|
||||
## Frequently Asked Questions
|
||||
|
||||
For additional troubleshooting, see [FAQ](../../../train-faq/faq).
|
||||
|
||||
## API Reference
|
||||
|
||||
The [Taos][struct.Taos] object provides an API to perform operations on multiple databases.
|
||||
|
||||
|
@ -368,9 +575,13 @@ Note that Rust asynchronous functions and an asynchronous runtime are required.
|
|||
- `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement.
|
||||
- `.use_database(database: &str)`: Executes the `USE` statement.
|
||||
|
||||
In addition, this structure is also the entry point for [Parameter Binding](#Parameter Binding Interface) and [Line Protocol Interface](#Line Protocol Interface). Please refer to the specific API descriptions for usage.
|
||||
In addition, this structure is also the entry point for Parameter Binding and Line Protocol Interface. Please refer to the specific API descriptions for usage.
|
||||
|
||||
### Bind Interface
|
||||
<p>
|
||||
<a id="stmt-api" style={{color:'#141414'}}>
|
||||
Bind Interface
|
||||
</a>
|
||||
</p>
|
||||
|
||||
Similar to the C interface, Rust provides the bind interface's wrapping. First, the [Taos][struct.taos] object creates a parameter binding object [Stmt] for an SQL statement.
|
||||
|
||||
|
@ -381,7 +592,7 @@ stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?;
|
|||
|
||||
The bind object provides a set of interfaces for implementing parameter binding.
|
||||
|
||||
#### `.set_tbname(name)`
|
||||
`.set_tbname(name)`
|
||||
|
||||
To bind table names.
|
||||
|
||||
|
@ -390,7 +601,7 @@ let mut stmt = taos.stmt("insert into ? values(? ,?)")?;
|
|||
stmt.set_tbname("d0")?;
|
||||
```
|
||||
|
||||
#### `.set_tags(&[tag])`
|
||||
`.set_tags(&[tag])`
|
||||
|
||||
Bind sub-table table names and tag values when the SQL statement uses a super table.
|
||||
|
||||
|
@ -400,7 +611,7 @@ stmt.set_tbname("d0")?;
|
|||
stmt.set_tags(&[Value::VarChar("taos".to_string())])?;
|
||||
```
|
||||
|
||||
#### `.bind(&[column])`
|
||||
`.bind(&[column])`
|
||||
|
||||
Bind value types. Use the [ColumnView] structure to create and bind the required types.
|
||||
|
||||
|
@ -424,7 +635,7 @@ let params = vec![
|
|||
let rows = stmt.bind(¶ms)?.add_batch()?.execute()?;
|
||||
```
|
||||
|
||||
#### `.execute()`
|
||||
`.execute()`
|
||||
|
||||
Execute SQL. [Stmt] objects can be reused, re-binded, and executed after execution. Before execution, ensure that all data has been added to the queue with `.add_batch`.
|
||||
|
||||
|
@ -439,76 +650,6 @@ stmt.execute()?;
|
|||
|
||||
For a working example, see [GitHub](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs).
|
||||
|
||||
### Subscriptions
|
||||
|
||||
TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/).
|
||||
|
||||
You create a TMQ connector by using a DSN.
|
||||
|
||||
```rust
|
||||
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
|
||||
```
|
||||
|
||||
Create a consumer:
|
||||
|
||||
```rust
|
||||
let mut consumer = tmq.build()?;
|
||||
```
|
||||
|
||||
A single consumer can subscribe to one or more topics.
|
||||
|
||||
```rust
|
||||
consumer.subscribe(["tmq_meters"]).await?;
|
||||
```
|
||||
|
||||
The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed.
|
||||
|
||||
```rust
|
||||
{
|
||||
let mut stream = consumer.stream();
|
||||
|
||||
while let Some((offset, message)) = stream.try_next().await? {
|
||||
// get information from offset
|
||||
|
||||
// the topic
|
||||
let topic = offset.topic();
|
||||
// the vgroup id, like partition id in kafka.
|
||||
let vgroup_id = offset.vgroup_id();
|
||||
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
|
||||
|
||||
if let Some(data) = message.into_data() {
|
||||
while let Some(block) = data.fetch_raw_block().await? {
|
||||
// one block for one table, get table name if needed
|
||||
let name = block.table_name();
|
||||
let records: Vec<Record> = block.deserialize().try_collect()?;
|
||||
println!(
|
||||
"** table: {}, got {} records: {:#?}\n",
|
||||
name.unwrap(),
|
||||
records.len(),
|
||||
records
|
||||
);
|
||||
}
|
||||
}
|
||||
consumer.commit(offset).await?;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Unsubscribe:
|
||||
|
||||
```rust
|
||||
consumer.unsubscribe().await;
|
||||
```
|
||||
|
||||
The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory.
|
||||
|
||||
- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis.
|
||||
- `client.id`: Subscriber client ID.
|
||||
- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group.
|
||||
- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
|
||||
- `auto.commit.interval.ms`: Interval for automatic commits.
|
||||
|
||||
For more information, see [GitHub sample file](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
|
||||
|
||||
For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos).
|
||||
|
||||
|
|
|
@ -20,14 +20,72 @@ The source code for the Python connector is hosted on [GitHub](https://github.co
|
|||
- The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client.
|
||||
- REST connections are supported on all platforms that can run Python.
|
||||
|
||||
### Supported features
|
||||
|
||||
- Native connections support all the core features of TDengine, including connection management, SQL execution, bind interface, subscriptions, and schemaless writing.
|
||||
- REST connections support features such as connection management and SQL execution. (SQL execution allows you to: manage databases, tables, and supertables, write data, query data, create continuous queries, etc.).
|
||||
|
||||
## Version selection
|
||||
|
||||
We recommend using the latest version of `taospy`, regardless of the version of TDengine.
|
||||
|
||||
## Supported features
|
||||
|Python Connector Version|major changes|
|
||||
|:-------------------:|:----:|
|
||||
|2.7.9|support for getting assignment and seek function on subscription|
|
||||
|2.7.8|add `execute_many` method|
|
||||
|
||||
- Native connections support all the core features of TDengine, including connection management, SQL execution, bind interface, subscriptions, and schemaless writing.
|
||||
- REST connections support features such as connection management and SQL execution. (SQL execution allows you to: manage databases, tables, and supertables, write data, query data, create continuous queries, etc.).
|
||||
|Python Websocket Connector Version|major changes|
|
||||
|:----------------------------:|:-----:|
|
||||
|0.2.5|1. support for getting assignment and seek function on subscription <br/> 2. support schemaless <br/> 3. support STMT|
|
||||
|0.2.4|support `unsubscribe` on subscription|
|
||||
|
||||
## Handling Exceptions
|
||||
|
||||
The Python connector can raise the following types of exceptions:
|
||||
|
||||
- The exception of Python Connector itself.
|
||||
- The exception of native library.
|
||||
- The exception of websocket.
|
||||
- The exception of subscription.
|
||||
- The exception of other TDengine function modules.
|
||||
|
||||
|Error Type|Description|Suggested Actions|
|
||||
|:--------:|:---------:|:---------------:|
|
||||
|InterfaceError|the native library is too old to support this function|please check the TDengine client version|
|
||||
|ConnectionError|connection error|please check TDengine's status and the connection params|
|
||||
|DatabaseError|database error|please upgrade the Python connector to the latest version|
|
||||
|OperationalError|operation error||
|
||||
|ProgrammingError|||
|
||||
|StatementError|the exception of stmt||
|
||||
|ResultError|||
|
||||
|SchemalessError|the exception of schemaless insert||
|
||||
|TmqError|the exception of tmq||
|
||||
|
||||
Exceptions in Python are usually handled with try-except. For exception handling, please refer to [Python Errors and Exceptions Documentation](https://docs.python.org/3/tutorial/errors.html).
|
||||
|
||||
All exceptions from the Python Connector are thrown directly. Applications should handle these exceptions. For example:
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/handle_exception.py}}
|
||||
```
|
||||
|
||||
## TDengine DataType vs. Python DataType
|
||||
|
||||
TDengine currently supports timestamp, number, character, and Boolean types. The corresponding type conversions with Python are as follows:
|
||||
|
||||
|TDengine DataType|Python DataType|
|
||||
|:---------------:|:-------------:|
|
||||
|TIMESTAMP|datetime|
|
||||
|INT|int|
|
||||
|BIGINT|int|
|
||||
|FLOAT|float|
|
||||
|DOUBLE|float|
|
||||
|SMALLINT|int|
|
||||
|TINYINT|int|
|
||||
|BOOL|bool|
|
||||
|BINARY|str|
|
||||
|NCHAR|str|
|
||||
|JSON|str|
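
As a quick check of this mapping, the sketch below fetches one row and prints the Python type of each returned value. The database and table names are assumptions for illustration.

```python
import taos

conn = taos.connect()
# Assumed table: test.meters (ts TIMESTAMP, current FLOAT, voltage INT, location BINARY)
rows = conn.query("SELECT ts, current, voltage, location FROM test.meters LIMIT 1").fetch_all()
for ts, current, voltage, location in rows:
    # Expected types per the table above: datetime, float, int, str
    print(type(ts), type(current), type(voltage), type(location))
conn.close()
```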
|
||||
|
||||
## Installation
|
||||
|
||||
|
@ -255,7 +313,7 @@ The `connect()` function returns a `taos.TaosConnection` instance. In client-sid
|
|||
|
||||
All arguments to the `connect()` function are optional keyword arguments. The following are the connection parameters specified.
|
||||
|
||||
- `url`: The URL of taosAdapter REST service. The default is <http://localhost:6041>.
|
||||
- `url`: The URL of taosAdapter REST service. The default is <http://localhost:6041>.
|
||||
- `user`: TDengine user name. The default is `root`.
|
||||
- `password`: TDengine user password. The default is `taosdata`.
|
||||
- `timeout`: HTTP request timeout. Enter a value in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed.
|
||||
|
@ -343,6 +401,8 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
|
|||
</TabItem>
|
||||
<TabItem value="websocket" label="WebSocket connection">
|
||||
|
||||
The `Connection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods).
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/connect_websocket_examples.py:basic}}
|
||||
```
|
||||
|
@ -353,6 +413,46 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
|
|||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Querying Data
|
||||
|
||||
<Tabs defaultValue="rest">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
The `query` method of the `TaosConnection` class can be used to query data and return the result data of type `TaosResult`.
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/connection_usage_native_reference.py:query}}
|
||||
```
|
||||
|
||||
:::tip
|
||||
The queried results can only be fetched once. For example, only one of `fetch_all()` and `fetch_all_into_dict()` can be used in the example above. Repeated fetches will result in an empty list.
|
||||
:::
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="rest" label="REST connection">
|
||||
|
||||
The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/rest_client_example.py}}
|
||||
```
|
||||
|
||||
For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="websocket" label="WebSocket connection">
|
||||
|
||||
The `query` method of the `TaosConnection` class can be used to query data and return the result data of type `TaosResult`.
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/connect_websocket_examples.py:basic}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Usage with req_id
|
||||
|
||||
By using the optional req_id parameter, you can specify a request ID that can be used for tracing.
|
||||
|
@ -362,7 +462,7 @@ By using the optional req_id parameter, you can specify a request ID that can be
|
|||
|
||||
##### TaosConnection class
|
||||
|
||||
The `TaosConnection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods).
|
||||
As the way to connect introduced above but add `req_id` argument.
|
||||
|
||||
```python title="execute method"
|
||||
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:insert}}
|
||||
|
@ -372,13 +472,9 @@ The `TaosConnection` class contains both an implementation of the PEP249 Connect
|
|||
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:query}}
|
||||
```
|
||||
|
||||
:::tip
|
||||
The queried results can only be fetched once. For example, only one of `fetch_all()` and `fetch_all_into_dict()` can be used in the example above. Repeated fetches will result in an empty list.
|
||||
:::
|
||||
|
||||
##### Use of TaosResult class
|
||||
|
||||
In the above example of using the `TaosConnection` class, we have shown two ways to get the result of a query: `fetch_all()` and `fetch_all_into_dict()`. In addition, `TaosResult` also provides methods to iterate through the result set by rows (`rows_iter`) or by data blocks (`blocks_iter`). Using these two methods will be more efficient in scenarios where the query has a large amount of data.
|
||||
As the way to fetch data introduced above but add `req_id` argument.
|
||||
|
||||
```python title="blocks_iter method"
|
||||
{{#include docs/examples/python/result_set_with_req_id_examples.py}}
|
||||
|
@ -391,17 +487,12 @@ The `TaosConnection` class and the `TaosResult` class already implement all the
|
|||
{{#include docs/examples/python/cursor_usage_native_reference_with_req_id.py}}
|
||||
```
|
||||
|
||||
:::note
|
||||
The TaosCursor class uses native connections for write and query operations. In a client-side multi-threaded scenario, this cursor instance must remain thread exclusive and cannot be shared across threads for use, otherwise, it will result in errors in the returned results.
|
||||
|
||||
:::
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="rest" label="REST connection">
|
||||
|
||||
##### Use of TaosRestCursor class
|
||||
|
||||
The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.
|
||||
As the way to connect introduced above but add `req_id` argument.
|
||||
|
||||
```python title="Use of TaosRestCursor"
|
||||
{{#include docs/examples/python/connect_rest_with_req_id_examples.py:basic}}
|
||||
|
@ -421,8 +512,11 @@ The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-ap
|
|||
For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="websocket" label="WebSocket connection">
|
||||
|
||||
As the way to connect introduced above but add `req_id` argument.
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/connect_websocket_with_req_id_examples.py:basic}}
|
||||
```
|
||||
|
@ -459,11 +553,357 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
|
|||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Subscription
|
||||
|
||||
The connector supports data subscription. For more information about subscription, please refer to [Data Subscription](../../../develop/tmq/).
|
||||
|
||||
<Tabs defaultValue="native">
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
The `Consumer` class in the connector provides the subscription API.
|
||||
|
||||
##### Create Consumer
|
||||
|
||||
The syntax for creating a consumer is `consumer = Consumer(configs)`. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/).
|
||||
|
||||
```python
|
||||
from taos.tmq import Consumer
|
||||
|
||||
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
||||
```
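
A more complete configuration might look like the sketch below; the values are placeholders, and the full list of options is described in [Data Subscription](../../../develop/tmq/).

```python
from taos.tmq import Consumer

consumer = Consumer(
    {
        "group.id": "group1",            # required
        "client.id": "client1",
        "td.connect.ip": "127.0.0.1",
        "td.connect.user": "root",
        "td.connect.pass": "taosdata",
        "auto.offset.reset": "earliest",
        "enable.auto.commit": "true",
        "auto.commit.interval.ms": "1000",
    }
)
```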
|
||||
|
||||
##### Subscribe topics
|
||||
|
||||
The `subscribe` function is used to subscribe to a list of topics.
|
||||
|
||||
```python
|
||||
consumer.subscribe(['topic1', 'topic2'])
|
||||
```
|
||||
|
||||
##### Consume
|
||||
|
||||
The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a float representing the timeout in seconds. It returns a `Message` if data is received before the timeout, or `None` otherwise. You have to handle error messages in the response data.
|
||||
|
||||
```python
|
||||
while True:
|
||||
res = consumer.poll(1)
|
||||
if not res:
|
||||
continue
|
||||
err = res.error()
|
||||
if err is not None:
|
||||
raise err
|
||||
val = res.value()
|
||||
|
||||
for block in val:
|
||||
print(block.fetchall())
|
||||
```
|
||||
|
||||
##### assignment
|
||||
|
||||
The `assignment` function is used to get the assignment of the topic.
|
||||
|
||||
```python
|
||||
assignments = consumer.assignment()
|
||||
```
|
||||
|
||||
##### Seek
|
||||
|
||||
The `seek` function is used to reset the consumption offset of a topic partition.
|
||||
|
||||
```python
|
||||
from taos.tmq import TopicPartition

tp = TopicPartition(topic='topic1', partition=0, offset=0)
|
||||
consumer.seek(tp)
|
||||
```
|
||||
|
||||
##### After consuming data
|
||||
|
||||
You should unsubscribe from the topics and close the consumer after consuming.
|
||||
|
||||
```python
|
||||
consumer.unsubscribe()
|
||||
consumer.close()
|
||||
```
|
||||
|
||||
##### Tmq subscription example
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/tmq_example.py}}
|
||||
```
|
||||
|
||||
##### assignment and seek example
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="websocket" label="WebSocket connection">
|
||||
|
||||
In addition to native connections, the connector also supports subscriptions via websockets.
|
||||
|
||||
##### Create Consumer
|
||||
|
||||
The syntax for creating a consumer is "consumer = consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer).
|
||||
|
||||
```python
|
||||
import taosws
|
||||
|
||||
consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
|
||||
```
|
||||
|
||||
##### Subscribe topics
|
||||
|
||||
The `subscribe` function is used to subscribe to a list of topics.
|
||||
|
||||
```python
|
||||
consumer.subscribe(['topic1', 'topic2'])
|
||||
```
|
||||
|
||||
##### Consume
|
||||
|
||||
The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a float representing the timeout in seconds. It returns a `Message` if data is received before the timeout, or `None` otherwise. You have to handle error messages in the response data.
|
||||
|
||||
```python
|
||||
while True:
|
||||
res = consumer.poll(timeout=1.0)
|
||||
if not res:
|
||||
continue
|
||||
err = res.error()
|
||||
if err is not None:
|
||||
raise err
|
||||
    for block in res:
|
||||
for row in block:
|
||||
print(row)
|
||||
```
|
||||
|
||||
##### assignment
|
||||
|
||||
The `assignment` function is used to get the assignment of the topic.
|
||||
|
||||
```python
|
||||
assignments = consumer.assignment()
|
||||
```
|
||||
|
||||
##### Seek
|
||||
|
||||
The `seek` function is used to reset the consumption offset of a topic partition.
|
||||
|
||||
```python
|
||||
consumer.seek(topic='topic1', partition=0, offset=0)
|
||||
```
|
||||
|
||||
##### After consuming data
|
||||
|
||||
You should unsubscribe from the topics and close the consumer after consuming.
|
||||
|
||||
```python
|
||||
consumer.unsubscribe()
|
||||
consumer.close()
|
||||
```
|
||||
|
||||
##### Subscription example
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/tmq_websocket_example.py}}
|
||||
```
|
||||
|
||||
##### Assignment and seek example
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Schemaless Insert
|
||||
|
||||
The connector supports schemaless insert.
|
||||
|
||||
<Tabs defaultValue="list">
|
||||
<TabItem value="list" label="List Insert">
|
||||
|
||||
##### Simple insert
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/schemaless_insert.py}}
|
||||
```
|
||||
|
||||
##### Insert with ttl argument
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/schemaless_insert_ttl.py}}
|
||||
```
|
||||
|
||||
##### Insert with req_id argument
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/schemaless_insert_req_id.py}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="raw" label="Raw Insert">
|
||||
|
||||
##### Simple insert
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/schemaless_insert_raw.py}}
|
||||
```
|
||||
|
||||
##### Insert with ttl argument
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/schemaless_insert_raw_ttl.py}}
|
||||
```
|
||||
|
||||
##### Insert with req_id argument
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/schemaless_insert_raw_req_id.py}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Parameter Binding
|
||||
|
||||
The Python connector provides a parameter binding api for inserting data. Similar to most databases, TDengine currently only supports the question mark `?` to indicate the parameters to be bound.
|
||||
|
||||
<Tabs>
|
||||
<TabItem value="native" label="native connection">
|
||||
|
||||
##### Create Stmt
|
||||
|
||||
Call the `statement` method in `Connection` to create the `stmt` for parameter binding.
|
||||
|
||||
```
|
||||
import taos
|
||||
|
||||
conn = taos.connect()
|
||||
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
|
||||
```
|
||||
|
||||
##### parameter binding
|
||||
|
||||
Call the `new_multi_binds` function to create the parameter list for parameter bindings.
|
||||
|
||||
```
|
||||
params = taos.new_multi_binds(16)
|
||||
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
|
||||
params[1].bool((True, None, False))
|
||||
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
|
||||
params[3].tinyint([0, 127, None])
|
||||
params[4].smallint([3, None, 2])
|
||||
params[5].int([3, 4, None])
|
||||
params[6].bigint([3, 4, None])
|
||||
params[7].tinyint_unsigned([3, 4, None])
|
||||
params[8].smallint_unsigned([3, 4, None])
|
||||
params[9].int_unsigned([3, 4, None])
|
||||
params[10].bigint_unsigned([3, 4, None])
|
||||
params[11].float([3, None, 1])
|
||||
params[12].double([3, None, 1.2])
|
||||
params[13].binary(["abc", "dddafadfadfadfadfa", None])
|
||||
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
|
||||
params[15].timestamp([None, None, 1626861392591])
|
||||
```
|
||||
|
||||
Call the `bind_param` (for a single row) method or the `bind_param_batch` (for multiple rows) method to set the values.
|
||||
|
||||
```
|
||||
stmt.bind_param_batch(params)
|
||||
```
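
To bind a single row instead, `taos.new_bind_params` can be used together with `bind_param`. This is a sketch; the column layout mirrors the statement above and the values are placeholders.

```
# one value per column of a single row
params = taos.new_bind_params(16)
params[0].timestamp(1626861392589)
params[1].bool(True)
params[2].tinyint(-128)  # -128 is tinyint null
params[3].tinyint(0)
params[4].smallint(3)
params[5].int(3)
params[6].bigint(3)
params[7].tinyint_unsigned(3)
params[8].smallint_unsigned(3)
params[9].int_unsigned(3)
params[10].bigint_unsigned(3)
params[11].float(3.0)
params[12].double(3.3)
params[13].binary("abc")
params[14].nchar("涛思数据")
params[15].timestamp(1626861392591)
stmt.bind_param(params)
```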
|
||||
|
||||
##### execute sql
|
||||
|
||||
Call the `execute` method to execute the SQL statement.
|
||||
|
||||
```
|
||||
stmt.execute()
|
||||
```
|
||||
|
||||
##### Close Stmt
|
||||
|
||||
```
|
||||
stmt.close()
|
||||
```
|
||||
|
||||
##### Example
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/stmt_example.py}}
|
||||
```
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="websocket" label="WebSocket connection">
|
||||
|
||||
##### Create Stmt
|
||||
|
||||
Call the `statement` method in `Connection` to create the `stmt` for parameter binding.
|
||||
|
||||
```
|
||||
import taosws
|
||||
|
||||
conn = taosws.connect('taosws://localhost:6041/test')
|
||||
stmt = conn.statement()
|
||||
```
|
||||
|
||||
##### Prepare sql
|
||||
|
||||
Call the `prepare` method of the stmt to prepare the SQL statement.
|
||||
|
||||
```
|
||||
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
|
||||
```
|
||||
|
||||
##### parameter binding
|
||||
|
||||
Call the `bind_param` method to bind parameters.
|
||||
|
||||
```
|
||||
stmt.bind_param([
|
||||
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
|
||||
taosws.ints_to_column([1, 2, 3, 4]),
|
||||
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
|
||||
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
|
||||
])
|
||||
```
|
||||
|
||||
Call the `add_batch` method to add parameters to the batch.
|
||||
|
||||
```
|
||||
stmt.add_batch()
|
||||
```
|
||||
|
||||
##### execute sql
|
||||
|
||||
Call the `execute` method to execute the SQL statement.
|
||||
|
||||
```
|
||||
stmt.execute()
|
||||
```
|
||||
|
||||
##### Close Stmt
|
||||
|
||||
```
|
||||
stmt.close()
|
||||
```
|
||||
|
||||
##### Example
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/stmt_websocket_example.py}}
|
||||
```
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Other sample programs
|
||||
|
||||
| Example program links | Example program content |
|
||||
| ------------------------------------------------------------------------------------------------------------- | ------------------------ |
|
||||
| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py) | parameter binding, bind multiple rows at once |
|
||||
| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | parameter binding, bind one row at a time |
|
||||
| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB line protocol writing |
|
||||
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | Use JSON type tags |
|
||||
|
@ -471,14 +911,6 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
|
|||
|
||||
## Other notes
|
||||
|
||||
### Exception handling
|
||||
|
||||
All errors from database operations are thrown directly as exceptions and the error message from the database is passed up the exception stack. The application is responsible for exception handling. For example:
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/handle_exception.py}}
|
||||
```
|
||||
|
||||
### About nanoseconds
|
||||
|
||||
Due to the current imperfection of Python's nanosecond support (see the link below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms` and `us`, which application developers will need to handle on their own. We recommend using pandas' `to_datetime()`. The Python Connector may modify the interface in the future if Python officially supports nanoseconds in full.
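
For example, nanosecond integers returned by the connector can be converted with pandas. This is a sketch; the values are placeholders.

```python
import pandas as pd

# nanosecond-precision integers as returned by the connector
ns_values = [1626861392589111111, 1626861392590222222]
timestamps = pd.to_datetime(ns_values, unit="ns")
print(timestamps)
```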
|
||||
|
|
|
@ -321,18 +321,18 @@ let cursor = conn.cursor();
|
|||
| package name | version | TDengine version | Description |
|
||||
|------------------|---------|---------------------|------------------------------------------------------------------|
|
||||
| @tdengine/client | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
|
||||
| td2.0-connector | 2.0.12 | 2.4.x;2.5.x;2.6.x | Fixed cursor.close() bug. |
|
||||
| td2.0-connector | 2.0.11 | 2.4.x;2.5.x;2.6.x | Supports parameter binding, JSON tags and schemaless interface |
|
||||
| td2.0-connector | 2.0.10 | 2.4.x;2.5.x;2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription |
|
||||
| td2.0-connector | 2.0.12 | 2.4.x; 2.5.x; 2.6.x | Fixed cursor.close() bug. |
|
||||
| td2.0-connector | 2.0.11 | 2.4.x; 2.5.x; 2.6.x | Supports parameter binding, JSON tags and schemaless interface |
|
||||
| td2.0-connector | 2.0.10 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, connection queries, system information, and data subscription |
|
||||
### REST Connector
|
||||
|
||||
| package name | version | TDengine version | Description |
|
||||
|----------------------|---------|---------------------|---------------------------------------------------------------------------|
|
||||
| @tdengine/rest | 3.0.0 | 3.0.0 | Supports TDengine 3.0. Not compatible with TDengine 2.x. |
|
||||
| td2.0-rest-connector | 1.0.7 | 2.4.x;2.5.x;2.6.x | Removed default port 6041。 |
|
||||
| td2.0-rest-connector | 1.0.6 | 2.4.x;2.5.x;2.6.x | Fixed affectRows bug with create, insert, update, and alter. |
|
||||
| td2.0-rest-connector | 1.0.5 | 2.4.x;2.5.x;2.6.x | Support cloud token |
|
||||
| td2.0-rest-connector | 1.0.3 | 2.4.x;2.5.x;2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries |
|
||||
| td2.0-rest-connector | 1.0.7 | 2.4.x; 2.5.x; 2.6.x | Removed default port 6041 |
|
||||
| td2.0-rest-connector | 1.0.6 | 2.4.x; 2.5.x; 2.6.x | Fixed affectRows bug with create, insert, update, and alter. |
|
||||
| td2.0-rest-connector | 1.0.5 | 2.4.x; 2.5.x; 2.6.x | Support cloud token |
|
||||
| td2.0-rest-connector | 1.0.3 | 2.4.x; 2.5.x; 2.6.x | Supports connection management, standard queries, system information, error information, and continuous queries |
|
||||
|
||||
## API Reference
|
||||
|
||||
|
|
|
@ -165,7 +165,7 @@ The parameters are described as follows:
|
|||
* **username/password**: Username and password used to create connections.
|
||||
* **host/port**: Specifies the server and port to establish a connection. Websocket connections default to `localhost:6041`.
|
||||
* **database**: Specify the default database to connect to. It's optional.
|
||||
* **params**:Optional parameters.
|
||||
* **params**: Optional parameters.
|
||||
|
||||
A sample DSN description string is as follows:
|
||||
|
||||
|
@ -279,7 +279,7 @@ ws://localhost:6041/test
|
|||
| TDengine.Connector | Description |
|
||||
|--------------------|--------------------------------|
|
||||
| 3.0.2 | Support .NET Framework 4.5 and above. Support .Net standard 2.0. Nuget package includes dynamic library for WebSocket.|
|
||||
| 3.0.1 | Support WebSocket and Cloud,With function query, insert, and parameter binding|
|
||||
| 3.0.1 | Support WebSocket and Cloud, with query, insert, and parameter binding |
|
||||
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
|
||||
| 1.0.7 | Fixed TDengine.Query() memory leak. |
|
||||
| 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. |
|
||||
|
|
|
@ -8,23 +8,23 @@ description: This document describes the TDengine PHP connector.
|
|||
|
||||
PHP Connector relies on TDengine client driver.
|
||||
|
||||
Project Repository:<https://github.com/Yurunsoft/php-tdengine>
|
||||
Project Repository: <https://github.com/Yurunsoft/php-tdengine>
|
||||
|
||||
After TDengine client or server is installed, `taos.h` is located at:
|
||||
|
||||
- Linux:`/usr/local/taos/include`
|
||||
- Windows:`C:\TDengine\include`
|
||||
- macOS:`/usr/local/include`
|
||||
- Linux: `/usr/local/taos/include`
|
||||
- Windows: `C:\TDengine\include`
|
||||
- macOS: `/usr/local/include`
|
||||
|
||||
TDengine client driver is located at:
|
||||
|
||||
- Linux: `/usr/local/taos/driver/libtaos.so`
|
||||
- Windows: `C:\TDengine\taos.dll`
|
||||
- macOS:`/usr/local/lib/libtaos.dylib`
|
||||
- macOS: `/usr/local/lib/libtaos.dylib`
|
||||
|
||||
## Supported Platforms
|
||||
|
||||
- Windows、Linux、MacOS
|
||||
- Windows, Linux, and macOS
|
||||
|
||||
- PHP >= 7.4
|
||||
|
||||
|
@ -44,7 +44,7 @@ Regarding how to install TDengine client driver please refer to [Install Client
|
|||
|
||||
### Install php-tdengine
|
||||
|
||||
**Download Source Code Package and Unzip:**
|
||||
**Download Source Code Package and Unzip:**
|
||||
|
||||
```shell
|
||||
curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive/refs/tags/v1.0.2.tar.gz \
|
||||
|
@ -54,13 +54,13 @@ curl -L -o php-tdengine.tar.gz https://github.com/Yurunsoft/php-tdengine/archive
|
|||
|
||||
> Version number `v1.0.2` is only for example, it can be replaced to any newer version, please find available versions in [TDengine PHP Connector Releases](https://github.com/Yurunsoft/php-tdengine/releases).
|
||||
|
||||
**Non-Swoole Environment:**
|
||||
**Non-Swoole Environment:**
|
||||
|
||||
```shell
|
||||
phpize && ./configure && make -j && make install
|
||||
```
|
||||
|
||||
**Specify TDengine location:**
|
||||
**Specify TDengine location:**
|
||||
|
||||
```shell
|
||||
phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 && make -j && make install
|
||||
|
@ -69,7 +69,7 @@ phpize && ./configure --with-tdengine-dir=/usr/local/Cellar/tdengine/3.0.0.0 &&
|
|||
> `--with-tdengine-dir=` is followed by TDengine location.
|
||||
> It's useful in case the TDengine installation location can't be found automatically, or on macOS.
|
||||
|
||||
**Swoole Environment:**
|
||||
**Swoole Environment:**
|
||||
|
||||
```shell
|
||||
phpize && ./configure --enable-swoole && make -j && make install
|
||||
|
|
|
@ -48,7 +48,6 @@ Comparing the connector support for TDengine functional features as follows.
|
|||
| **Parameter Binding** | Support | Support | Support | Support | Support | Support |
|
||||
| **Subscription (TMQ)** | Support | Support | Support | Support | Support | Support |
|
||||
| **Schemaless** | Support | Support | Support | Support | Support | Support |
|
||||
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||
|
||||
:::info
|
||||
The different database framework specifications for various programming languages do not mean that all C/C++ interfaces need a wrapper.
|
||||
|
@ -60,11 +59,10 @@ The different database framework specifications for various programming language
|
|||
| -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- |
|
||||
| **Connection Management** | Support | Support | Support | Support | Support | Support |
|
||||
| **Regular Query** | Support | Support | Support | Support | Support | Support |
|
||||
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
|
||||
| **Parameter Binding** | Support | Not Supported | Support | Support | Not Supported | Support |
|
||||
| **Subscription (TMQ)** | Support | Support | Support | Not Supported | Not Supported | Support |
|
||||
| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||
| **Schemaless** | Support | Not Supported | Support | Not Supported | Not Supported | Not Supported |
|
||||
| **Bulk Pulling (based on WebSocket)** | Support | Support | Support | Support | Support | Support |
|
||||
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||
|
||||
:::warning
|
||||
|
||||
|
|
|
@ -54,94 +54,91 @@ Command-line arguments take precedence over environment variables over configura
|
|||
|
||||
```shell
|
||||
Usage of taosAdapter:
|
||||
--collectd.db string collectd db name. Env "TAOS_ADAPTER_COLLECTD_DB" (default "collectd")
|
||||
--collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true)
|
||||
--collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata")
|
||||
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
|
||||
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL"
|
||||
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
|
||||
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
|
||||
-c, --config string config path default /etc/taos/taosadapter.toml
|
||||
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
|
||||
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
|
||||
--cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS"
|
||||
--cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS"
|
||||
--cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets"
|
||||
--cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers"
|
||||
--debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" (default true)
|
||||
--help Print this help message and exit
|
||||
--httpCodeServerError Use a non-200 http status code when taosd returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR"
|
||||
--influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true)
|
||||
--log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL"
|
||||
--log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
|
||||
--log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30)
|
||||
--log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB")
|
||||
--log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s)
|
||||
--log.sqlRotationCount uint record sql log rotation count. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_COUNT" (default 2)
|
||||
--log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB")
|
||||
--log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s)
|
||||
--logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info")
|
||||
--monitor.collectDuration duration Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s)
|
||||
--monitor.disable Whether to disable monitoring. Env "TAOS_ADAPTER_MONITOR_DISABLE"
|
||||
--monitor.disableCollectClientIP Whether to disable collecting clientIP. Env "TAOS_ADAPTER_MONITOR_DISABLE_COLLECT_CLIENT_IP"
|
||||
--monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY"
|
||||
--monitor.incgroup Whether running in cgroup. Env "TAOS_ADAPTER_MONITOR_INCGROUP"
|
||||
--monitor.password string TDengine password. Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata")
|
||||
--monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80)
|
||||
--monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70)
|
||||
--monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root")
|
||||
--monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s)
|
||||
--monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD"
|
||||
--node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE"
|
||||
--node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE"
|
||||
--node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter")
|
||||
--node_exporter.enable enable node_exporter. Env "TAOS_ADAPTER_NODE_EXPORTER_ENABLE"
|
||||
--node_exporter.gatherDuration duration node_exporter gather duration. Env "TAOS_ADAPTER_NODE_EXPORTER_GATHER_DURATION" (default 5s)
|
||||
--node_exporter.httpBearerTokenString string node_exporter http bearer token. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_BEARER_TOKEN_STRING"
|
||||
--node_exporter.httpPassword string node_exporter http password. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_PASSWORD"
|
||||
--node_exporter.httpUsername string node_exporter http username. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_USERNAME"
|
||||
--node_exporter.insecureSkipVerify node_exporter skip ssl check. Env "TAOS_ADAPTER_NODE_EXPORTER_INSECURE_SKIP_VERIFY" (default true)
|
||||
--node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE"
|
||||
--node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata")
|
||||
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
|
||||
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"
|
||||
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
|
||||
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
|
||||
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
|
||||
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
|
||||
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
|
||||
--opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE"
|
||||
--opentsdb_telnet.flushInterval duration opentsdb_telnet flush interval (0s means not valid) . Env "TAOS_ADAPTER_OPENTSDB_TELNET_FLUSH_INTERVAL"
|
||||
--opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250)
|
||||
--opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata")
|
||||
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
|
||||
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
|
||||
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"
|
||||
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
|
||||
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT"
|
||||
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT"
|
||||
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE"
|
||||
-P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041)
|
||||
--prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true)
|
||||
--restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1)
|
||||
--statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000)
|
||||
--statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd")
|
||||
--statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true)
|
||||
--statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true)
|
||||
--statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true)
|
||||
--statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true)
|
||||
--statsd.enable enable statsd. Env "TAOS_ADAPTER_STATSD_ENABLE" (default true)
|
||||
--statsd.gatherInterval duration statsd gather interval. Env "TAOS_ADAPTER_STATSD_GATHER_INTERVAL" (default 5s)
|
||||
--statsd.maxTCPConnections int statsd max tcp connections. Env "TAOS_ADAPTER_STATSD_MAX_TCP_CONNECTIONS" (default 250)
|
||||
--statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata")
|
||||
--statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044)
|
||||
--statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp")
|
||||
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
|
||||
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL"
|
||||
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
|
||||
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
|
||||
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
|
||||
--version Print the version and exit
|
||||
--collectd.db string collectd db name. Env "TAOS_ADAPTER_COLLECTD_DB" (default "collectd")
|
||||
--collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true)
|
||||
--collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata")
|
||||
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
|
||||
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL"
|
||||
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
|
||||
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
|
||||
-c, --config string config path default /etc/taos/taosadapter.toml
|
||||
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
|
||||
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
|
||||
--cors.allowHeaders stringArray cors allow HEADERS. Env "TAOS_ADAPTER_ALLOW_HEADERS"
|
||||
--cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS"
|
||||
--cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets"
--cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers"
|
||||
--debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" (default true)
|
||||
--help Print this help message and exit
|
||||
--httpCodeServerError Use a non-200 http status code when server returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR"
|
||||
--influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true)
|
||||
--log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL"
|
||||
--log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
--log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30)
|
||||
--log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB")
|
||||
--log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s)
|
||||
--log.sqlRotationCount uint record sql log rotation count. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_COUNT" (default 2)
|
||||
--log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB")
|
||||
--log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s)
|
||||
--logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info")
|
||||
--monitor.collectDuration duration Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s)
|
||||
--monitor.disable Whether to disable monitoring. Env "TAOS_ADAPTER_MONITOR_DISABLE"
|
||||
--monitor.disableCollectClientIP Whether to disable collecting clientIP. Env "TAOS_ADAPTER_MONITOR_DISABLE_COLLECT_CLIENT_IP"
|
||||
--monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY"
|
||||
--monitor.incgroup Whether running in cgroup. Env "TAOS_ADAPTER_MONITOR_INCGROUP"
|
||||
--monitor.password string TDengine password. Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata")
|
||||
--monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80)
|
||||
--monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70)
|
||||
--monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root")
--monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s)
|
||||
--monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD"
|
||||
--node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE"
|
||||
--node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE"
|
||||
--node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter")
|
||||
--node_exporter.enable enable node_exporter. Env "TAOS_ADAPTER_NODE_EXPORTER_ENABLE"
|
||||
--node_exporter.gatherDuration duration node_exporter gather duration. Env "TAOS_ADAPTER_NODE_EXPORTER_GATHER_DURATION" (default 5s)
|
||||
--node_exporter.httpBearerTokenString string node_exporter http bearer token. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_BEARER_TOKEN_STRING"
|
||||
--node_exporter.httpPassword string node_exporter http password. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_PASSWORD"
|
||||
--node_exporter.httpUsername string node_exporter http username. Env "TAOS_ADAPTER_NODE_EXPORTER_HTTP_USERNAME"
|
||||
--node_exporter.insecureSkipVerify node_exporter skip ssl check. Env "TAOS_ADAPTER_NODE_EXPORTER_INSECURE_SKIP_VERIFY" (default true)
|
||||
--node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE"
|
||||
--node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata")
|
||||
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
|
||||
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"
|
||||
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
|
||||
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
|
||||
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
|
||||
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
|
||||
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
|
||||
--opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE"
|
||||
--opentsdb_telnet.flushInterval duration opentsdb_telnet flush interval (0s means not valid) . Env "TAOS_ADAPTER_OPENTSDB_TELNET_FLUSH_INTERVAL"
|
||||
--opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250)
|
||||
--opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata")
|
||||
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
|
||||
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
|
||||
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"
|
||||
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
|
||||
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT"
|
||||
--pool.maxConnect int max connections to server. Env "TAOS_ADAPTER_POOL_MAX_CONNECT"
|
||||
--pool.maxIdle int max idle connections to server. Env "TAOS_ADAPTER_POOL_MAX_IDLE"
|
||||
-P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041)
|
||||
--prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true)
|
||||
--restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1)
|
||||
--smlAutoCreateDB Whether to automatically create db when writing with schemaless. Env "TAOS_ADAPTER_SML_AUTO_CREATE_DB"
|
||||
--statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000)
|
||||
--statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd")
--statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true)
|
||||
--statsd.deleteGauges statsd delete gauge cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_GAUGES" (default true)
|
||||
--statsd.deleteSets statsd delete set cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_SETS" (default true)
|
||||
--statsd.deleteTimings statsd delete timing cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_TIMINGS" (default true)
|
||||
--statsd.enable enable statsd. Env "TAOS_ADAPTER_STATSD_ENABLE" (default true)
|
||||
--statsd.gatherInterval duration statsd gather interval. Env "TAOS_ADAPTER_STATSD_GATHER_INTERVAL" (default 5s)
|
||||
--statsd.maxTCPConnections int statsd max tcp connections. Env "TAOS_ADAPTER_STATSD_MAX_TCP_CONNECTIONS" (default 250)
|
||||
--statsd.password string statsd password. Env "TAOS_ADAPTER_STATSD_PASSWORD" (default "taosdata")
|
||||
--statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044)
|
||||
--statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp")
|
||||
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL"
|
||||
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
|
||||
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
|
||||
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
|
||||
--tmq.releaseIntervalMultiplierForAutocommit int When set to autocommit, the interval for message release is a multiple of the autocommit interval, with a default value of 2 and a minimum value of 1 and a maximum value of 10. Env "TAOS_ADAPTER_TMQ_RELEASE_INTERVAL_MULTIPLIER_FOR_AUTOCOMMIT" (default 2)
|
||||
--version Print the version and exit
|
||||
```
|
||||
|
||||
Note:
|
||||
|
@ -332,6 +329,10 @@ This parameter controls the number of results returned by the following interfac
|
|||
|
||||
taosAdapter uses the parameter `httpCodeServerError` to control whether to return a non-200 HTTP status code when the C interface returns an error. When set to true, different HTTP status codes will be returned according to the error code returned by C. For details, see the HTTP Response Code chapter of the [RESTful API](https://docs.tdengine.com/reference/rest-api/).
|
||||
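As a minimal sketch (assuming a local default installation), this behavior can be enabled either with the command-line flag or the environment variable documented above:

```shell
taosadapter --httpCodeServerError
# or, equivalently, via the environment variable
TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR=true taosadapter
```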
|
||||
## Configure whether schemaless writes automatically create DBs
|
||||
|
||||
Starting from version 3.0.4.0, the taosAdapter provides the parameter "smlAutoCreateDB" to control whether to automatically create DBs when writing with the schemaless protocol. The default value is false, which means that the DB will not be automatically created and the user needs to manually create the DB before performing schemaless writing.
|
||||
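For example, automatic DB creation can be switched on with either of the following; this is a sketch assuming taosAdapter is started manually, while service installations would normally set the option in `taosadapter.toml` instead:

```shell
taosadapter --smlAutoCreateDB
# or via the environment variable
TAOS_ADAPTER_SML_AUTO_CREATE_DB=true taosadapter
```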
|
||||
## Troubleshooting
|
||||
|
||||
You can check the taosAdapter running status with the `systemctl status taosadapter` command.
|
||||
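Assuming taosAdapter was installed as a systemd service, recent logs can also be inspected for troubleshooting:

```shell
systemctl status taosadapter
journalctl -u taosadapter --since "10 minutes ago"
```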
|
|
|
@ -245,7 +245,7 @@ The parameters listed in this section apply to all function modes.
|
|||
- ** trying_interval ** : Specify the interval between insert retries. Valid value is a positive number. Only valid when keep trying is enabled. Available with v3.0.9+.
|
||||
|
||||
- ** childtable_from and childtable_to ** : specify the child table range to create. The range is [childtable_from, childtable_to).
|
||||
|
||||
|
||||
- ** continue_if_fail ** : allows the user to specify how taosBenchmark reacts when an insertion fails.
|
||||
|
||||
- "continue_if_fail" : "no" // means taosBenchmark will exit if it fails to insert as default reaction behavior.
|
||||
|
|
|
@ -76,10 +76,9 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
|
|||
-A, --all-databases Dump all databases.
|
||||
-D, --databases=DATABASES Dump listed databases. Use comma to separate
|
||||
database names.
|
||||
-e, --escape-character Use escaped character for database name
|
||||
-N, --without-property Dump database without its properties.
|
||||
-s, --schemaonly Only dump table schemas.
|
||||
-y, --answer-yes Input yes for prompt. It will skip data file
|
||||
checking!
|
||||
-d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy,
|
||||
and lzma.
|
||||
-S, --start-time=START_TIME Start time to dump. Either epoch or
|
||||
|
|
|
@ -12,8 +12,8 @@ After TDengine starts, it automatically writes many metrics in specific interval
|
|||
|
||||
To deploy TDinsight, we need
|
||||
- a single-node TDengine server or a multi-node TDengine cluster, and a [Grafana] server. This dashboard requires TDengine 3.0.1.0 or above, with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters).
|
||||
- taosAdapter has been instaleld and running, please refer to [taosAdapter](../taosadapter).
|
||||
- taosKeeper has been installed and running, please refer to [taosKeeper](../taoskeeper).
|
||||
- taosAdapter has been installed and running, please refer to [taosAdapter](../taosadapter).
|
||||
- taosKeeper has been installed and running, please refer to [taosKeeper](../taosKeeper).
|
||||
|
||||
Please record
|
||||
- The endpoint of taosAdapter REST service, for example `http://tdengine.local:6041`
|
||||
|
@ -149,7 +149,7 @@ curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifica
|
|||
Use the `uid` value obtained above as `-E` input.
|
||||
|
||||
```bash
|
||||
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
||||
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
||||
```
|
||||
|
||||
If you want to monitor multiple TDengine clusters, you need to set up numerous TDinsight dashboards. Setting up non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if using the built-in SMS alerting feature.
|
||||
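For instance, provisioning a second dashboard for another cluster might look like the sketch below; the hostname is hypothetical and the exact semantics of `-n`/`-i`/`-t` should be confirmed with `./TDinsight.sh --help`:

```bash
./TDinsight.sh -a http://tdengine-cluster2:6041 -u root -p taosdata \
  -n tdengine-cluster2 -i tdinsight-cluster2 -t 'TDinsight Cluster 2'
```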
|
@ -233,7 +233,7 @@ After the importing is done, `TDinsight for 3.x` dashboard is available on the p
|
|||
|
||||
In the `TDinsight for 3.x` dashboard, choose the database used by taosKeeper to store monitoring data, you can see the monitoring result.
|
||||
|
||||

|
||||

|
||||
|
||||
## TDinsight dashboard details
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@ description: This document describes the configuration parameters for the TDengi
|
|||
|
||||
## Configuration File on Server Side
|
||||
|
||||
On the server side, the actual service of TDengine is provided by an executable `taosd` whose parameters can be configured in file `taos.cfg` to meet the requirements of different use cases. The default location of `taos.cfg` is `/etc/taos`, but can be changed by using `-c` parameter on the CLI of `taosd`. For example, the configuration file can be put under `/home/user` and used like below
|
||||
On the server side, the actual service of TDengine is provided by an executable `taosd` whose parameters can be configured in file `taos.cfg` to meet the requirements of different use cases. The default location of `taos.cfg` is `/etc/taos` on Linux systems and `C:\TDengine` on Windows systems. The location of the configuration file can be specified with the `-c` parameter on the CLI of `taosd`. For example, on a Linux system the configuration file can be put under `/home/user` and used like below
|
||||
|
||||
```
|
||||
taosd -c /home/user
|
||||
|
@ -19,16 +19,20 @@ taosd -C
|
|||
|
||||
## Configuration File on Client Side
|
||||
|
||||
TDengine CLI `taos` is the tool for users to interact with TDengine. It can share same configuration file as `taosd` or use a separate configuration file. When launching `taos`, parameter `-c` can be used to specify the location where its configuration file is. For example `taos -c /home/cfg` means `/home/cfg/taos.cfg` will be used. If `-c` is not used, the default location of the configuration file is `/etc/taos`. For more details please use `taos --help` to get.
|
||||
TDengine CLI `taos` is the tool for users to interact with TDengine. It can share same configuration file as `taosd` or use a separate configuration file. When launching `taos`, parameter `-c` can be used to specify the location where its configuration file is. For example:
|
||||
|
||||
```
|
||||
taos -c /home/cfg
|
||||
```
|
||||
|
||||
means `/home/cfg/taos.cfg` will be used. If `-c` is not used, the default location of the configuration file is `/etc/taos`. For more details, please run `taos --help`.
|
||||
|
||||
Parameter `-C` can be used on the CLI of `taos` to show its configuration, like below:
|
||||
|
||||
```bash
|
||||
taos -C
|
||||
```
|
||||
|
||||
```bash
|
||||
taos --dump-config
|
||||
```
|
||||
|
||||
## Configuration Parameters
|
||||
|
||||
:::note
|
||||
|
@ -45,19 +49,19 @@ The parameters described in this document by the effect that they have on the sy
|
|||
|
||||
### firstEp
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------------------------------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | The end point of the first dnode in the cluster to be connected to when `taosd` or `taos` is started |
|
||||
| Default | localhost:6030 |
|
||||
| Attribute | Description |
|
||||
| ---------- | ---------------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | The end point of the first dnode in the cluster to be connected to when `taosd` or `taos` is started |
|
||||
| Default | localhost:6030 |
|
||||
|
||||
### secondEp
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------------------------------------------------------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | The end point of the second dnode to be connected to if the firstEp is not available when `taosd` or `taos` is started |
|
||||
| Default | None |
|
||||
| Attribute | Description |
|
||||
| ---------- | ---------------------------------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | The end point of the second dnode to be connected to if the firstEp is not available when `taosd` or `taos` is started |
|
||||
| Default | None |
|
||||
|
||||
### fqdn
|
||||
|
||||
|
@ -65,28 +69,29 @@ The parameters described in this document by the effect that they have on the sy
|
|||
| ------------- | ------------------------------------------------------------------------ |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | The FQDN of the host where `taosd` will be started. It can be an IP address |
|
||||
| Default Value | The first hostname configured for the host |
|
||||
| Note | It should be within 96 bytes | |
|
||||
| Default Value | The first hostname configured for the host |
|
||||
| Note | It should be within 96 bytes |
|
||||
|
||||
### serverPort
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ----------------------------------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | The port for external access after `taosd` is started |
|
||||
| Default Value | 6030 |
|
||||
| Attribute | Description |
|
||||
| ------------- | ----------------------------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | The port for external access after `taosd` is started |
|
||||
| Default Value | 6030 |
|
||||
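As an illustration, these endpoint parameters are plain `name value` pairs in `taos.cfg`; the hostnames below are hypothetical:

```shell
# Append an illustrative endpoint configuration (run with root privileges)
sudo tee -a /etc/taos/taos.cfg > /dev/null <<'EOF'
firstEp    h1.example.com:6030
secondEp   h2.example.com:6030
fqdn       h1.example.com
serverPort 6030
EOF
```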
|
||||
:::note
|
||||
- Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. Below table describes the ports used by TDengine in details.
|
||||
Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. The table below describes the ports used by TDengine in detail.
|
||||
:::
|
||||
| Protocol | Default Port | Description | How to configure |
|
||||
| :------- | :----------- | :----------------------------------------------- | :--------------------------------------------------------------------------------------------- |
|
||||
| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. serverPort |
|
||||
| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) |
|
||||
| TCP | 6043 | Service Port of taosKeeper | The parameter of taosKeeper |
|
||||
| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters.
|
||||
| UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters.
|
||||
| TCP | 6060 | Port of Monitoring Service in Enterprise version | |
|
||||
|
||||
| Protocol | Default Port | Description | How to configure |
|
||||
| :------- | :----------- | :-------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------- |
|
||||
| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. | serverPort |
|
||||
| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) |
|
||||
| TCP | 6043 | Service Port of taosKeeper | The parameter of taosKeeper |
|
||||
| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters. |
|
||||
| UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters. |
|
||||
| TCP | 6060 | Port of Monitoring Service in Enterprise version | |
|
||||
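As a sketch of the firewall side (assuming `ufw` on Ubuntu; adapt to your firewall tool), the default ports in the table above could be opened like this:

```shell
sudo ufw allow 6030/tcp   # client/server and inter-node communication
sudo ufw allow 6041/tcp   # REST connections via taosAdapter
sudo ufw allow 6043/tcp   # taosKeeper
sudo ufw allow 6044/tcp   # StatsD over TCP
sudo ufw allow 6045/udp   # statsd data access over UDP
```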
|
||||
### maxShellConns
|
||||
|
||||
|
@ -97,121 +102,141 @@ The parameters described in this document by the effect that they have on the sy
|
|||
| Value Range | 10-50000000 |
|
||||
| Default Value | 5000 |
|
||||
|
||||
### numOfRpcSessions
|
||||
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------------------------ |
|
||||
| Applicable | Client/Server |
|
||||
| Meaning | The maximum number of connection to create |
|
||||
| Value Range | 100-100000 |
|
||||
| Default Value | 10000 |
|
||||
|
||||
### timeToGetAvailableConn
|
||||
|
||||
| Attribute | Description |
|
||||
| ------------- | ---------------------------------------------- |
|
||||
| Applicable | Client/Server |
|
||||
| Meaning | The maximum waiting time to get available conn |
|
||||
| Value Range | 10-50000000(ms) |
|
||||
| Default Value | 500000 |
|
||||
|
||||
## Monitoring Parameters
|
||||
|
||||
:::note
|
||||
Please note that `taoskeeper` needs to be installed and running to create the `log` database and receive metrics sent by `taosd`, which together form the full monitoring solution.
|
||||
|
||||
:::
|
||||
|
||||
### monitor
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Server only |
|
||||
| Meaning | The switch for monitoring inside server. The main object of monitoring is to collect information about load on physical nodes, including CPU usage, memory usage, disk usage, and network bandwidth. Monitoring information is sent over HTTP to the taosKeeper service specified by `monitorFqdn` and `monitorProt`.
|
||||
| Value Range | 0: monitoring disabled, 1: monitoring enabled |
|
||||
| Default | 0 |
|
||||
| Attribute | Description |
|
||||
| ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Server only |
|
||||
| Meaning | The switch for monitoring inside server. The main object of monitoring is to collect information about load on physical nodes, including CPU usage, memory usage, disk usage, and network bandwidth. Monitoring information is sent over HTTP to the taosKeeper service specified by `monitorFqdn` and `monitorPort`. |
|
||||
| Value Range | 0: monitoring disabled, 1: monitoring enabled |
|
||||
| Default | 0 |
|
||||
|
||||
### monitorFqdn
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | FQDN of taosKeeper monitoring service |
|
||||
| Default | None |
|
||||
| Attribute | Description |
|
||||
| ---------- | ------------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | FQDN of taosKeeper monitoring service |
|
||||
| Default | None |
|
||||
|
||||
### monitorPort
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | --------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Port of taosKeeper monitoring service |
|
||||
| Default Value | 6043 |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Port of taosKeeper monitoring service |
|
||||
| Default Value | 6043 |
|
||||
|
||||
### monitorInterval
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------------------------ |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | The interval of collecting system workload |
|
||||
| Unit | second |
|
||||
| Value Range | 1-200000 |
|
||||
| Default Value | 30 |
|
||||
| Value Range | 1-200000 |
|
||||
| Default Value | 30 |
|
||||
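Putting the monitoring parameters together, a minimal `taos.cfg` sketch (assuming taosKeeper runs on the same host with its default port) could look like this:

```shell
sudo tee -a /etc/taos/taos.cfg > /dev/null <<'EOF'
monitor         1
monitorFqdn     localhost
monitorPort     6043
monitorInterval 30
EOF
```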
|
||||
### telemetryReporting
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ---------------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Attribute | Description |
|
||||
| ------------- | ---------------------------------------------------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Switch for allowing TDengine to collect and report service usage information |
|
||||
| Value Range | 0: Not allowed; 1: Allowed |
|
||||
| Default Value | 1 |
|
||||
| Default Value | 1 |
|
||||
### crashReporting
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning |Switch for allowing TDengine to collect and report crash related information |
|
||||
| Value Range | 0,1 0: Not allowed;1:allowed |
|
||||
| Default Value | 1 |
|
||||
| Attribute | Description |
|
||||
| ------------- | ---------------------------------------------------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Switch for allowing TDengine to collect and report crash related information |
|
||||
| Value Range | 0,1 0: Not allowed; 1: allowed |
|
||||
| Default Value | 1 |
|
||||
|
||||
|
||||
## Query Parameters
|
||||
|
||||
### queryPolicy
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ----------------------------- |
|
||||
| Applicable | Client only |
|
||||
| Meaning | Execution policy for query statements |
|
||||
| Unit | None |
|
||||
| Default | 1 |
|
||||
| Attribute | Description |
|
||||
| ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Client only |
|
||||
| Meaning | Execution policy for query statements |
|
||||
| Unit | None |
|
||||
| Default | 1 |
|
||||
| Value Range | 1: Run queries on vnodes and not on qnodes; 2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes; 3: Only run scan operators on vnodes, and run all other operators on qnodes. |
|
||||
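For example, to route subtasks without scan operators to qnodes, the client-side `taos.cfg` could carry the following line (an illustrative sketch, assuming qnodes have already been created in the cluster):

```shell
sudo tee -a /etc/taos/taos.cfg > /dev/null <<'EOF'
queryPolicy 2
EOF
```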
|
||||
### querySmaOptimize
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------- |
|
||||
| Applicable | Client only |
|
||||
| Meaning | SMA index optimization policy |
|
||||
| Unit | None |
|
||||
| Default Value | 0 |
|
||||
| Notes |0: Disable SMA indexing and perform all queries on non-indexed data; 1: Enable SMA indexing and perform queries from suitable statements on precomputation results.|
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Client only |
|
||||
| Meaning | SMA index optimization policy |
|
||||
| Unit | None |
|
||||
| Default Value | 0 |
|
||||
| Notes | 0: Disable SMA indexing and perform all queries on non-indexed data; 1: Enable SMA indexing and perform queries from suitable statements on precomputation results. |
|
||||
|
||||
### countAlwaysReturnValue
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------------------- |
|
||||
| Applicable | Server only |
|
||||
| Meaning | count()/hyperloglog() return value or not if the input data is empty or NULL |
|
||||
| Vlue Range | 0:Return empty line,1:Return 0 |
|
||||
| Default | 1 |
|
||||
| Notes | When this parameter is setting to 1, for queries containing GROUP BY, PARTITION BY and INTERVAL clause, and input data in certain groups or windows is empty or NULL, the corresponding groups or windows have no return values |
|
||||
| Attribute | Description |
|
||||
| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Server only |
|
||||
| Meaning | Whether count()/hyperloglog() return a value when the input data is empty or NULL |
|
||||
| Value Range | 0: Return empty line, 1: Return 0 |
|
||||
| Default | 1 |
|
||||
| Notes | When this parameter is set to 1, for queries containing GROUP BY, PARTITION BY and INTERVAL clauses, if the input data in certain groups or windows is empty or NULL, the corresponding groups or windows have no return values |
|
||||
|
||||
### maxNumOfDistinctRes
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Attribute | Description |
|
||||
| ------------- | -------------------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | The maximum number of distinct rows returned |
|
||||
| Value Range | [100,000 - 100,000,000] |
|
||||
| Default Value | 100,000 |
|
||||
|
||||
### keepColumnName
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------------------- |
|
||||
| Applicable | Client only |
|
||||
| Meaning | When the Last, First, LastRow function is queried, whether the returned column name contains the function name. |
|
||||
| Value Range | 0 means including the function name, 1 means not including the function name. |
|
||||
| Default Value | 0 |
|
||||
| Attribute | Description |
|
||||
| ------------- | --------------------------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Client only |
|
||||
| Meaning | When the Last, First, LastRow function is queried, whether the returned column name contains the function name. |
|
||||
| Value Range | 0 means including the function name, 1 means not including the function name. |
|
||||
| Default Value | 0 |
|
||||
|
||||
## Locale Parameters
|
||||
|
||||
### timezone
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------------------ |
|
||||
| Applicable | Server and Client |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | TimeZone |
|
||||
| Default Value | TimeZone configured in the host |
|
||||
|
||||
|
@ -314,383 +339,434 @@ The charset that takes effect is UTF-8.
|
|||
|
||||
### dataDir
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------------------------------ |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | All data files are stored in this directory |
|
||||
| Default Value | /var/lib/taos |
|
||||
| Attribute | Description |
|
||||
| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | All data files are stored in this directory |
|
||||
| Default Value | /var/lib/taos |
|
||||
| Note | The [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function needs to be used in conjunction with the [KEEP](https://docs.tdengine.com/taos-sql/database/#parameters) parameter |
|
||||
|
||||
### tempDir
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------------------------------ |
|
||||
| Applicable | Server only |
|
||||
| Meaning | The directory where to put all the temporary files generated during system running |
|
||||
| Default | /tmp |
|
||||
| Attribute | Description |
|
||||
| ---------- | ---------------------------------------------------------------------------------- |
|
||||
| Applicable | Server only |
|
||||
| Meaning | The directory where to put all the temporary files generated during system running |
|
||||
| Default | /tmp |
|
||||
|
||||
### minimalTmpDirGB
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------------------------------------ |
|
||||
| Applicable | Server and Client |
|
||||
| Attribute | Description |
|
||||
| ------------- | ----------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | When the available disk space in tmpDir is below this threshold, writing to tmpDir is suspended |
|
||||
| Unit | GB |
|
||||
| Default Value | 1.0 |
|
||||
| Unit | GB |
|
||||
| Default Value | 1.0 |
|
||||
|
||||
### minimalDataDirGB
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------------------------------------ |
|
||||
| Applicable | Server Only |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | When the available disk space in dataDir is below this threshold, writing to dataDir is suspended |
|
||||
| Unit | GB |
|
||||
| Default Value | 2.0 |
|
||||
| Unit | GB |
|
||||
| Default Value | 2.0 |
|
||||
|
||||
### metaCacheMaxSize
|
||||
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Client Only |
|
||||
| Meaning | Maximum meta cache size in single client process |
|
||||
| Unit | MB |
|
||||
| Default Value | -1 (No limitation) |
|
||||
|
||||
|
||||
## Cluster Parameters
|
||||
|
||||
### supportVnodes
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | --------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Maximum number of vnodes per dnode |
|
||||
| Value Range | 0-4096 |
|
||||
| Default Value | 2x the CPU cores |
|
||||
| Attribute | Description |
|
||||
| ------------- | ---------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Maximum number of vnodes per dnode |
|
||||
| Value Range | 0-4096 |
|
||||
| Default Value | 2x the CPU cores |
|
||||
|
||||
## Performance Tuning
|
||||
|
||||
### numOfCommitThreads
|
||||
|
||||
| Attribute | Description |
|
||||
| ------------- | ----------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Maximum number of threads to commit |
|
||||
| Value Range | 0-1024 |
|
||||
| Default Value | |
|
||||
|
||||
## Log Parameters
|
||||
|
||||
### logDir
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------------------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Attribute | Description |
|
||||
| ------------- | ----------------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | The directory for writing log files |
|
||||
| Default Value | /var/log/taos |
|
||||
|
||||
### minimalLogDirGB
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Attribute | Description |
|
||||
| ------------- | -------------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | When the available disk space in logDir is below this threshold, writing to log files is suspended |
|
||||
| Unit | GB |
|
||||
| Default Value | 1.0 |
|
||||
| Unit | GB |
|
||||
| Default Value | 1.0 |
|
||||
|
||||
### numOfLogLines
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ---------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------------------------ |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | Maximum number of lines in single log file |
|
||||
| Default Value | 10000000 |
|
||||
| Default Value | 10000000 |
|
||||
|
||||
### asyncLog
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Attribute | Description |
|
||||
| ------------- | ---------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | The mode of writing log file |
|
||||
| Value Range | 0: sync way; 1: async way |
|
||||
| Default Value | 1 |
|
||||
| Default Value | 1 |
|
||||
|
||||
### logKeepDays
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ----------------------------------------------------------------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | The number of days for log files to be kept |
|
||||
| Unit | day |
|
||||
| Default Value | 0 |
|
||||
| Unit | day |
|
||||
| Default Value | 0 |
|
||||
| Note | When it's bigger than 0, the log file would be renamed to "taosdlog.xxx" in which "xxx" is the timestamp when the file is changed last time |
|
||||
|
||||
### slowLogThreshold
|
||||
|
||||
| Attribute | Description |
|
||||
| ------------- | -------------------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Client only |
|
||||
| Meaning | When an operation's execution time exceeds this threshold, the operation will be logged in the slow log file |
|
||||
| Unit | second |
|
||||
| Default Value | 3 |
|
||||
| Note | All slow operations will be logged in file "taosSlowLog" in the log directory |
|
||||
|
||||
### slowLogScope
|
||||
|
||||
| Attribute | Description |
|
||||
| --------------- | ----------------------------------------------------------------------- |
|
||||
| Applicable | Client only |
|
||||
| Meaning | Slow log type to be logged |
|
||||
| Optional Values | ALL, QUERY, INSERT, OTHERS, NONE |
|
||||
| Default Value | ALL |
|
||||
| Note | All slow operations are logged by default; one of the above options can be set |
|
||||
|
||||
### debugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------------------------------------------------------------------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Attribute | Description |
|
||||
| ------------- | --------------------------------------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | Log level |
|
||||
| Value Range | 131: INFO/WARNING/ERROR; 135: plus DEBUG; 143: plus TRACE |
|
||||
| Default Value | 131 or 135, depending on the module |
|
||||
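As a sketch, raising the overall log level and limiting log retention could be done with the following `taos.cfg` entries (the values are examples, not recommendations):

```shell
sudo tee -a /etc/taos/taos.cfg > /dev/null <<'EOF'
debugFlag   135
logKeepDays 7
EOF
```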
|
||||
### tmrDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------- |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | Log level of timer module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
|
||||
### uDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ---------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Attribute | Description |
|
||||
| ------------- | -------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | Log level of common module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
|
||||
### rpcDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Attribute | Description |
|
||||
| ------------- | ----------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | Log level of rpc module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
|
||||
### jniDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------ |
|
||||
| Applicable | Client Only |
|
||||
| Attribute | Description |
|
||||
| ------------- | ----------------------- |
|
||||
| Applicable | Client Only |
|
||||
| Meaning | Log level of jni module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
|
||||
### qDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------- |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | Log level of query module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
| Meaning | Log level of query module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
|
||||
### cDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | --------------------- |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------- |
|
||||
| Applicable | Client Only |
|
||||
| Meaning | Log level of Client |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
|
||||
### dDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------ |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Log level of dnode |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | 135 |
|
||||
| Default Value | 135 |
|
||||
|
||||
### vDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------ |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Log level of vnode |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
| Default Value | |
|
||||
|
||||
### mDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Log level of mnode module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | 135 |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Log level of mnode module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | 135 |
|
||||
|
||||
### wDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------ |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Log level of WAL module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | 135 |
|
||||
| Attribute | Description |
|
||||
| ------------- | ----------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Log level of WAL module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | 135 |
|
||||
|
||||
### sDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------- |
|
||||
| Applicable | Server and Client |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------ |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | Log level of sync module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | 135 |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | 135 |
|
||||
|
||||
### tsdbDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Log level of TSDB module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------ |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Log level of TSDB module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
|
||||
### tqDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ----------------- |
|
||||
| Applicable | Server only |
|
||||
| Meaning | Log level of TQ module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
| Attribute | Description |
|
||||
| ------------- | ---------------------- |
|
||||
| Applicable | Server only |
|
||||
| Meaning | Log level of TQ module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
|
||||
### fsDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ----------------- |
|
||||
| Applicable | Server only |
|
||||
| Meaning | Log level of FS module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
| Attribute | Description |
|
||||
| ------------- | ---------------------- |
|
||||
| Applicable | Server only |
|
||||
| Meaning | Log level of FS module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
|
||||
### udfDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------ |
|
||||
| Applicable | Server Only |
|
||||
| Attribute | Description |
|
||||
| ------------- | ----------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Log level of UDF module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
|
||||
### smaDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------ |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Log level of SMA module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
| Attribute | Description |
|
||||
| ------------- | ----------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Log level of SMA module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
|
||||
### idxDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Log level of index module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Log level of index module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
|
||||
### tdbDebugFlag
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------ |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Log level of TDB module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
| Attribute | Description |
|
||||
| ------------- | ----------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Log level of TDB module |
|
||||
| Value Range | same as debugFlag |
|
||||
| Default Value | |
|
||||
|
||||
## Schemaless Parameters
|
||||
|
||||
### smlChildTableName
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------------- |
|
||||
| Applicable | Client only |
|
||||
| Meaning | Custom subtable name for schemaless writes |
|
||||
| Type | String |
|
||||
| Default Value | None |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------------------------ |
|
||||
| Applicable | Client only |
|
||||
| Meaning | Custom subtable name for schemaless writes |
|
||||
| Type | String |
|
||||
| Default Value | None |
|
||||
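For illustration only: assuming `smlChildTableName` is set to the hypothetical tag key `tname` in `taos.cfg`, a schemaless write that carries that tag uses its value as the child table name, as described in the schemaless section of this documentation.

```
# Hypothetical setting in taos.cfg
smlChildTableName tname

# InfluxDB line protocol record written through the schemaless interface;
# the child table would be named d1001 and the tname tag itself is not stored.
meters,tname=d1001,location=California.SanFrancisco,groupid=2 current=10.3,voltage=219,phase=0.31 1626006833639000000
```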
|
||||
### smlTagName
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------------------------ |
|
||||
| Applicable | Client only |
|
||||
| Meaning | Default tag for schemaless writes without tag value specified |
|
||||
| Type | String |
|
||||
| Default Value | _tag_null |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------------------------------------------- |
|
||||
| Applicable | Client only |
|
||||
| Meaning | Default tag for schemaless writes without tag value specified |
|
||||
| Type | String |
|
||||
| Default Value | _tag_null |
|
||||
|
||||
### smlDataFormat
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ----------------------------- |
|
||||
| Applicable | Client only |
|
||||
| Meaning | Whether schemaless columns are consistently ordered; deprecated and discarded since 3.0.3.0 |
|
||||
| Value Range | 0: not consistent; 1: consistent. |
|
||||
| Default | 0 |
|
||||
| Attribute | Description |
|
||||
| ----------- | ----------------------------------------------------------------------------------- |
|
||||
| Applicable | Client only |
|
||||
| Meaning | Whether schemaless columns are consistently ordered; deprecated and discarded since 3.0.3.0 |
|
||||
| Value Range | 0: not consistent; 1: consistent. |
|
||||
| Default | 0 |
|
||||
|
||||
## Compress Parameters
|
||||
|
||||
### compressMsgSize
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ----------------------------- |
|
||||
| Applicable | Both Client and Server side |
|
||||
| Meaning | Whether RPC message is compressed |
|
||||
| Value Range | -1: no message is compressed; 0: all messages are compressed; N (N>0): messages exceeding N bytes are compressed |
|
||||
| Default | -1 |
|
||||
| Attribute | Description |
|
||||
| ----------- | ------------------------------------------------------------------------------------------------------------------ |
|
||||
| Applicable | Both Client and Server side |
|
||||
| Meaning | Whether RPC message is compressed |
|
||||
| Value Range | -1: no message is compressed; 0: all messages are compressed; N (N>0): messages exceeding N bytes are compressed |
|
||||
| Default | -1 |
|
||||
|
||||
|
||||
## Other Parameters
|
||||
|
||||
### enableCoreFile
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | Whether to generate core file when server crashes |
|
||||
| Value Range | 0: false, 1: true |
|
||||
| Default Value | 1 |
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| Applicable | Server and Client |
|
||||
| Meaning | Whether to generate core file when server crashes |
|
||||
| Value Range | 0: false, 1: true |
|
||||
| Default Value | 1 |
|
||||
| Note | The core file is generated under the root directory if `systemctl start taosd`/`launchctl start com.tdengine.taosd` is used to start the service, or under the working directory if `taosd` is started directly in a Linux/macOS shell. |
|
||||
|
||||
### enableScience
|
||||
|
||||
| Attribute | Description |
|
||||
| ------------- | ------------------------------------------------------------- |
|
||||
| Applicable | Only taos-CLI client |
|
||||
| Meaning | Whether to display float and double values in scientific notation |
|
||||
| Value Range | 0: false, 1: true |
|
||||
| Default Value | 0 |
|
||||
|
||||
|
||||
### udf
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | ------------------ |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Whether the UDF service is enabled |
|
||||
| Value Range | 0: disable UDF; 1: enable UDF |
|
||||
| Default Value | 1 |
|
||||
| Attribute | Description |
|
||||
| ------------- | ---------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Whether the UDF service is enabled |
|
||||
| Value Range | 0: disable UDF; 1: enable UDF |
|
||||
| Default Value | 1 |
|
||||
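As a minimal sketch (example values only), these options are set in `taos.cfg` like any other server parameter:

```
# Hypothetical taos.cfg fragment
enableCoreFile 1   # generate a core file if the server crashes
udf            1   # enable the UDF service
```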
|
||||
|
||||
## 3.0 Parameters
|
||||
|
||||
| # | **Parameter** | **Applicable to 2.x** | **Applicable to 3.0** | Current behavior in 3.0 |
|
||||
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
|
||||
| 1 | firstEp | Yes | Yes | |
|
||||
| 2 | secondEp | Yes | Yes | |
|
||||
| 3 | fqdn | Yes | Yes | |
|
||||
| 4 | serverPort | Yes | Yes | |
|
||||
| 5 | maxShellConns | Yes | Yes | |
|
||||
| 6 | monitor | Yes | Yes | |
|
||||
| 7 | monitorFqdn | No | Yes | |
|
||||
| 8 | monitorPort | No | Yes | |
|
||||
| 9 | monitorInterval | Yes | Yes | |
|
||||
| 10 | queryPolicy | No | Yes | |
|
||||
| 11 | querySmaOptimize | No | Yes | |
|
||||
| 12 | maxNumOfDistinctRes | Yes | Yes | |
|
||||
| 15 | countAlwaysReturnValue | Yes | Yes | |
|
||||
| 16 | dataDir | Yes | Yes | |
|
||||
| 17 | minimalDataDirGB | Yes | Yes | |
|
||||
| 18 | supportVnodes | No | Yes | |
|
||||
| 19 | tempDir | Yes | Yes | |
|
||||
| 20 | minimalTmpDirGB | Yes | Yes | |
|
||||
| 21 | smlChildTableName | Yes | Yes | |
|
||||
| 22 | smlTagName | Yes | Yes | |
|
||||
| 23 | smlDataFormat | No | Yes(discarded since 3.0.3.0) | |
|
||||
| 24 | statusInterval | Yes | Yes | |
|
||||
| 25 | logDir | Yes | Yes | |
|
||||
| 26 | minimalLogDirGB | Yes | Yes | |
|
||||
| 27 | numOfLogLines | Yes | Yes | |
|
||||
| 28 | asyncLog | Yes | Yes | |
|
||||
| 29 | logKeepDays | Yes | Yes | |
|
||||
| 30 | debugFlag | Yes | Yes | |
|
||||
| 31 | tmrDebugFlag | Yes | Yes | |
|
||||
| 32 | uDebugFlag | Yes | Yes | |
|
||||
| 33 | rpcDebugFlag | Yes | Yes | |
|
||||
| 34 | jniDebugFlag | Yes | Yes | |
|
||||
| 35 | qDebugFlag | Yes | Yes | |
|
||||
| 36 | cDebugFlag | Yes | Yes | |
|
||||
| 37 | dDebugFlag | Yes | Yes | |
|
||||
| 38 | vDebugFlag | Yes | Yes | |
|
||||
| 39 | mDebugFlag | Yes | Yes | |
|
||||
| 40 | wDebugFlag | Yes | Yes | |
|
||||
| 41 | sDebugFlag | Yes | Yes | |
|
||||
| 42 | tsdbDebugFlag | Yes | Yes | |
|
||||
| 43 | tqDebugFlag | No | Yes | |
|
||||
| 44 | fsDebugFlag | Yes | Yes | |
|
||||
| 45 | udfDebugFlag | No | Yes | |
|
||||
| 46 | smaDebugFlag | No | Yes | |
|
||||
| 47 | idxDebugFlag | No | Yes | |
|
||||
| 48 | tdbDebugFlag | No | Yes | |
|
||||
| 49 | metaDebugFlag | No | Yes | |
|
||||
| 50 | timezone | Yes | Yes | |
|
||||
| 51 | locale | Yes | Yes | |
|
||||
| 52 | charset | Yes | Yes | |
|
||||
| 53 | udf | Yes | Yes | |
|
||||
| 54 | enableCoreFile | Yes | Yes | |
|
||||
| # | **Parameter** | **Applicable to 2.x ** | **Applicable to 3.0 ** | Current behavior in 3.0 |
|
||||
| --- | :--------------------: | ---------------------- | ---------------------------- | ----------------------- |
|
||||
| 1 | firstEp | Yes | Yes | |
|
||||
| 2 | secondEp | Yes | Yes | |
|
||||
| 3 | fqdn | Yes | Yes | |
|
||||
| 4 | serverPort | Yes | Yes | |
|
||||
| 5 | maxShellConns | Yes | Yes | |
|
||||
| 6 | monitor | Yes | Yes | |
|
||||
| 7 | monitorFqdn | No | Yes | |
|
||||
| 8 | monitorPort | No | Yes | |
|
||||
| 9 | monitorInterval | Yes | Yes | |
|
||||
| 10 | queryPolicy | No | Yes | |
|
||||
| 11 | querySmaOptimize | No | Yes | |
|
||||
| 12 | maxNumOfDistinctRes | Yes | Yes | |
|
||||
| 15 | countAlwaysReturnValue | Yes | Yes | |
|
||||
| 16 | dataDir | Yes | Yes | |
|
||||
| 17 | minimalDataDirGB | Yes | Yes | |
|
||||
| 18 | supportVnodes | No | Yes | |
|
||||
| 19 | tempDir | Yes | Yes | |
|
||||
| 20 | minimalTmpDirGB | Yes | Yes | |
|
||||
| 21 | smlChildTableName | Yes | Yes | |
|
||||
| 22 | smlTagName | Yes | Yes | |
|
||||
| 23 | smlDataFormat | No | Yes(discarded since 3.0.3.0) | |
|
||||
| 24 | statusInterval | Yes | Yes | |
|
||||
| 25 | logDir | Yes | Yes | |
|
||||
| 26 | minimalLogDirGB | Yes | Yes | |
|
||||
| 27 | numOfLogLines | Yes | Yes | |
|
||||
| 28 | asyncLog | Yes | Yes | |
|
||||
| 29 | logKeepDays | Yes | Yes | |
|
||||
| 30 | debugFlag | Yes | Yes | |
|
||||
| 31 | tmrDebugFlag | Yes | Yes | |
|
||||
| 32 | uDebugFlag | Yes | Yes | |
|
||||
| 33 | rpcDebugFlag | Yes | Yes | |
|
||||
| 34 | jniDebugFlag | Yes | Yes | |
|
||||
| 35 | qDebugFlag | Yes | Yes | |
|
||||
| 36 | cDebugFlag | Yes | Yes | |
|
||||
| 37 | dDebugFlag | Yes | Yes | |
|
||||
| 38 | vDebugFlag | Yes | Yes | |
|
||||
| 39 | mDebugFlag | Yes | Yes | |
|
||||
| 40 | wDebugFlag | Yes | Yes | |
|
||||
| 41 | sDebugFlag | Yes | Yes | |
|
||||
| 42 | tsdbDebugFlag | Yes | Yes | |
|
||||
| 43 | tqDebugFlag | No | Yes | |
|
||||
| 44 | fsDebugFlag | Yes | Yes | |
|
||||
| 45 | udfDebugFlag | No | Yes | |
|
||||
| 46 | smaDebugFlag | No | Yes | |
|
||||
| 47 | idxDebugFlag | No | Yes | |
|
||||
| 48 | tdbDebugFlag | No | Yes | |
|
||||
| 49 | metaDebugFlag | No | Yes | |
|
||||
| 50 | timezone | Yes | Yes | |
|
||||
| 51 | locale | Yes | Yes | |
|
||||
| 52 | charset | Yes | Yes | |
|
||||
| 53 | udf | Yes | Yes | |
|
||||
| 54 | enableCoreFile | Yes | Yes | |
|
||||
|
|
|
@ -90,7 +90,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam
|
|||
Note: TDengine 3.0.3.0 and later automatically detect whether order is consistent. This parameter is no longer used.
|
||||
|
||||
:::tip
|
||||
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
|
||||
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB (64 KB since version 3.0.5.0) and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
|
||||
:::
|
||||
|
||||
## Time resolution recognition
|
||||
|
|
|
@ -108,7 +108,7 @@ The following `launchctl` commands can help you manage taoskeeper service:
|
|||
|
||||
#### Launch With Configuration File
|
||||
|
||||
You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/keeper.toml` is used by default. If this file does not specify configurations, the default values are used.
|
||||
You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/taoskeeper.toml` is used by default. If this file does not specify configurations, the default values are used.
|
||||
|
||||
```shell
|
||||
$ taoskeeper -c <keeper config file>
|
||||
|
@ -153,6 +153,10 @@ database = "log"
|
|||
|
||||
# standard tables to monitor
|
||||
tables = ["normal_table"]
|
||||
|
||||
# database options for db storing metrics data
|
||||
[metrics.databaseoptions]
|
||||
cachemodel = "none"
|
||||
```
|
||||
|
||||
### Obtain Monitoring Metrics
|
||||
|
@ -203,7 +207,7 @@ taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1
|
|||
taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1
|
||||
```
|
||||
|
||||
### check_health
|
||||
### check\_health
|
||||
|
||||
```
|
||||
$ curl -i http://127.0.0.1:6043/check_health
|
||||
|
@ -219,3 +223,29 @@ Content-Length: 19
|
|||
|
||||
{"version":"1.0.0"}
|
||||
```
|
||||
|
||||
### taoskeeper with Prometheus
|
||||
|
||||
taosKeeper provides a `/metrics` API that exposes TDengine monitoring metrics for Prometheus.
|
||||
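For example, assuming taosKeeper is running locally on port 6043 (the port used elsewhere in this document), the metrics can be inspected directly with curl; the response is in the Prometheus text exposition format:

```shell
curl http://localhost:6043/metrics
```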
|
||||
#### scrape config
|
||||
|
||||
Scrape config in Prometheus specifies a set of targets and parameters describing how to scrape metric data from an endpoint. For more information, please refer to the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config).
|
||||
|
||||
```
|
||||
# A scrape configuration containing exactly one endpoint to scrape:
|
||||
# Here it's Prometheus itself.
|
||||
scrape_configs:
|
||||
- job_name: "taoskeeper"
|
||||
# metrics_path defaults to '/metrics'
|
||||
# scheme defaults to 'http'.
|
||||
static_configs:
|
||||
- targets: ["localhost:6043"]
|
||||
```
|
||||
|
||||
#### Dashboard
|
||||
|
||||
There is a dashboard named `TaosKeeper Prometheus Dashboard for 3.x`, which provides a monitoring dashboard similar to TDinsight.
|
||||
|
||||
In Grafana, click the Dashboards menu, click `Import`, enter the dashboard ID `18587`, and click the `Load` button to finish importing the `TaosKeeper Prometheus Dashboard for 3.x` dashboard.
|
||||
|
||||
|
|
|
@ -200,11 +200,16 @@ As shown above, select the `TDengine` data source in the `Query` and enter the c
|
|||
- Group by column name(s): `group by` or `partition by` column names, separated by commas. By setting `Group by column name(s)`, the panel can show multi-dimensional data when the SQL uses `group by` or `partition by`. For example, it can show data by `dnode_ep` if the SQL is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)` and `Group by column name(s)` is `dnode_ep`.
|
||||
- Format to: the legend format for `group by` or `partition by` results. For example, it can display series data by `dnode_ep` if the SQL is `select _wstart as ts, avg(mem_system), dnode_ep from log.dnodes_info where ts>=$from and ts<=$to partition by dnode_ep interval($interval)`, `Group by column name(s)` is `dnode_ep`, and `Format to` is `mem_system_{{dnode_ep}}`.
|
||||
|
||||
:::note
|
||||
|
||||
Since the REST connection is stateless, the Grafana plugin can use `<db_name>.<table_name>` in the SQL command to specify the database name (see the example query after this note).
|
||||
|
||||
:::
|
||||
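The following sketch shows such a query; it is simply the `log.dnodes_info` example from the list above, with the database-qualified table name that the stateless REST connection requires:

```sql
-- With "Group by column name(s)" = dnode_ep and "Format to" = mem_system_{{dnode_ep}},
-- each dnode becomes its own series in the panel legend.
select _wstart as ts, avg(mem_system), dnode_ep
from log.dnodes_info
where ts >= $from and ts <= $to
partition by dnode_ep
interval($interval)
```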
|
||||
Follow the default prompt to query the average system memory usage for the specified interval on the server where the current TDengine deployment is located as follows.
|
||||
|
||||

|
||||
|
||||
查询每台 TDengine 服务器指定间隔系统内存平均使用量如下.
|
||||
The example to query the average system memory usage for the specified interval on each server as follows.
|
||||
|
||||

|
||||
|
@ -217,7 +222,7 @@ You can install TDinsight dashboard in data source configuration page (like `htt
|
|||
|
||||

|
||||
|
||||
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) 。 Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
|
||||
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
|
||||
|
||||
For more dashboards using TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a sub list:
|
||||
|
||||
|
|
|
@ -47,7 +47,7 @@ Select "Rule" in the "Rule Engine" on the left and click the "Create" button: !
|
|||
|
||||
### Edit SQL fields
|
||||
|
||||
Copy SQL bellow and paste it to the SQL edit area:
|
||||
Copy the SQL below and paste it into the SQL edit area:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
|
@ -76,7 +76,8 @@ Select "WebHook" and fill in the request URL as the address and port of the serv
|
|||
|
||||
### Edit "action"
|
||||
|
||||
Edit the resource configuration to add the key/value pairing for Authorization. If you use the default TDengine username and password then the value of key Authorization is:
|
||||
Edit the resource configuration to add the key/value pair for Authorization. If you use the default TDengine username and password, the value of the Authorization key is:
|
||||
|
||||
```
|
||||
Basic cm9vdDp0YW9zZGF0YQ==
|
||||
```
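If you use different credentials, the value is `Basic ` followed by the Base64 encoding of `<username>:<password>`. For example, the string above can be reproduced with:

```shell
# root:taosdata are the default TDengine credentials
echo -n "root:taosdata" | base64
# output: cm9vdDp0YW9zZGF0YQ==
```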
|
||||
|
|
|
@ -16,166 +16,79 @@ TDengine Source Connector is used to read data from TDengine in real-time and se
|
|||
|
||||

|
||||
|
||||
## What is Confluent?
|
||||
|
||||
[Confluent](https://www.confluent.io/) adds many extensions to Kafka, including:
|
||||
|
||||
1. Schema Registry
|
||||
2. REST Proxy
|
||||
3. Non-Java Clients
|
||||
4. Many packaged Kafka Connect plugins
|
||||
5. GUI for managing and monitoring Kafka - Confluent Control Center
|
||||
|
||||
Some of these extensions are available in the community version of Confluent. Some are only available in the enterprise version.
|
||||

|
||||
|
||||
Confluent Enterprise Edition provides the `confluent` command-line tool to manage various components.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. Linux operating system
|
||||
2. Java 8 and Maven installed
|
||||
3. Git is installed
|
||||
3. Git, curl, and vi are installed
|
||||
4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](/operation/pkg-install)
|
||||
|
||||
## Install Confluent
|
||||
|
||||
Confluent provides two installation methods: Docker and binary packages. This article only introduces binary package installation.
|
||||
## Install Kafka
|
||||
|
||||
Execute in any directory:
|
||||
|
||||
````
|
||||
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
|
||||
tar xzf confluent-7.1.1.tar.gz -C /opt/test
|
||||
````
|
||||
```shell
|
||||
curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
|
||||
tar xzf kafka_2.13-3.4.0.tgz -C /opt/
|
||||
ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
|
||||
```
|
||||
|
||||
Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
|
||||
Then you need to add the `$KAFKA_HOME/bin` directory to the PATH.
|
||||
|
||||
```title=".profile"
|
||||
export CONFLUENT_HOME=/opt/confluent-7.1.1
|
||||
PATH=$CONFLUENT_HOME/bin
|
||||
export PATH
|
||||
export KAFKA_HOME=/opt/kafka
|
||||
export PATH=$PATH:$KAFKA_HOME/bin
|
||||
```
|
||||
|
||||
Users can append the above lines to the current user's profile file (`~/.profile` or `~/.bash_profile`).
|
||||
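For example, assuming `~/.profile` is the file in use, the lines can be appended and applied as follows:

```shell
cat >> ~/.profile << 'EOF'
export KAFKA_HOME=/opt/kafka
export PATH=$PATH:$KAFKA_HOME/bin
EOF
source ~/.profile
```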
|
||||
After the installation is complete, you can enter `confluent version` for simple verification:
|
||||
|
||||
```
|
||||
# confluent version
|
||||
confluent - Confluent CLI
|
||||
|
||||
Version: v2.6.1
|
||||
Git Ref: 6d920590
|
||||
Build Date: 2022-02-18T06:14:21Z
|
||||
Go Version: go1.17.6 (linux/amd64)
|
||||
Development: false
|
||||
```
|
||||
|
||||
## Install TDengine Connector plugin
|
||||
|
||||
### Install from source code
|
||||
|
||||
```
|
||||
```shell
|
||||
git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
|
||||
cd kafka-connect-tdengine
|
||||
mvn clean package
|
||||
unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
|
||||
mvn clean package -Dmaven.test.skip=true
|
||||
unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
|
||||
```
|
||||
|
||||
The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to plugin path. We used `$CONFLUENT_HOME/share/java/` above because it's a build in plugin path.
|
||||
The above script first clones the project source code and then compiles and packages it with Maven. After packaging is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the plugin path. We used `$KAFKA_HOME/components/` above because it is a built-in plugin path.
|
||||
|
||||
### Install with confluent-hub
|
||||
### Add configuration file
|
||||
|
||||
[Confluent Hub](https://www.confluent.io/hub) provides a service to download Kafka Connect plugins. After TDengine Kafka Connector is published to Confluent Hub, it can be installed using the command tool `confluent-hub`.
|
||||
**TDengine Kafka Connector is currently not officially released and cannot be installed in this way**.
|
||||
Add the kafka-connect-tdengine plugin path to `plugin.path` in `$KAFKA_HOME/config/connect-distributed.properties`.
|
||||
|
||||
## Start Confluent
|
||||
|
||||
```
|
||||
confluent local services start
|
||||
```properties
|
||||
plugin.path=/usr/share/java,/opt/kafka/components
|
||||
```
|
||||
|
||||
:::note
|
||||
Be sure to install the plugin before starting Confluent. Otherwise, Kafka Connect will fail to discover the plugins.
|
||||
:::
|
||||
## Start Kafka Services
|
||||
|
||||
:::tip
|
||||
If a component fails to start, try clearing the data and restarting. The data directory will be printed to the console at startup, e.g.:
|
||||
Use the commands below to start all services:
|
||||
|
||||
```title="Console output log" {1}
|
||||
Using CONFLUENT_CURRENT: /tmp/confluent.106668
|
||||
Starting ZooKeeper
|
||||
ZooKeeper is [UP]
|
||||
Starting Kafka
|
||||
Kafka is [UP]
|
||||
Starting Schema Registry
|
||||
Schema Registry is [UP]
|
||||
Starting Kafka REST
|
||||
Kafka REST is [UP]
|
||||
Starting Connect
|
||||
Connect is [UP]
|
||||
Starting ksqlDB Server
|
||||
ksqlDB Server is [UP]
|
||||
Starting Control Center
|
||||
Control Center is [UP]
|
||||
```
|
||||
```shell
|
||||
zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties
|
||||
|
||||
To clear data, execute `rm -rf /tmp/confluent.106668`.
|
||||
:::
|
||||
kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
|
||||
|
||||
### Check Confluent Services Status
|
||||
connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties
|
||||
|
||||
Use command bellow to check the status of all service:
|
||||
|
||||
```
|
||||
confluent local services status
|
||||
```
|
||||
|
||||
The expected output is:
|
||||
```
|
||||
Connect is [UP]
|
||||
Control Center is [UP]
|
||||
Kafka is [UP]
|
||||
Kafka REST is [UP]
|
||||
ksqlDB Server is [UP]
|
||||
Schema Registry is [UP]
|
||||
ZooKeeper is [UP]
|
||||
```
|
||||
|
||||
### Check Successfully Loaded Plugin
|
||||
|
||||
After Kafka Connect has started completely, you can use the command below to check whether our plugins are installed successfully:
|
||||
```
|
||||
confluent local services connect plugin list
|
||||
|
||||
```shell
|
||||
curl http://localhost:8083/connectors
|
||||
```
|
||||
|
||||
The output should contains `TDengineSinkConnector` and `TDengineSourceConnector` as bellow:
|
||||
The output is as below:
|
||||
|
||||
```txt
|
||||
[]
|
||||
```
|
||||
Available Connect Plugins:
|
||||
[
|
||||
{
|
||||
"class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
|
||||
"type": "sink",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
|
||||
"type": "source",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
......
|
||||
```
|
||||
|
||||
If not, please check the log file of Kafka Connect. To view the log file path, please execute:
|
||||
|
||||
```
|
||||
echo `cat /tmp/confluent.current`/connect/connect.stdout
|
||||
```
|
||||
It should produce a path like:`/tmp/confluent.104086/connect/connect.stdout`
|
||||
|
||||
Besides log file `connect.stdout` there is a file named `connect.properties`. At the end of this file you can see the effective `plugin.path` which is a series of paths joined by comma. If Kafka Connect not found our plugins, it's probably because the installed path is not included in `plugin.path`.
|
||||
|
||||
## The use of TDengine Sink Connector
|
||||
|
||||
|
@ -185,40 +98,47 @@ TDengine Sink Connector internally uses TDengine [modeless write interface](/ref
|
|||
|
||||
The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format.
|
||||
|
||||
### Add configuration file
|
||||
### Add Sink Connector configuration file
|
||||
|
||||
```
|
||||
```shell
|
||||
mkdir ~/test
|
||||
cd ~/test
|
||||
vi sink-demo.properties
|
||||
vi sink-demo.json
|
||||
```
|
||||
|
||||
sink-demo.properties' content is following:
|
||||
The content of `sink-demo.json` is as follows:
|
||||
|
||||
```ini title="sink-demo.properties"
|
||||
name=TDengineSinkConnector
|
||||
connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
|
||||
tasks.max=1
|
||||
topics=meters
|
||||
connection.url=jdbc:TAOS://127.0.0.1:6030
|
||||
connection.user=root
|
||||
connection.password=taosdata
|
||||
connection.database=power
|
||||
db.schemaless=line
|
||||
data.precision=ns
|
||||
key.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
value.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
```json title="sink-demo.json"
|
||||
{
|
||||
"name": "TDengineSinkConnector",
|
||||
"config": {
|
||||
"connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector",
|
||||
"tasks.max": "1",
|
||||
"topics": "meters",
|
||||
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
|
||||
"connection.user": "root",
|
||||
"connection.password": "taosdata",
|
||||
"connection.database": "power",
|
||||
"db.schemaless": "line",
|
||||
"data.precision": "ns",
|
||||
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"errors.tolerance": "all",
|
||||
"errors.deadletterqueue.topic.name": "dead_letter_topic",
|
||||
"errors.deadletterqueue.topic.replication.factor": 1
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Key configuration instructions:
|
||||
|
||||
1. `topics=meters` and `connection.database=power` means to subscribe to the data of the topic meters and write to the database power.
|
||||
2. `db.schemaless=line` means the data in the InfluxDB Line protocol format.
|
||||
1. `"topics": "meters"` and `"connection.database": "power"` means to subscribe to the data of the topic meters and write to the database power.
|
||||
2. `"db.schemaless": "line"` means the data in the InfluxDB Line protocol format.
|
||||
|
||||
### Create Connector instance
|
||||
### Create Sink Connector instance
|
||||
|
||||
````
|
||||
confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties
|
||||
````shell
|
||||
curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
|
||||
````
|
||||
|
||||
If the above command is executed successfully, the output is as follows:
|
||||
|
@ -238,7 +158,10 @@ If the above command is executed successfully, the output is as follows:
|
|||
"tasks.max": "1",
|
||||
"topics": "meters",
|
||||
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"name": "TDengineSinkConnector"
|
||||
"name": "TDengineSinkConnector",
|
||||
"errors.tolerance": "all",
|
||||
"errors.deadletterqueue.topic.name": "dead_letter_topic",
|
||||
"errors.deadletterqueue.topic.replication.factor": "1",
|
||||
},
|
||||
"tasks": [],
|
||||
"type": "sink"
|
||||
|
@ -258,8 +181,8 @@ meters,location=California.LoSangeles,groupid=3 current=11.3,voltage=221,phase=0
|
|||
|
||||
Use kafka-console-producer to write test data to the topic `meters`.
|
||||
|
||||
```
|
||||
cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters
|
||||
```shell
|
||||
cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
|
||||
```
|
||||
|
||||
:::note
|
||||
|
@ -270,12 +193,12 @@ TDengine Sink Connector will automatically create the database if the target dat
|
|||
|
||||
Use the TDengine CLI to verify that the sync was successful.
|
||||
|
||||
```
|
||||
```sql
|
||||
taos> use power;
|
||||
Database changed.
|
||||
|
||||
taos> select * from meters;
|
||||
ts | current | voltage | phase | groupid | location |
|
||||
_ts | current | voltage | phase | groupid | location |
|
||||
===============================================================================================================================================================
|
||||
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
|
||||
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
|
||||
|
@ -292,32 +215,39 @@ The role of the TDengine Source Connector is to push all the data of a specific
|
|||
|
||||
TDengine Source Connector will convert the data in TDengine data table into [InfluxDB Line protocol format](/develop/insert-data/influxdb-line/) or [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json ) and then write to Kafka.
|
||||
|
||||
The following sample program synchronizes the data in the database test to the topic tdengine-source-test.
|
||||
The following sample program synchronizes the data in the database test to the topic tdengine-test-meters.
|
||||
|
||||
### Add configuration file
|
||||
### Add Source Connector configuration file
|
||||
|
||||
```
|
||||
vi source-demo.properties
|
||||
```shell
|
||||
vi source-demo.json
|
||||
```
|
||||
|
||||
Enter the following content:
|
||||
|
||||
```ini title="source-demo.properties"
|
||||
name=TDengineSourceConnector
|
||||
connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector
|
||||
tasks.max=1
|
||||
connection.url=jdbc:TAOS://127.0.0.1:6030
|
||||
connection.username=root
|
||||
connection.password=taosdata
|
||||
connection.database=test
|
||||
connection.attempts=3
|
||||
connection.backoff.ms=5000
|
||||
topic.prefix=tdengine-source-
|
||||
poll.interval.ms=1000
|
||||
fetch.max.rows=100
|
||||
out.format=line
|
||||
key.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
value.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
```json title="source-demo.json"
|
||||
{
|
||||
"name":"TDengineSourceConnector",
|
||||
"config":{
|
||||
"connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
|
||||
"tasks.max": 1,
|
||||
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
|
||||
"connection.username": "root",
|
||||
"connection.password": "taosdata",
|
||||
"connection.database": "test",
|
||||
"connection.attempts": 3,
|
||||
"connection.backoff.ms": 5000,
|
||||
"topic.prefix": "tdengine",
|
||||
"topic.delimiter": "-",
|
||||
"poll.interval.ms": 1000,
|
||||
"fetch.max.rows": 100,
|
||||
"topic.per.stable": true,
|
||||
"topic.ignore.db": false,
|
||||
"out.format": "line",
|
||||
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"value.converter": "org.apache.kafka.connect.storage.StringConverter"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Prepare test data
|
||||
|
@ -329,45 +259,53 @@ DROP DATABASE IF EXISTS test;
|
|||
CREATE DATABASE test;
|
||||
USE test;
|
||||
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
|
||||
INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(California.LoSangeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(California.LoSangeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(California.LoSangeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(California.LoSangeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
|
||||
|
||||
INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) \
|
||||
d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) \
|
||||
d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) \
|
||||
d1002 USING meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) \
|
||||
d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) \
|
||||
d1003 USING meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) \
|
||||
d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) \
|
||||
d1004 USING meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
|
||||
```
|
||||
|
||||
Use the TDengine CLI to execute the SQL script:
|
||||
|
||||
```
|
||||
```shell
|
||||
taos -f prepare-source-data.sql
|
||||
```
|
||||
|
||||
### Create Connector instance
|
||||
|
||||
````
|
||||
confluent local services connect connector load TDengineSourceConnector --config source-demo.properties
|
||||
````
|
||||
```shell
|
||||
curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
|
||||
```
|
||||
|
||||
### View topic data
|
||||
|
||||
Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After inserting two new data into TDengine, kafka-console-consumer immediately outputs the two new data.
|
||||
Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-test-meters. At the beginning, all historical data will be output. After two new rows are inserted into TDengine, kafka-console-consumer immediately outputs them. The output is in InfluxDB line protocol format.
|
||||
|
||||
````
|
||||
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
|
||||
````shell
|
||||
kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-test-meters
|
||||
````
|
||||
|
||||
output:
|
||||
|
||||
````
|
||||
```txt
|
||||
......
|
||||
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
|
||||
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
|
||||
......
|
||||
````
|
||||
```
|
||||
|
||||
All historical data is displayed. Switch to the TDengine CLI and insert two new pieces of data:
|
||||
|
||||
````
|
||||
```sql
|
||||
USE test;
|
||||
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
|
||||
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
|
||||
````
|
||||
```
|
||||
|
||||
Switch back to kafka-console-consumer, and the command line window has printed out the two pieces of data just inserted.
|
||||
|
||||
|
@ -377,16 +315,16 @@ After testing, use the unload command to stop the loaded connector.
|
|||
|
||||
View currently active connectors:
|
||||
|
||||
````
|
||||
confluent local services connect connector status
|
||||
````
|
||||
```shell
|
||||
curl http://localhost:8083/connectors
|
||||
```
|
||||
|
||||
You should now have two active connectors if you followed the previous steps. Use the following command to unload:
|
||||
|
||||
````
|
||||
confluent local services connect connector unload TDengineSourceConnector
|
||||
confluent local services connect connector unload TDengineSourceConnector
|
||||
````
|
||||
```shell
|
||||
curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
|
||||
curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
|
||||
```
|
||||
|
||||
## Configuration reference
|
||||
|
||||
|
@ -417,24 +355,24 @@ The following configuration items apply to TDengine Sink Connector and TDengine
|
|||
### TDengine Source Connector specific configuration
|
||||
|
||||
1. `connection.database`: source database name, no default value.
|
||||
2. `topic.prefix`: topic name prefix after data is imported into kafka. Use `topic.prefix` + `connection.database` name as the full topic name. Defaults to the empty string "".
|
||||
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. Default "1970-01-01 00:00:00".
|
||||
4. `poll.interval.ms`: Pull data interval, the unit is ms. Default is 1000.
|
||||
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database. Default is 100.
|
||||
6. `out.format`: The data format. The value could be line or json. The line represents the InfluxDB Line protocol format, and json represents the OpenTSDB JSON format. Default is `line`.
|
||||
|
||||
2. `topic.prefix`: topic name prefix used when importing data into Kafka. Its default value is an empty string "".
|
||||
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. If it is not set, importing data to Kafka starts from the first/oldest row in the database.
|
||||
4. `poll.interval.ms`: The time interval for checking newly created tables or removed tables, default value is 1000.
|
||||
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database, default is 100.
|
||||
6. `query.interval.ms`: The time range for reading data from TDengine each time, in milliseconds. It should be adjusted according to the data ingestion rate. The default value is 0, which means reading all data up to the latest time.
|
||||
7. `out.format`: Result output format. `line` indicates that the output format is InfluxDB line protocol format, `json` indicates that the output format is json. The default is line.
|
||||
8. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix><topic.delimiter><connection.database>`.
|
||||
9. `topic.ignore.db`: Whether the topic naming rule contains the database name: true indicates that the rule is `<topic.prefix><topic.delimiter><stable.name>`, false indicates that the rule is `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`, and the default is false. Does not take effect when `topic.per.stable` is set to false.
|
||||
10. `topic.delimiter`: topic name delimiter; the default is `-`. A configuration fragment illustrating some of these options is shown after this list.
|
||||
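For reference only, the following is a hypothetical fragment of keys that could be merged into the `config` object of `source-demo.json` to set the options above; the values are illustrative, not recommendations:

```json
{
  "timestamp.initial": "2023-01-01 00:00:00",
  "poll.interval.ms": 1000,
  "fetch.max.rows": 100,
  "query.interval.ms": 60000,
  "out.format": "line",
  "topic.per.stable": true,
  "topic.ignore.db": false,
  "topic.delimiter": "-"
}
```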
|
||||
## Other notes
|
||||
|
||||
1. To install plugin to a customized location, refer to https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually.
|
||||
2. To use Kafka Connect without confluent, refer to https://kafka.apache.org/documentation/#connect.
|
||||
1. To use Kafka Connect, refer to <https://kafka.apache.org/documentation/#connect>.
|
||||
|
||||
## Feedback
|
||||
|
||||
https://github.com/taosdata/kafka-connect-tdengine/issues
|
||||
<https://github.com/taosdata/kafka-connect-tdengine/issues>
|
||||
|
||||
## Reference
|
||||
|
||||
1. https://www.confluent.io/what-is-apache-kafka
|
||||
2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
|
||||
3. https://docs.confluent.io/platform/current/platform.html
|
||||
1. For more information, see <https://kafka.apache.org/documentation/>
|
||||
|
|
|
@ -10,7 +10,7 @@ TDengine is a high-performance, scalable time-series database that supports SQL.
|
|||
|
||||
The TDengine team immediately saw the benefits of using TDengine to process time-series data with Data Studio to analyze it, and they got to work to create a connector for Data Studio.
|
||||
|
||||
With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for “TDengine”.
|
||||
With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for "TDengine".
|
||||
|
||||

|
||||
|
||||
|
@ -30,8 +30,8 @@ After the connection is established, you can use Data Studio to process your dat
|
|||
|
||||

|
||||
|
||||
In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data – some examples are shown below.
|
||||
In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data - some examples are shown below.
|
||||
|
||||

|
||||
|
||||
With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we’re sure you’ll be able to gain new insights and obtain even more value from your data.
|
||||
With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we're sure you'll be able to gain new insights and obtain even more value from your data.
|
||||
|
|
|
@ -0,0 +1,61 @@
|
|||
---
|
||||
sidebar_label: DBeaver
|
||||
title: DBeaver
|
||||
description: You can use DBeaver to access your data stored in TDengine and TDengine Cloud.
|
||||
---
|
||||
|
||||
[DBeaver](https://dbeaver.io/) is a popular cross-platform database management tool that facilitates data management for developers, database administrators, data analysts, and other users. Starting from version 23.1.1, DBeaver natively supports TDengine and can be used to manage TDengine Cloud as well as TDengine clusters deployed on-premises.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
To use DBeaver to manage TDengine, you need to prepare the following:
|
||||
|
||||
- Install DBeaver. DBeaver supports mainstream operating systems including Windows, macOS, and Linux. Please make sure you download and install the correct version (23.1.1+) and platform package. Please refer to the [official DBeaver documentation](https://github.com/dbeaver/dbeaver/wiki/Installation) for detailed installation steps.
|
||||
- If you use an on-premises TDengine cluster, please make sure that TDengine and taosAdapter are deployed and running properly. For detailed information, please refer to the taosAdapter User Manual.
|
||||
- If you use TDengine Cloud, please [register](https://cloud.tdengine.com/) for an account.
|
||||
|
||||
## Usage
|
||||
|
||||
### Use DBeaver to access on-premises TDengine cluster
|
||||
|
||||
1. Start the DBeaver application, click the button or menu item to choose **New Database Connection**, and then select **TDengine** in the **Timeseries** category.
|
||||
|
||||

|
||||
|
||||
2. Configure the TDengine connection by filling in the host address, port number, username, and password. If TDengine is deployed on the local machine, you are only required to fill in the username and password. The default username is root and the default password is taosdata. Click **Test Connection** to check whether the connection is workable. If you do not have the TDengine Java connector installed on the local machine, DBeaver will prompt you to download and install it.
|
||||
|
||||
|
||||
|
||||
3. If the connection is successful, it will be displayed as shown in the following figure. If the connection fails, please check whether the TDengine service and taosAdapter are running correctly and whether the host address, port number, username, and password are correct.
|
||||
|
||||

|
||||
|
||||
4. Use DBeaver to select databases and tables and browse your data stored in TDengine.
|
||||
|
||||

|
||||
|
||||
5. You can also manipulate TDengine data by executing SQL commands.
|
||||
|
||||

|
||||
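For example, queries such as the following can be run from DBeaver's SQL editor (this assumes a database named `power` with a supertable `meters`, as in the examples earlier in this document, or any database of your own):

```sql
-- Browse databases and query sample data
SHOW DATABASES;
USE power;
SELECT * FROM meters LIMIT 10;
```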
|
||||
### Use DBeaver to access TDengine Cloud
|
||||
|
||||
1. Log in to the TDengine Cloud service, select **Programming** > **Java** in the management console, and then copy the string value of `TDENGINE_JDBC_URL` displayed in the **Config** section.
|
||||
|
||||

|
||||
|
||||
2. Start the DBeaver application, click the button or menu item to choose **New Database Connection**, and then select **TDengine Cloud** in the **Timeseries** category.
|
||||
|
||||

|
||||
|
||||
3. Configure the TDengine Cloud connection by filling in the JDBC URL value. Click **Test Connection**. If you do not have the TDengine Java connector installed on the local machine, DBeaver will prompt you to download and install it. If the connection is successful, it will be displayed as shown in the following figure. If the connection fails, please check whether the TDengine Cloud service is running properly and whether the JDBC URL is correct.
|
||||
|
||||

|
||||
|
||||
4. Use DBeaver to select databases and tables and browse your data stored in TDengine Cloud.
|
||||
|
||||

|
||||
|
||||
5. You can also manipulate TDengine Cloud data by executing SQL commands.
|
||||
|
||||

|
After Width: | Height: | Size: 73 KiB |
After Width: | Height: | Size: 70 KiB |
After Width: | Height: | Size: 50 KiB |
After Width: | Height: | Size: 36 KiB |
After Width: | Height: | Size: 36 KiB |
After Width: | Height: | Size: 35 KiB |
After Width: | Height: | Size: 39 KiB |
After Width: | Height: | Size: 59 KiB |
After Width: | Height: | Size: 57 KiB |
After Width: | Height: | Size: 41 KiB |