enh: merge 3.0

This commit is contained in:
Alex Duan 2025-01-24 15:48:34 +08:00
commit 2f3d302b6b
480 changed files with 23865 additions and 7290 deletions

View File

@ -1,49 +0,0 @@
version: 1.0.{build}

image:
  - Visual Studio 2015
  - macos

environment:
  matrix:
    - ARCH: amd64
    - ARCH: x86

matrix:
  exclude:
    - image: macos
      ARCH: x86

for:
  -
    matrix:
      only:
        - image: Visual Studio 2015

    clone_folder: c:\dev\TDengine
    clone_depth: 1

    init:
      - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%

    before_build:
      - cd c:\dev\TDengine
      - md build

    build_script:
      - cd build
      - cmake -G "NMake Makefiles" .. -DBUILD_JDBC=false
      - nmake install

  -
    matrix:
      only:
        - image: macos

    clone_depth: 1

    build_script:
      - mkdir debug
      - cd debug
      - cmake .. > /dev/null
      - make > /dev/null

notifications:
  - provider: Email
    to:
      - sangshuduo@gmail.com
    on_build_success: true
    on_build_failure: true
    on_build_status_changed: true

View File

@ -1,13 +0,0 @@
# Use the latest 2.1 version of CircleCI pipeline process engine. See: https://circleci.com/docs/2.0/configuration-reference
version: 2.1

# Use a package of configuration called an orb.
orbs:
  # Declare a dependency on the welcome-orb
  welcome: circleci/welcome-orb@0.4.1

# Orchestrate or schedule a set of jobs
workflows:
  # Name the workflow "welcome"
  welcome:
    # Run the welcome/run job in its own container
    jobs:
      - welcome/run

View File

@ -1,266 +0,0 @@
---
kind: pipeline
name: test_amd64
platform:
os: linux
arch: amd64
steps:
- name: build
image: gcc
commands:
- apt-get update
- apt-get install -y cmake build-essential
- mkdir debug
- cd debug
- cmake ..
- make -j4
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- 2.0
- 3.0
---
kind: pipeline
name: test_arm64_bionic
platform:
os: linux
arch: arm64
steps:
- name: build
image: arm64v8/ubuntu:bionic
commands:
- apt-get update
- apt-get install -y cmake build-essential
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make -j4
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- 2.0
- 3.0
---
kind: pipeline
name: test_arm64_focal
platform:
os: linux
arch: arm64
steps:
- name: build
image: arm64v8/ubuntu:focal
commands:
- echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
- apt-get update
- apt-get install -y -qq cmake build-essential
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make -j4
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- 2.0
- 3.0
---
kind: pipeline
name: test_arm64_centos7
platform:
os: linux
arch: arm64
steps:
- name: build
image: arm64v8/centos:7
commands:
- yum install -y gcc gcc-c++ make cmake git
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make -j4
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- 2.0
- 3.0
---
kind: pipeline
name: test_arm64_centos8
platform:
os: linux
arch: arm64
steps:
- name: build
image: arm64v8/centos:8
commands:
- dnf install -y gcc gcc-c++ make cmake epel-release git libarchive
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make -j4
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- 2.0
- 3.0
---
kind: pipeline
name: test_arm_bionic
platform:
os: linux
arch: arm
steps:
- name: build
image: arm32v7/ubuntu:bionic
commands:
- apt-get update
- apt-get install -y cmake build-essential
- mkdir debug
- cd debug
- cmake .. -DCPUTYPE=aarch32 > /dev/null
- make -j4
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- 2.0
- 3.0
---
kind: pipeline
name: build_trusty
platform:
os: linux
arch: amd64
steps:
- name: build
image: ubuntu:trusty
commands:
- apt-get update
- apt-get install -y gcc cmake3 build-essential git binutils-2.26
- mkdir debug
- cd debug
- cmake ..
- make -j4
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- 2.0
- 3.0
---
kind: pipeline
name: build_xenial
platform:
os: linux
arch: amd64
steps:
- name: build
image: ubuntu:xenial
commands:
- apt-get update
- apt-get install -y gcc cmake build-essential
- mkdir debug
- cd debug
- cmake ..
- make -j4
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- 2.0
- 3.0
---
kind: pipeline
name: build_bionic
platform:
os: linux
arch: amd64
steps:
- name: build
image: ubuntu:bionic
commands:
- apt-get update
- apt-get install -y gcc cmake build-essential
- mkdir debug
- cd debug
- cmake ..
- make -j4
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- 2.0
- 3.0
---
kind: pipeline
name: build_centos7
platform:
os: linux
arch: amd64
steps:
- name: build
image: ansible/centos7-ansible
commands:
- yum install -y gcc gcc-c++ make cmake
- mkdir debug
- cd debug
- cmake ..
- make -j4
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- 2.0
- 3.0

.github/CODEOWNERS vendored Normal file
View File

@ -0,0 +1,26 @@
# reference
# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
# merge team
# @guanshengliang Shengliang Guan
# @zitsen Linhe Huo
# @wingwing2005 Ya Qiang Li
# @feici02 WANG Xu
# @hzcheng Hongze Cheng
# @dapan1121 Pan Wei
# @sheyanjie-qq She Yanjie
# @pigzhou ZacharyZhou
* @taosdata/merge
/.github/ @feici02
/cmake/ @guanshengliang
/contrib/ @guanshengliang
/deps/ @guanshengliang
/docs/ @guanshengliang @zitsen
/examples/ @guanshengliang @zitsen
/include/ @guanshengliang @hzcheng @dapan1121
/packaging/ @feici02
/source/ @guanshengliang @hzcheng @dapan1121
/tests/ @guanshengliang @zitsen
/tools/ @guanshengliang @zitsen
/utils/ @guanshengliang

.github/workflows/taosd-ci-build.yml vendored Normal file
View File

@ -0,0 +1,71 @@
name: TDengine Build

on:
  pull_request:
    branches:
      - 'main'
      - '3.0'
      - '3.1'
    paths-ignore:
      - 'docs/**'
      - 'packaging/**'
      - 'tests/**'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest
    name: Build and test
    steps:
      - name: Checkout the repository
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.18

      - name: Install system dependencies
        run: |
          sudo apt update -y
          sudo apt install -y build-essential cmake \
            libgeos-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev \
            zlib1g pkg-config libssl-dev gawk

      - name: Build and install TDengine
        run: |
          mkdir debug && cd debug
          cmake .. -DBUILD_TOOLS=true \
            -DBUILD_KEEPER=true \
            -DBUILD_HTTP=false \
            -DBUILD_TEST=true \
            -DBUILD_DEPENDENCY_TESTS=false
          make -j 4
          sudo make install
          which taosd
          which taosadapter
          which taoskeeper

      - name: Start taosd
        run: |
          cp /etc/taos/taos.cfg ./
          sudo echo "supportVnodes 256" >> taos.cfg
          nohup sudo taosd -c taos.cfg &

      - name: Start taosadapter
        run: nohup sudo taosadapter &

      - name: Run tests with taosBenchmark
        run: |
          taosBenchmark -t 10 -n 10 -y
          taos -s "select count(*) from test.meters"

      - name: Clean up
        if: always()
        run: |
          if pgrep taosd; then sudo pkill taosd; fi
          if pgrep taosadapter; then sudo pkill taosadapter; fi
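Read top to bottom, the workflow doubles as a recipe for a local smoke test. Below is a minimal sketch replaying the same steps on a Linux machine, assuming the prerequisites above are installed and the script is run from the repository root (the `sleep` is an addition, not part of the workflow):

```bash
#!/usr/bin/env bash
set -e

# Build with the same switches the workflow uses, then install system-wide.
mkdir -p debug && cd debug
cmake .. -DBUILD_TOOLS=true -DBUILD_KEEPER=true -DBUILD_HTTP=false \
         -DBUILD_TEST=true -DBUILD_DEPENDENCY_TESTS=false
make -j 4
sudo make install

# Start the server with a raised vnode limit, plus the REST adapter.
cp /etc/taos/taos.cfg ./
echo "supportVnodes 256" >> taos.cfg
nohup sudo taosd -c taos.cfg &
nohup sudo taosadapter &
sleep 5  # not in the workflow: give the daemons a moment to come up

# Smoke test: write 10 tables x 10 rows each, then count them back.
taosBenchmark -t 10 -n 10 -y
taos -s "select count(*) from test.meters"  # expect 100 rows
```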

View File

@ -1,4 +1,4 @@
name: TaosKeeper CI
name: taosKeeper CI
on:
push:

.gitignore vendored
View File

@ -156,6 +156,9 @@ pcre2_grep_test.sh
pcre2_chartables.c
geos-config
config.h
!contrib/xml2-cmake
!contrib/xml2-cmake/linux_x86_64/include/config.h
!contrib/xml2-cmake/CMakeLists.txt
pcre2.h
zconf.h
version.h
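The blanket `config.h` rule above ignores every file with that name, so the new `!contrib/xml2-cmake/...` negation rules are what keep the vendored libxml2 header tracked. A quick sanity check with `git check-ignore`, run from the repository root (`some/dir/config.h` is a hypothetical path for illustration):

```bash
# With -v, git check-ignore prints the .gitignore rule that matches a path;
# it exits non-zero and prints nothing when the path is not ignored.
git check-ignore -v some/dir/config.h          # matched by the config.h rule
git check-ignore -v contrib/xml2-cmake/linux_x86_64/include/config.h \
  || echo "not ignored: the negation rule wins"
```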

.lgtm.yml
View File

@ -1,402 +0,0 @@
##########################################################################################
# Customize file classifications. #
# Results from files under any classifier will be excluded from LGTM #
# statistics. #
##########################################################################################
##########################################################################################
# Use the `path_classifiers` block to define changes to the default classification of #
# files. #
##########################################################################################
path_classifiers:
# docs:
# Identify the top-level file called `generate_javadoc.py` as documentation-related.
test:
# Override LGTM's default classification of test files by excluding all files.
- exclude: /
# Classify all files in the top-level directories tests/ and testsuites/ as test code.
- tests
# - testsuites
# Classify all files with suffix `.test` as test code.
# Note: use only forward slash / as a path separator.
# Use ** to indicate an arbitrary parent path.
# Use * to indicate any sequence of characters excluding /.
# Always enclose the expression in double quotes if it includes *.
# - "**/*.test"
# Refine the classifications above by excluding files in test/util/.
# - exclude: test/util
# The default behavior is to tag all files created during the
# build as `generated`. Results are hidden for generated code. You can tag
# further files as being generated by adding them to the `generated` section.
generated:
# Exclude all `*.c` files under the `ui/` directory from classification as
# generated code.
# - exclude: ui/**/*.c
# By default, all files not checked into the repository are considered to be
# 'generated'.
# The default behavior is to tag library code as `library`. Results are hidden
# for library code. You can tag further files as being library code by adding them
# to the `library` section.
library:
- exclude: deps/
# The default behavior is to tag template files as `template`. Results are hidden
# for template files. You can tag further files as being template files by adding
# them to the `template` section.
template:
#- exclude: path/to/template/code/**/*.c
# Define your own category, for example: 'some_custom_category'.
some_custom_category:
# Classify all files in the top-level directory tools/ (or the top-level file
# called tools).
# - tools
#########################################################################################
# Use the `queries` block to change the default display of query results. #
#########################################################################################
# queries:
# Start by hiding the results of all queries.
# - exclude: "*"
# Then include all queries tagged 'security' and 'correctness', and with a severity of
# 'error'.
# - include:
# tags:
# - "security"
# - "correctness"
# severity: "error"
# Specifically hide the results of two queries.
# - exclude: cpp/use-of-goto
# - exclude: java/equals-on-unrelated-types
# Refine by including the `java/command-line-injection` query.
# - include: java/command-line-injection
#########################################################################################
# Define changes to the default code extraction process. #
# Each block configures the extraction of a single language, and modifies actions in a #
# named step. Every named step includes automatic default actions, #
# except for the 'prepare' step. The steps are performed in the following sequence: #
# prepare #
# after_prepare #
# configure (C/C++ only) #
# python_setup (Python only) #
# before_index #
# index #
##########################################################################################
#########################################################################################
# Environment variables available to the steps: #
#########################################################################################
# LGTM_SRC
# The root of the source tree.
# LGTM_WORKSPACE
# An existing (initially empty) folder outside the source tree.
# Used for temporary download and setup commands.
#########################################################################################
# Use the extraction block to define changes to the default code extraction process #
# for one or more languages. The settings for each language are defined in a child #
# block, with one or more steps. #
#########################################################################################
extraction:
# Define settings for C/C++ analysis
#####################################
cpp:
# The `prepare` step exists for customization on LGTM.com only.
prepare:
# # The `packages` section is valid for LGTM.com only. It names Ubuntu packages to
# # be installed.
packages:
- cmake
# Add an `after-prepare` step if you need to run commands after the prepare step.
# Each command should be listed on a separate line.
# This step is useful for C/C++ analysis where you want to prepare the environment
# for the `configure` step without changing the default behavior for that step.
# after_prepare:
#- export GNU_MAKE=make
#- export GIT=true
# The `configure` step generates build configuration files which the `index` step
# then uses to build the codebase.
configure:
command:
- mkdir build
- cd build
- cmake ..
# - ./prepare_deps
# Optional step. You should add a `before_index` step if you need to run commands
# before the `index` step.
# before_index:
# - export BOOST_DIR=$LGTM_SRC/boost
# - export GTEST_DIR=$LGTM_SRC/googletest
# - export HUNSPELL_DIR=$LGTM_SRC/hunspell
# - export CRYPTOPP_DIR=$LGTM_SRC/cryptopp
# The `index` step builds the code and extracts information during the build
# process.
index:
# Override the autobuild process by specifying a list of custom build commands
# to use instead.
build_command:
- cd build
- make
# - $GNU_MAKE -j2 -s
# Specify that all project or solution files should be used for extraction.
# Default: false.
# all_solutions: true
# Specify a list of one or more project or solution files for extraction.
# Default: LGTM chooses the file closest to the root of the repository (this may
# fail if there are multiple candidates).
# solution:
# - myProject.sln
# Specify MSBuild settings
# msbuild:
# Specify a list of additional arguments to MSBuild. Default: empty.
# arguments: /p:Platform=x64 /p:Configuration=Release
# Specify the MSBuild configuration to use, for example, debug or release.
# Default: read from the solution file or files.
# configuration:
# Specify the platform to target, for example: x86, x64, or Any CPU.
# Default: read from the solution file or files.
# platform:
# Specify the MSBuild target. Default: rebuild.
# target:
# Specify whether or not to perform a NuGet restore for extraction. Default: true.
# nuget_restore: false
# Specify a version of Microsoft Visual Studio to use for MSBuild or any custom
# build commands (build_command). For example:
# 10 for Visual Studio 2010
# 12 for Visual Studio 2012
# 14 for Visual Studio 2015
# 15 for Visual Studio 2017
# Default: read from project files.
# vstools_version: 10
# Define settings for C# analysis
##################################
# csharp:
# The `prepare` step exists for customization on LGTM.com only.
# prepare:
# packages:
# - example_package
# Add an `after-prepare` step if you need to run commands after the `prepare` step.
# Each command should be listed on a separate line.
# after_prepare:
# - export PATH=$LGTM_WORKSPACE/tools:$PATH
# The `index` step builds the code and extracts information during the build
# process.
#index:
# Specify that all project or solution files should be used for extraction.
# Default: false.
# all_solutions: true
# Specify a list of one or more project or solution files for extraction.
# Default: LGTM chooses the file closest to the root of the repository (this may
# fail if there are multiple candidates).
# solution:
# - myProject.sln
# Override the autobuild process by specifying a list of custom build commands
# to use instead.
# build_command:
# - ./example-compile-all.sh
# By default, LGTM analyzes the code by building it. You can override this,
# and tell LGTM not to build the code. Beware that this can lead
# to less accurate results.
# buildless: true
# Specify .NET Core settings.
# dotnet:
# Specify additional arguments to `dotnet build`.
# Default: empty.
# arguments: "example_arg"
# Specify the version of .NET Core SDK to use.
# Default: The version installed on the build machine.
# version: 2.1
# Specify MSBuild settings.
# msbuild:
# Specify a list of additional arguments to MSBuild. Default: empty.
# arguments: /P:WarningLevel=2
# Specify the MSBuild configuration to use, for example, debug or release.
# Default: read from the solution file or files.
# configuration: release
# Specify the platform to target, for example: x86, x64, or Any CPU.
# Default: read from the solution file or files.
# platform: x86
# Specify the MSBuild target. Default: rebuild.
# target: notest
# Specify whether or not to perform a NuGet restore for extraction. Default: true.
# nuget_restore: false
# Specify a version of Microsoft Visual Studio to use for MSBuild or any custom
# build commands (build_command). For example:
# 10 for Visual Studio 2010
# 12 for Visual Studio 2012
# 14 for Visual Studio 2015
# 15 for Visual Studio 2017
# Default: read from project files
# vstools_version: 10
# Specify additional options for the extractor,
# for example --fast to perform a faster extraction that produces a smaller
# database.
# extractor: "--fast"
# Define settings for Go analysis
##################################
# go:
# The `prepare` step exists for customization on LGTM.com only.
# prepare:
# packages:
# - example_package
# Add an `after-prepare` step if you need to run commands after the `prepare` step.
# Each command should be listed on a separate line.
# after_prepare:
# - export PATH=$LGTM_WORKSPACE/tools:$PATH
# The `index` step builds the code and extracts information during the build
# process.
# index:
# Override the autobuild process by specifying a list of custom build commands
# to use instead.
# build_command:
# - ./compile-all.sh
# Define settings for Java analysis
####################################
# java:
# The `prepare` step exists for customization on LGTM.com only.
# prepare:
# packages:
# - example_package
# Add an `after-prepare` step if you need to run commands after the prepare step.
# Each command should be listed on a separate line.
# after_prepare:
# - export PATH=$LGTM_WORKSPACE/tools:$PATH
# The `index` step extracts information from the files in the codebase.
# index:
# Specify Gradle settings.
# gradle:
# Specify the required Gradle version.
# Default: determined automatically.
# version: 4.4
# Override the autobuild process by specifying a list of custom build commands
# to use instead.
# build_command: ./compile-all.sh
# Specify the Java version required to build the project.
# java_version: 11
# Specify whether to extract Java .properties files
# Default: false
# properties_files: true
# Specify Maven settings.
# maven:
# Specify the path (absolute or relative) of a Maven settings file to use.
# Default: Maven uses a settings file in the default location, if it exists.
# settings_file: /opt/share/settings.xml
# Specify the path of a Maven toolchains file.
# Default: Maven uses a toolchains file in the default location, if it exists.
# toolchains_file: /opt/share/toolchains.xml
# Specify the required Maven version.
# Default: the Maven version is determined automatically, where feasible.
# version: 3.5.2
# Specify how XML files should be extracted:
# all = extract all XML files.
# default = only extract XML files named `AndroidManifest.xml`, `pom.xml`, and `web.xml`.
# disabled = do not extract any XML files.
# xml_mode: all
# Define settings for JavaScript analysis
##########################################
# javascript:
# The `prepare` step exists for customization on LGTM.com only.
# prepare:
# packages:
# - example_package
# Add an `after-prepare` step if you need to run commands after the prepare step.
# Each command should be listed on a separate line.
# after_prepare:
# - export PATH=$LGTM_WORKSPACE/tools:$PATH
# The `index` step extracts information from the files in the codebase.
# index:
# Specify a list of files and folders to extract.
# Default: The project root directory.
# include:
# - src/js
# Specify a list of files and folders to exclude from extraction.
# exclude:
# - thirdparty/lib
# You can add additional file types for LGTM to extract, by mapping file
# extensions (including the leading dot) to file types. The usual
# include/exclude patterns apply, so, for example, `.jsm` files under
# `thirdparty/lib` will not be extracted.
# filetypes:
# ".jsm": "js"
# ".tmpl": "html"
# Specify a list of glob patterns to include/exclude files from extraction; this
# is applied on top of the include/exclude paths from above; patterns are
# processed in the same way as for path classifiers above.
# Default: include all files with known extensions (such as .js, .ts and .html),
# but exclude files ending in `-min.js` or `.min.js` and folders named `node_modules`
# or `bower_components`
# filters:
# exclude any *.ts files anywhere.
# - exclude: "**/*.ts"
# but include *.ts files under src/js/typescript.
# - include: "src/js/typescript/**/*.ts"
# Specify how TypeScript files should be extracted:
# none = exclude all TypeScript files.
# basic = extract syntactic information from TypeScript files.
# full = extract syntactic and type information from TypeScript files.
# Default: full.
# typescript: basic
# By default, LGTM doesn't extract any XML files. You can override this by
# using the `xml_mode` property and setting it to `all`.
# xml_mode: all
# Define settings for Python analysis
######################################
# python:
# # The `prepare` step exists for customization on LGTM.com only.
# # prepare:
# # # The `packages` section is valid for LGTM.com only. It names packages to
# # # be installed.
# # packages: libpng-dev
# # This step is useful for Python analysis where you want to prepare the
# # environment for the `python_setup` step without changing the default behavior
# # for that step.
# after_prepare:
# - export PATH=$LGTM_WORKSPACE/tools:$PATH
# # This sets up the Python interpreter and virtual environment, ready for the
# # `index` step to extract the codebase.
# python_setup:
# # Specify packages that should NOT be installed despite being mentioned in the
# # requirements.txt file.
# # Default: no package marked for exclusion.
# exclude_requirements:
# - pywin32
# # Specify a list of pip packages to install.
# # If any of these packages cannot be installed, the extraction will fail.
# requirements:
# - Pillow
# # Specify a list of requirements text files to use to set up the environment,
# # or false for none. Default: any requirements.txt, test-requirements.txt,
# # and similarly named files identified in the codebase are used.
# requirements_files:
# - required-packages.txt
# # Specify a setup.py file to use to set up the environment, or false for none.
# # Default: any setup.py files identified in the codebase are used in preference
# # to any requirements text files.
# setup_py: new-setup.py
# # Override the version of the Python interpreter used for setup and extraction
# # Default: Python 3.
# version: 2
# # Optional step. You should add a `before_index` step if you need to run commands
# # before the `index` step.
# before_index:
# - antlr4 -Dlanguage=Python3 Grammar.g4
# # The `index` step extracts information from the files in the codebase.
# index:
# # Specify a list of files and folders to exclude from extraction.
# # Default: Git submodules and Subversion externals.
# exclude:
# - legacy-implementation
# - thirdparty/libs
# filters:
# - exclude: "**/documentation/examples/snippets/*.py"
# - include: "**/documentation/examples/test_application/*"
# include:
# - example/to/include

View File

@ -7,6 +7,9 @@ file_zh_changed = ''
file_en_changed = ''
file_no_doc_changed = '1'
file_only_tdgpt_change_except = '1'
tdgpt_file = "forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics"
def abortPreviousBuilds() {
def currentJobName = env.JOB_NAME
def currentBuildNumber = env.BUILD_NUMBER.toInteger()
@ -67,7 +70,7 @@ def check_docs(){
returnStdout: true
)
file_no_doc_changed = sh (
def file_no_doc_changed = sh (
script: '''
cd ${WKC}
git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" || :
@ -78,7 +81,7 @@ def check_docs(){
file_only_tdgpt_change_except = sh (
script: '''
cd ${WKC}
git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v "forecastoperator.c\\|anomalywindowoperator.c\\|tanalytics.h\\|tanalytics.c" |grep -v "tsim/analytics" |grep -v "tdgpt_cases.task" || :
git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -Ev "forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics" ||:
''',
returnStdout: true
).trim()
@ -365,8 +368,8 @@ def pre_test_build_win() {
'''
bat '''
cd %WIN_COMMUNITY_ROOT%/tests/ci
pip3 install taospy==2.7.16
pip3 install taos-ws-py==0.3.5
pip3 install taospy==2.7.21
pip3 install taos-ws-py==0.3.8
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
return 1
@ -570,7 +573,7 @@ pipeline {
cd ${WKC}/tests/parallel_test
./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + '''
'''
if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tsim\/analytics|tdgpt_cases.task/ ) {
if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics/ ) {
sh '''
cd ${WKC}/tests/parallel_test
export DEFAULT_RETRY_TIME=2
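The `tdgpt_file` pattern defined at the top of the Jenkinsfile now drives both checks: `grep -Ev` strips TDgpt files to detect "other" changes, while the `=~` gate fires the TDgpt cases when any of those files are touched. A hedged shell sketch of that filtering logic (`main` stands in for `${CHANGE_TARGET}` here):

```bash
tdgpt_file="forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics"

# Files changed against the merge base, with docs and markdown dropped.
changed=$(git --no-pager diff --name-only FETCH_HEAD "$(git merge-base FETCH_HEAD main)" \
  | grep -v "^docs/en/" | grep -v "^docs/zh/" | grep -v ".md$" || :)

# Non-empty output here => something besides TDgpt files changed.
echo "$changed" | grep -Ev "$tdgpt_file" || :

# Non-empty output here => a TDgpt file changed, so run the TDgpt cases too.
echo "$changed" | grep -E "$tdgpt_file" || :
```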

View File

@ -1,6 +1,5 @@
<p>
<p align="center">
<a href="https://tdengine.com" target="_blank">
<a href="https://www.taosdata.com" target="_blank">
<img
src="docs/assets/tdengine.svg"
alt="TDengine"
@ -8,14 +7,8 @@
/>
</a>
</p>
<p>
[![Build Status](https://travis-ci.org/taosdata/TDengine.svg?branch=master)](https://travis-ci.org/taosdata/TDengine)
[![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=3.0)](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
简体中文 | [English](README.md) | [TDengine Cloud Service](https://cloud.taosdata.com/?utm_medium=cn&utm_source=github) | Many positions are open for hire; see [here](https://www.taosdata.com/cn/careers/)
简体中文 | [English](README.md) | [TDengine Cloud Service](https://cloud.taosdata.com/?utm_medium=cn&utm_source=github) | Many positions are open for hire; see [here](https://www.taosdata.com/careers/)
# Introduction to TDengine

README.md
View File

@ -1,4 +1,3 @@
<p>
<p align="center">
<a href="https://tdengine.com" target="_blank">
<img
@ -8,9 +7,13 @@
/>
</a>
</p>
<p>
[![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/taosdata/tdengine/taosd-ci-build.yml)](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=3.0)](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
![GitHub commit activity](https://img.shields.io/github/commit-activity/m/taosdata/tdengine)
<br />
![GitHub Release](https://img.shields.io/github/v/release/taosdata/tdengine)
![GitHub License](https://img.shields.io/github/license/taosdata/tdengine)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
<br />
[![Twitter Follow](https://img.shields.io/twitter/follow/tdenginedb?label=TDengine&style=social)](https://twitter.com/tdenginedb)
@ -23,24 +26,33 @@ English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine
# Table of Contents
1. [What is TDengine?](#1-what-is-tdengine)
2. [Documentation](#2-documentation)
3. [Building](#3-building)
1. [Install build tools](#31-install-build-tools)
1. [Get the source codes](#32-get-the-source-codes)
1. [Special Note](#33-special-note)
1. [Build TDengine](#34-build-tdengine)
4. [Installing](#4-installing)
1. [On Linux platform](#41-on-linux-platform)
1. [On Windows platform](#42-on-windows-platform)
1. [On macOS platform](#43-on-macos-platform)
1. [Quick Run](#44-quick-run)
5. [Try TDengine](#5-try-tdengine)
6. [Developing with TDengine](#6-developing-with-tdengine)
7. [Contribute to TDengine](#7-contribute-to-tdengine)
8. [Join the TDengine Community](#8-join-the-tdengine-community)
1. [Introduction](#1-introduction)
1. [Documentation](#2-documentation)
1. [Prerequisites](#3-prerequisites)
- [3.1 Prerequisites On Linux](#31-on-linux)
- [3.2 Prerequisites On macOS](#32-on-macos)
- [3.3 Prerequisites On Windows](#33-on-windows)
- [3.4 Clone the repo](#34-clone-the-repo)
1. [Building](#4-building)
- [4.1 Build on Linux](#41-build-on-linux)
- [4.2 Build on macOS](#42-build-on-macos)
- [4.3 Build On Windows](#43-build-on-windows)
1. [Packaging](#5-packaging)
1. [Installation](#6-installation)
- [6.1 Install on Linux](#61-install-on-linux)
- [6.2 Install on macOS](#62-install-on-macos)
- [6.3 Install on Windows](#63-install-on-windows)
1. [Running](#7-running)
- [7.1 Run TDengine on Linux](#71-run-tdengine-on-linux)
- [7.2 Run TDengine on macOS](#72-run-tdengine-on-macos)
- [7.3 Run TDengine on Windows](#73-run-tdengine-on-windows)
1. [Testing](#8-testing)
1. [Releasing](#9-releasing)
1. [Workflow](#10-workflow)
1. [Coverage](#11-coverage)
1. [Contributing](#12-contributing)
# 1. What is TDengine
# 1. Introduction
TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-series databases with the following advantages:
@ -62,132 +74,91 @@ For a full list of TDengine competitive advantages, please [check here](https://
For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) ([TDengine 文档](https://docs.taosdata.com))
# 3. Building
# 3. Prerequisites
At the moment, TDengine server supports running on Linux/Windows/macOS systems. Any application can also use the RESTful interface provided by taosAdapter to connect to the taosd service. TDengine supports X64/ARM64 CPUs, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Cross-compilation is not currently supported.
## 3.1 On Linux
You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to installing from source.
<details>
TDengine provides a few useful tools such as taosBenchmark (formerly named taosdemo) and taosdump, which were part of TDengine. By default, compiling TDengine does not include taosTools; you can use `cmake .. -DBUILD_TOOLS=true` to compile them together with TDengine.
<summary>Install required tools on Linux</summary>
To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or a higher version in the project directory.
## 3.1 Install build tools
### Ubuntu 18.04 and above or Debian
### For Ubuntu 18.04, 20.04, 22.04
```bash
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
sudo apt-get update
sudo apt-get install -y gcc cmake build-essential git libjansson-dev \
libsnappy-dev liblzma-dev zlib1g-dev pkg-config
```
#### Install build dependencies for taosTools
To build the [taosTools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed.
### For CentOS 8
```bash
sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config
```
### CentOS 7.9
```bash
sudo yum install epel-release
sudo yum update
sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
yum install -y epel-release gcc gcc-c++ make cmake git perl dnf-plugins-core
yum config-manager --set-enabled powertools
yum install -y zlib-static xz-devel snappy-devel jansson-devel pkgconfig libatomic-static libstdc++-static
```
### CentOS 8/Fedora/Rocky Linux
</details>
## 3.2 On macOS
<details>
<summary>Install required tools on macOS</summary>
Please install the dependencies with [brew](https://brew.sh/).
```bash
sudo dnf install -y gcc gcc-c++ make cmake epel-release gflags git openssl-devel
```
#### Install build dependencies for taosTools on CentOS
#### CentOS 7.9
```
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
```
#### CentOS 8/Fedora/Rocky Linux
```
sudo yum install -y epel-release
sudo yum install -y dnf-plugins-core
sudo yum config-manager --set-enabled powertools
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
```
Note: Since snappy lacks pkg-config support (see [this link](https://github.com/google/snappy/pull/86)), cmake reports that libsnappy cannot be found. However, snappy still works well.
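A quick way to see this on your own machine (a sketch; library locations vary by distro, and the cmake warning is cosmetic either way):

```bash
# snappy ships no snappy.pc, so pkg-config cannot find it even when the
# library itself is installed and perfectly usable.
pkg-config --exists snappy && echo "found by pkg-config" || echo "no .pc file (expected)"
ls /usr/lib64/libsnappy* /usr/lib/x86_64-linux-gnu/libsnappy* 2>/dev/null
```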
If the PowerTools installation fails, you can try to use:
```
sudo yum config-manager --set-enabled powertools
```
#### For CentOS + devtoolset
Besides the above dependencies, please run the following commands:
```
sudo yum install centos-release-scl
sudo yum install devtoolset-9 devtoolset-9-libatomic-devel
scl enable devtoolset-9 -- bash
```
### macOS
```
brew install argp-standalone gflags pkgconfig
```
### Setup golang environment
</details>
TDengine includes a few components, such as taosAdapter, developed in Go. Please refer to the official documentation at golang.org for Go environment setup.
## 3.3 On Windows
Please use Go version 1.20+. For users in China, we recommend using a proxy to accelerate package downloading.
<details>
```
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct
```
<summary>Install required tools on Windows</summary>
taosAdapter is not built by default, but you can use the following command to build it as the service providing the RESTful interface.
Work in Progress.
```
cmake .. -DBUILD_HTTP=false
```
</details>
### Setup rust environment
## 3.4 Clone the repo
TDengine includes a few components developed in Rust. Please refer to the official documentation at rust-lang.org for Rust environment setup.
<details>
## 3.2 Get the source codes
<summary>Clone the repo</summary>
First of all, you may clone the source code from GitHub:
Clone the repository to the target machine:
```bash
git clone https://github.com/taosdata/TDengine.git
cd TDengine
```
You can modify the file ~/.gitconfig to use the SSH protocol instead of HTTPS for better download speed. You will need to upload your SSH public key to GitHub first. Please refer to the GitHub official documentation for details.
```
[url "git@github.com:"]
insteadOf = https://github.com/
```
> **NOTE:**
> TDengine Connectors can be found in following repositories: [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go), [Python Connector](https://github.com/taosdata/taos-connector-python), [Node.js Connector](https://github.com/taosdata/taos-connector-node), [C# Connector](https://github.com/taosdata/taos-connector-dotnet), [Rust Connector](https://github.com/taosdata/taos-connector-rust).
## 3.3 Special Note
</details>
The [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go), [Python Connector](https://github.com/taosdata/taos-connector-python), [Node.js Connector](https://github.com/taosdata/taos-connector-node), [C# Connector](https://github.com/taosdata/taos-connector-dotnet), [Rust Connector](https://github.com/taosdata/taos-connector-rust) and [Grafana plugin](https://github.com/taosdata/grafanaplugin) have been moved to standalone repositories.
# 4. Building
## 3.4 Build TDengine
At the moment, TDengine server supports running on Linux/Windows/macOS systems. Any application can also use the RESTful interface provided by taosAdapter to connect to the taosd service. TDengine supports X64/ARM64 CPUs, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Cross-compilation is not currently supported.
### On Linux platform
You can choose to install through source code, [container](https://docs.tdengine.com/get-started/deploy-in-docker/), [installation package](https://docs.tdengine.com/get-started/deploy-from-package/) or [Kubernetes](https://docs.tdengine.com/operations-and-maintenance/deploy-your-cluster/#kubernetes-deployment). This quick guide only applies to installing from source.
TDengine provides a few useful tools such as taosBenchmark (formerly named taosdemo) and taosdump, which were part of TDengine. By default, compiling TDengine does not include taosTools; you can use `cmake .. -DBUILD_TOOLS=true` to compile them together with TDengine.
To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or a higher version in the project directory.
## 4.1 Build on Linux
<details>
<summary>Detailed steps to build on Linux</summary>
You can run the bash script `build.sh` to build both TDengine and taosTools including taosBenchmark and taosdump as below:
@ -198,29 +169,46 @@ You can run the bash script `build.sh` to build both TDengine and taosTools incl
It is equivalent to executing the following commands:
```bash
mkdir debug
cd debug
mkdir debug && cd debug
cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true
make
```
You can use jemalloc as the memory allocator instead of glibc:
```
apt install autoconf
```bash
cmake .. -DJEMALLOC_ENABLED=true
```
TDengine's build script can detect the host machine's architecture on X86-64, X86 and arm64 platforms.
You can also specify the CPUTYPE option, such as aarch64, if the detection result is not correct:
aarch64:
TDengine's build script can auto-detect the host machine's architecture on x86, x86-64 and arm64 platforms.
You can also specify the architecture manually with the CPUTYPE option:
```bash
cmake .. -DCPUTYPE=aarch64 && cmake --build .
```
### On Windows platform
</details>
## 4.2 Build on macOS
<details>
<summary>Detailed steps to build on macOS</summary>
Please install Xcode command line tools and cmake. Verified with Xcode 11.4+ on Catalina and Big Sur.
```shell
mkdir debug && cd debug
cmake .. && cmake --build .
```
</details>
## 4.3 Build on Windows
<details>
<summary>Detailed steps to build on Windows</summary>
If you use Visual Studio 2013, please open a command window by executing "cmd.exe".
Please specify "amd64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.
@ -251,31 +239,67 @@ mkdir debug && cd debug
cmake .. -G "NMake Makefiles"
nmake
```
</details>
### On macOS platform
# 5. Packaging
Please install Xcode command line tools and cmake. Verified with Xcode 11.4+ on Catalina and Big Sur.
The TDengine community installer cannot be created from this repository alone, due to some component dependencies. We are still working on this improvement.
```shell
mkdir debug && cd debug
cmake .. && cmake --build .
```
# 6. Installation
# 4. Installing
## 6.1 Install on Linux
## 4.1 On Linux platform
<details>
After building successfully, TDengine can be installed by
<summary>Detailed steps to install on Linux</summary>
After building successfully, TDengine can be installed by:
```bash
sudo make install
```
Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section.
Installing from source code will also configure service management for TDengine. Users can also choose to [install from packages](https://docs.tdengine.com/get-started/deploy-from-package/) for it.
Installing from source code will also configure service management for TDengine. Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it.
</details>
To start the service after installation, in a terminal, use:
## 6.2 Install on macOS
<details>
<summary>Detailed steps to install on macOS</summary>
After building successfully, TDengine can be installed by:
```bash
sudo make install
```
</details>
## 6.3 Install on Windows
<details>
<summary>Detailed steps to install on Windows</summary>
After building successfully, TDengine can be installed by:
```cmd
nmake install
```
</details>
# 7. Running
## 7.1 Run TDengine on Linux
<details>
<summary>Detailed steps to run on Linux</summary>
To start the service after installation on Linux, in a terminal, use:
```bash
sudo systemctl start taosd
@ -289,27 +313,29 @@ taos
If the TDengine CLI connects to the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown.
## 4.2 On Windows platform
After building successfully, TDengine can be installed by:
```cmd
nmake install
```
## 4.3 On macOS platform
After building successfully, TDengine can be installed by:
If you don't want to run TDengine as a service, you can run it in the current shell. For example, to quickly start a TDengine server after building, run the command below in a terminal (we take Linux as an example; the command on Windows would be `taosd.exe`):
```bash
sudo make install
./build/bin/taosd -c test/cfg
```
Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section.
In another terminal, use the TDengine CLI to connect to the server:
Installing from source code will also configure service management for TDengine. Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it.
```bash
./build/bin/taos -c test/cfg
```
To start the service after installation, double-click /applications/TDengine to start the program, or in a terminal, use:
Option `-c test/cfg` specifies the system configuration file directory.
</details>
## 7.2 Run TDengine on macOS
<details>
<summary>Detailed steps to run on macOS</summary>
To start the service after installation on macOS, double-click /applications/TDengine to start the program, or in a terminal, use:
```bash
sudo launchctl start com.tdengine.taosd
@ -323,64 +349,63 @@ taos
If the TDengine CLI connects to the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown.
## 4.4 Quick Run
</details>
If you don't want to run TDengine as a service, you can run it in the current shell. For example, to quickly start a TDengine server after building, run the command below in a terminal (we take Linux as an example; the command on Windows would be `taosd.exe`):
```bash
./build/bin/taosd -c test/cfg
## 7.3 Run TDengine on Windows
<details>
<summary>Detailed steps to run on Windows</summary>
You can start the TDengine server on Windows with the commands below:
```cmd
.\build\bin\taosd.exe -c test\cfg
```
In another terminal, use the TDengine CLI to connect to the server:
```bash
./build/bin/taos -c test/cfg
```cmd
.\build\bin\taos.exe -c test\cfg
```
option "-c test/cfg" specifies the system configuration file directory.
# 5. Try TDengine
</details>
It is easy to run SQL commands from the TDengine CLI, which is the same as in other SQL databases.
# 8. Testing
```sql
CREATE DATABASE demo;
USE demo;
CREATE TABLE t (ts TIMESTAMP, speed INT);
INSERT INTO t VALUES('2019-07-15 00:00:00', 10);
INSERT INTO t VALUES('2019-07-15 01:00:00', 20);
SELECT * FROM t;
ts | speed |
===================================
19-07-15 00:00:00.000| 10|
19-07-15 01:00:00.000| 20|
Query OK, 2 row(s) in set (0.001700s)
For how to run different types of tests on TDengine, please see [Testing TDengine](./tests/README.md).
# 9. Releasing
For the complete list of TDengine Releases, please see [Releases](https://github.com/taosdata/TDengine/releases).
# 10. Workflow
The TDengine build check workflow can be found in this [GitHub Action](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml). More workflows will be available soon.
# 11. Coverage
The latest TDengine test coverage report can be found on [coveralls.io](https://coveralls.io/github/taosdata/TDengine)
<details>
<summary>How to run the coverage report locally?</summary>
To create the test coverage report (in HTML format) locally, please run the following commands:
```bash
cd tests
bash setup-lcov.sh -v 1.16 && ./run_local_coverage.sh -b main -c task
# on the main branch, running cases in longtimeruning_cases.task
# for more information about options, please refer to ./run_local_coverage.sh -h
```
> **NOTE:**
> Please note that the -b and -i options will recompile TDengine with the -DCOVER=true option, which may take a considerable amount of time.
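For reference, the lcov flow that `run_local_coverage.sh` wraps looks roughly like the sketch below, assuming TDengine was built with `-DCOVER=true` in `debug/` and the test cases have already run:

```bash
# Capture counters from the instrumented build, drop system headers,
# then render an HTML report under coverage-report/.
lcov --capture --directory debug --output-file coverage.info
lcov --remove coverage.info '/usr/*' --output-file coverage.info
genhtml coverage.info --output-directory coverage-report
```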
# 6. Developing with TDengine
</details>
## Official Connectors
# 12. Contributing
TDengine provides a rich set of development tools for users to develop with TDengine. Follow the links below to find your desired connectors and relevant documentation.
- [Java](https://docs.tdengine.com/reference/connectors/java/)
- [C/C++](https://docs.tdengine.com/reference/connectors/cpp/)
- [Python](https://docs.tdengine.com/reference/connectors/python/)
- [Go](https://docs.tdengine.com/reference/connectors/go/)
- [Node.js](https://docs.tdengine.com/reference/connectors/node/)
- [Rust](https://docs.tdengine.com/reference/connectors/rust/)
- [C#](https://docs.tdengine.com/reference/connectors/csharp/)
- [RESTful API](https://docs.tdengine.com/reference/connectors/rest-api/)
# 7. Contribute to TDengine
Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to the project.
# 8. Join the TDengine Community
For more information about TDengine, you can follow us on social media and join our Discord server:
- [Discord](https://discord.com/invite/VZdSuUg4pS)
- [Twitter](https://twitter.com/TDengineDB)
- [LinkedIn](https://www.linkedin.com/company/tdengine/)
- [YouTube](https://www.youtube.com/@tdengine)
Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to TDengine.

View File

@ -2,7 +2,7 @@
# addr2line
ExternalProject_Add(addr2line
GIT_REPOSITORY https://github.com/davea42/libdwarf-addr2line.git
GIT_TAG master
GIT_TAG main
SOURCE_DIR "${TD_CONTRIB_DIR}/addr2line"
BINARY_DIR "${TD_CONTRIB_DIR}/addr2line"
CONFIGURE_COMMAND ""

View File

@ -2,6 +2,7 @@
ExternalProject_Add(azure
URL https://github.com/Azure/azure-sdk-for-cpp/archive/refs/tags/azure-storage-blobs_12.13.0-beta.1.tar.gz
URL_HASH SHA256=3eca486fd60e3522d0a633025ecd652a71515b1e944799b2e8ee31fd590305a9
DEPENDS xml2
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1"

View File

@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.3.5.0.alpha")
SET(TD_VER_NUMBER "3.3.5.2.alpha")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@ -12,7 +12,7 @@ ExternalProject_Add(curl2
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl #--enable-debug
CONFIGURE_COMMAND ${CONTRIB_CONFIG_ENV} ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-websockets --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl #--enable-debug
BUILD_COMMAND make -j
INSTALL_COMMAND make install
TEST_COMMAND ""

View File

@ -6,9 +6,9 @@ ExternalProject_Add(openssl
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/openssl"
BUILD_IN_SOURCE TRUE
#BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./Configure --prefix=$ENV{HOME}/.cos-local.2 no-shared
BUILD_ALWAYS 1
UPDATE_COMMAND ""
CONFIGURE_COMMAND ${CONTRIB_CONFIG_ENV} ./Configure --prefix=$ENV{HOME}/.cos-local.2 no-shared
BUILD_COMMAND make -j
INSTALL_COMMAND make install_sw -j
TEST_COMMAND ""

View File

@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG main
GIT_TAG 3.0
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -0,0 +1,13 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 3.0
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)

View File

@ -2,7 +2,7 @@
# taosws-rs
ExternalProject_Add(taosws-rs
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
GIT_TAG main
GIT_TAG 3.0
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -1,19 +1,16 @@
# xml2
ExternalProject_Add(xml2
URL https://download.gnome.org/sources/libxml2/2.11/libxml2-2.11.5.tar.xz
URL_HASH SHA256=3727b078c360ec69fa869de14bd6f75d7ee8d36987b071e6928d4720a28df3a6
#https://github.com/GNOME/libxml2/archive/refs/tags/v2.11.5.tar.gz
#GIT_REPOSITORY https://github.com/GNOME/libxml2
#GIT_TAG v2.11.5
URL https://github.com/GNOME/libxml2/archive/refs/tags/v2.10.4.tar.gz
URL_HASH SHA256=6f6fb27f91bb65f9d7196e3c616901b3e18a7dea31ccc2ae857940b125faa780
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/xml2"
SOURCE_DIR "${TD_CONTRIB_DIR}/libxml2"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --enable-shared=no --enable-static=yes --without-python --without-lzma
BUILD_COMMAND make -j
INSTALL_COMMAND make install && ln -sf $ENV{HOME}/.cos-local.2/include/libxml2/libxml $ENV{HOME}/.cos-local.2/include/libxml
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
GIT_SHALLOW true
)

View File

@ -17,7 +17,6 @@ elseif(${BUILD_WITH_COS})
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
endif(${BUILD_WITH_COS})
configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
@ -141,11 +140,16 @@ if(${BUILD_WITH_SQLITE})
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_SQLITE})
# libcurl
if(NOT ${TD_WINDOWS})
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.2/)
cat("${TD_SUPPORT_DIR}/ssl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(NOT ${TD_WINDOWS})
# s3
if(${BUILD_WITH_S3})
cat("${TD_SUPPORT_DIR}/ssl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/xml2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/libs3_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/azure_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_S3)
@ -155,7 +159,6 @@ elseif(${BUILD_WITH_COS})
# cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_COS)
endif()
@ -194,6 +197,11 @@ endif()
# lemon
cat("${TD_SUPPORT_DIR}/lemon_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# Force CC=cc on macOS, because the default CC setting in the generated Makefile has issues finding standard library headers
IF(${TD_DARWIN})
SET(CONTRIB_CONFIG_ENV "CC=cc")
ENDIF()
# download dependencies
configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
@ -647,7 +655,12 @@ if(${BUILD_PCRE2})
endif(${BUILD_PCRE2})
if(${TD_LINUX} AND ${BUILD_WITH_S3})
add_subdirectory(azure-cmake EXCLUDE_FROM_ALL)
set(ORIG_CMAKE_C_FLAGS ${CMAKE_C_FLAGS})
string(REPLACE " -Werror " " " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
add_subdirectory(xml2-cmake)
set(CMAKE_C_FLAGS ${ORIG_CMAKE_C_FLAGS})
add_subdirectory(azure-cmake)
endif()
IF(TD_LINUX)

View File

@ -36,10 +36,6 @@ target_include_directories(
)
find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
# find_library(CURL_LIBRARY curl)
# find_library(XML2_LIBRARY xml2)
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
@ -50,9 +46,8 @@ target_link_libraries(
PRIVATE ${CURL_LIBRARY}
PRIVATE ${SSL_LIBRARY}
PRIVATE ${CRYPTO_LIBRARY}
PRIVATE ${XML2_LIBRARY}
# PRIVATE xml2
PRIVATE _libxml2
PRIVATE zlib
# PRIVATE ${CoreFoundation_Library}

View File

@ -0,0 +1,58 @@
set(LIBXML2_SOURCE_DIR "${TD_CONTRIB_DIR}/libxml2")
set(SRCS
"${LIBXML2_SOURCE_DIR}/SAX.c"
"${LIBXML2_SOURCE_DIR}/entities.c"
"${LIBXML2_SOURCE_DIR}/encoding.c"
"${LIBXML2_SOURCE_DIR}/error.c"
"${LIBXML2_SOURCE_DIR}/parserInternals.c"
"${LIBXML2_SOURCE_DIR}/parser.c"
"${LIBXML2_SOURCE_DIR}/tree.c"
"${LIBXML2_SOURCE_DIR}/hash.c"
"${LIBXML2_SOURCE_DIR}/list.c"
"${LIBXML2_SOURCE_DIR}/xmlIO.c"
"${LIBXML2_SOURCE_DIR}/xmlmemory.c"
"${LIBXML2_SOURCE_DIR}/uri.c"
"${LIBXML2_SOURCE_DIR}/valid.c"
"${LIBXML2_SOURCE_DIR}/xlink.c"
"${LIBXML2_SOURCE_DIR}/HTMLparser.c"
"${LIBXML2_SOURCE_DIR}/HTMLtree.c"
"${LIBXML2_SOURCE_DIR}/debugXML.c"
"${LIBXML2_SOURCE_DIR}/xpath.c"
"${LIBXML2_SOURCE_DIR}/xpointer.c"
"${LIBXML2_SOURCE_DIR}/xinclude.c"
"${LIBXML2_SOURCE_DIR}/nanohttp.c"
"${LIBXML2_SOURCE_DIR}/nanoftp.c"
"${LIBXML2_SOURCE_DIR}/catalog.c"
"${LIBXML2_SOURCE_DIR}/globals.c"
"${LIBXML2_SOURCE_DIR}/threads.c"
"${LIBXML2_SOURCE_DIR}/c14n.c"
"${LIBXML2_SOURCE_DIR}/xmlstring.c"
"${LIBXML2_SOURCE_DIR}/buf.c"
"${LIBXML2_SOURCE_DIR}/xmlregexp.c"
"${LIBXML2_SOURCE_DIR}/xmlschemas.c"
"${LIBXML2_SOURCE_DIR}/xmlschemastypes.c"
"${LIBXML2_SOURCE_DIR}/xmlunicode.c"
"${LIBXML2_SOURCE_DIR}/triostr.c"
"${LIBXML2_SOURCE_DIR}/xmlreader.c"
"${LIBXML2_SOURCE_DIR}/relaxng.c"
"${LIBXML2_SOURCE_DIR}/dict.c"
"${LIBXML2_SOURCE_DIR}/SAX2.c"
"${LIBXML2_SOURCE_DIR}/xmlwriter.c"
"${LIBXML2_SOURCE_DIR}/legacy.c"
"${LIBXML2_SOURCE_DIR}/chvalid.c"
"${LIBXML2_SOURCE_DIR}/pattern.c"
"${LIBXML2_SOURCE_DIR}/xmlsave.c"
"${LIBXML2_SOURCE_DIR}/xmlmodule.c"
"${LIBXML2_SOURCE_DIR}/schematron.c"
"${LIBXML2_SOURCE_DIR}/xzlib.c"
)
add_library(_libxml2 ${SRCS})
#target_link_libraries(_libxml2 PRIVATE td_contrib::zlib)
target_link_libraries(_libxml2 PRIVATE zlib)
target_include_directories(_libxml2 BEFORE PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include")
target_include_directories(_libxml2 BEFORE PUBLIC "${LIBXML2_SOURCE_DIR}/include")
add_library(td_contrib::libxml2 ALIAS _libxml2)

View File

@ -0,0 +1,285 @@
/* config.h. Generated from config.h.in by configure. */
/* config.h.in. Generated from configure.ac by autoheader. */
/* Type cast for the gethostbyname() argument */
#define GETHOSTBYNAME_ARG_CAST /**/
/* Define to 1 if you have the <arpa/inet.h> header file. */
#define HAVE_ARPA_INET_H 1
/* Define to 1 if you have the <arpa/nameser.h> header file. */
#define HAVE_ARPA_NAMESER_H 1
/* Whether struct sockaddr::__ss_family exists */
/* #undef HAVE_BROKEN_SS_FAMILY */
/* Define to 1 if you have the <ctype.h> header file. */
#define HAVE_CTYPE_H 1
/* Define to 1 if you have the <dirent.h> header file. */
#define HAVE_DIRENT_H 1
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* Have dlopen based dso */
#define HAVE_DLOPEN /**/
/* Define to 1 if you have the <dl.h> header file. */
/* #undef HAVE_DL_H */
/* Define to 1 if you have the <errno.h> header file. */
#define HAVE_ERRNO_H 1
/* Define to 1 if you have the <fcntl.h> header file. */
#define HAVE_FCNTL_H 1
/* Define to 1 if you have the <float.h> header file. */
#define HAVE_FLOAT_H 1
/* Define to 1 if you have the `fprintf' function. */
#define HAVE_FPRINTF 1
/* Define to 1 if you have the `ftime' function. */
#define HAVE_FTIME 1
/* Define if getaddrinfo is there */
#define HAVE_GETADDRINFO /**/
/* Define to 1 if you have the `gettimeofday' function. */
#define HAVE_GETTIMEOFDAY 1
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* Define to 1 if you have the `isascii' function. */
#define HAVE_ISASCII 1
/* Define if isinf is there */
#define HAVE_ISINF /**/
/* Define if isnan is there */
#define HAVE_ISNAN /**/
/* Define if history library is there (-lhistory) */
/* #undef HAVE_LIBHISTORY */
/* Define if pthread library is there (-lpthread) */
#define HAVE_LIBPTHREAD /**/
/* Define if readline library is there (-lreadline) */
/* #undef HAVE_LIBREADLINE */
/* Define to 1 if you have the <limits.h> header file. */
#define HAVE_LIMITS_H 1
/* Define to 1 if you have the `localtime' function. */
#define HAVE_LOCALTIME 1
/* Define to 1 if you have the <lzma.h> header file. */
/* #undef HAVE_LZMA_H */
/* Define to 1 if you have the <malloc.h> header file. */
#define HAVE_MALLOC_H 1
/* Define to 1 if you have the <math.h> header file. */
#define HAVE_MATH_H 1
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* Define to 1 if you have the `mmap' function. */
#define HAVE_MMAP 1
/* Define to 1 if you have the `munmap' function. */
#define HAVE_MUNMAP 1
/* mmap() is no good without munmap() */
#if defined(HAVE_MMAP) && !defined(HAVE_MUNMAP)
# undef /**/ HAVE_MMAP
#endif
/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
/* #undef HAVE_NDIR_H */
/* Define to 1 if you have the <netdb.h> header file. */
#define HAVE_NETDB_H 1
/* Define to 1 if you have the <netinet/in.h> header file. */
#define HAVE_NETINET_IN_H 1
/* Define to 1 if you have the <poll.h> header file. */
#define HAVE_POLL_H 1
/* Define to 1 if you have the `printf' function. */
#define HAVE_PRINTF 1
/* Define if <pthread.h> is there */
#define HAVE_PTHREAD_H /**/
/* Define to 1 if you have the `putenv' function. */
#define HAVE_PUTENV 1
/* Define to 1 if you have the `rand' function. */
#define HAVE_RAND 1
/* Define to 1 if you have the `rand_r' function. */
#define HAVE_RAND_R 1
/* Define to 1 if you have the <resolv.h> header file. */
#define HAVE_RESOLV_H 1
/* Have shl_load based dso */
/* #undef HAVE_SHLLOAD */
/* Define to 1 if you have the `signal' function. */
#define HAVE_SIGNAL 1
/* Define to 1 if you have the <signal.h> header file. */
#define HAVE_SIGNAL_H 1
/* Define to 1 if you have the `snprintf' function. */
#define HAVE_SNPRINTF 1
/* Define to 1 if you have the `sprintf' function. */
#define HAVE_SPRINTF 1
/* Define to 1 if you have the `srand' function. */
#define HAVE_SRAND 1
/* Define to 1 if you have the `sscanf' function. */
#define HAVE_SSCANF 1
/* Define to 1 if you have the `stat' function. */
#define HAVE_STAT 1
/* Define to 1 if you have the <stdarg.h> header file. */
#define HAVE_STDARG_H 1
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the `strftime' function. */
#define HAVE_STRFTIME 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
*/
/* #undef HAVE_SYS_DIR_H */
/* Define to 1 if you have the <sys/mman.h> header file. */
#define HAVE_SYS_MMAN_H 1
/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
*/
/* #undef HAVE_SYS_NDIR_H */
/* Define to 1 if you have the <sys/select.h> header file. */
#define HAVE_SYS_SELECT_H 1
/* Define to 1 if you have the <sys/socket.h> header file. */
#define HAVE_SYS_SOCKET_H 1
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/timeb.h> header file. */
#define HAVE_SYS_TIMEB_H 1
/* Define to 1 if you have the <sys/time.h> header file. */
#define HAVE_SYS_TIME_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define to 1 if you have the `time' function. */
#define HAVE_TIME 1
/* Define to 1 if you have the <time.h> header file. */
#define HAVE_TIME_H 1
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* Whether va_copy() is available */
#define HAVE_VA_COPY 1
/* Define to 1 if you have the `vfprintf' function. */
#define HAVE_VFPRINTF 1
/* Define to 1 if you have the `vsnprintf' function. */
#define HAVE_VSNPRINTF 1
/* Define to 1 if you have the `vsprintf' function. */
#define HAVE_VSPRINTF 1
/* Define to 1 if you have the <zlib.h> header file. */
/* #undef HAVE_ZLIB_H */
/* Whether __va_copy() is available */
/* #undef HAVE___VA_COPY */
/* Define as const if the declaration of iconv() needs const. */
#define ICONV_CONST
/* Define to the sub-directory where libtool stores uninstalled libraries. */
#define LT_OBJDIR ".libs/"
/* Name of package */
#define PACKAGE "libxml2"
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT ""
/* Define to the full name of this package. */
#define PACKAGE_NAME ""
/* Define to the full name and version of this package. */
#define PACKAGE_STRING ""
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME ""
/* Define to the home page for this package. */
#define PACKAGE_URL ""
/* Define to the version of this package. */
#define PACKAGE_VERSION ""
/* Type cast for the send() function 2nd arg */
#define SEND_ARG2_CAST /**/
/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1
/* Support for IPv6 */
#define SUPPORT_IP6 /**/
/* Define if va_list is an array type */
#define VA_LIST_IS_ARRAY 1
/* Version number of package */
#define VERSION "2.9.8"
/* Determine what socket length (socklen_t) data type is */
#define XML_SOCKLEN_T socklen_t
/* Define for Solaris 2.5.1 so the uint32_t typedef from <sys/synch.h>,
<pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
#define below would cause a syntax error. */
/* #undef _UINT32_T */
/* ss_family is not defined here, use __ss_family instead */
/* #undef ss_family */
/* Define to the type of an unsigned integer type of width exactly 32 bits if
such a type exists and the standard includes do not define it. */
/* #undef uint32_t */

View File

@ -0,0 +1,501 @@
/*
* Summary: compile-time version information
* Description: compile-time version information for the XML library
*
* Copy: See Copyright for the status of this software.
*
* Author: Daniel Veillard
*/
#ifndef __XML_VERSION_H__
#define __XML_VERSION_H__
#include <libxml/xmlexports.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* use those to be sure nothing nasty will happen if
* your library and includes mismatch
*/
#ifndef LIBXML2_COMPILING_MSCCDEF
XMLPUBFUN void XMLCALL xmlCheckVersion(int version);
#endif /* LIBXML2_COMPILING_MSCCDEF */
/**
* LIBXML_DOTTED_VERSION:
*
* the version string like "1.2.3"
*/
#define LIBXML_DOTTED_VERSION "2.10.3"
/**
* LIBXML_VERSION:
*
* the version number: 1.2.3 value is 10203
*/
#define LIBXML_VERSION 21003
/**
* LIBXML_VERSION_STRING:
*
* the version number string, 1.2.3 value is "10203"
*/
#define LIBXML_VERSION_STRING "21003"
/**
* LIBXML_VERSION_EXTRA:
*
* extra version information, used to show a git commit description
*/
#define LIBXML_VERSION_EXTRA ""
/**
* LIBXML_TEST_VERSION:
*
* Macro to check that the libxml version in use is compatible with
* the version the software has been compiled against
*/
#define LIBXML_TEST_VERSION xmlCheckVersion(21003);
#ifndef VMS
#if 0
/**
* WITH_TRIO:
*
* defined if the trio support need to be configured in
*/
#define WITH_TRIO
#else
/**
* WITHOUT_TRIO:
*
* defined if the trio support should not be configured in
*/
#define WITHOUT_TRIO
#endif
#else /* VMS */
/**
* WITH_TRIO:
*
* defined if the trio support need to be configured in
*/
#define WITH_TRIO 1
#endif /* VMS */
/**
* LIBXML_THREAD_ENABLED:
*
* Whether the thread support is configured in
*/
#if 1
#define LIBXML_THREAD_ENABLED
#endif
/**
* LIBXML_THREAD_ALLOC_ENABLED:
*
* Whether the allocation hooks are per-thread
*/
#if 0
#define LIBXML_THREAD_ALLOC_ENABLED
#endif
/**
* LIBXML_TREE_ENABLED:
*
* Whether the DOM like tree manipulation API support is configured in
*/
#if 1
#define LIBXML_TREE_ENABLED
#endif
/**
* LIBXML_OUTPUT_ENABLED:
*
* Whether the serialization/saving support is configured in
*/
#if 1
#define LIBXML_OUTPUT_ENABLED
#endif
/**
* LIBXML_PUSH_ENABLED:
*
* Whether the push parsing interfaces are configured in
*/
#if 1
#define LIBXML_PUSH_ENABLED
#endif
/**
* LIBXML_READER_ENABLED:
*
* Whether the xmlReader parsing interface is configured in
*/
#if 1
#define LIBXML_READER_ENABLED
#endif
/**
* LIBXML_PATTERN_ENABLED:
*
* Whether the xmlPattern node selection interface is configured in
*/
#if 1
#define LIBXML_PATTERN_ENABLED
#endif
/**
* LIBXML_WRITER_ENABLED:
*
* Whether the xmlWriter saving interface is configured in
*/
#if 1
#define LIBXML_WRITER_ENABLED
#endif
/**
* LIBXML_SAX1_ENABLED:
*
* Whether the older SAX1 interface is configured in
*/
#if 1
#define LIBXML_SAX1_ENABLED
#endif
/**
* LIBXML_FTP_ENABLED:
*
* Whether the FTP support is configured in
*/
#if 0
#define LIBXML_FTP_ENABLED
#endif
/**
* LIBXML_HTTP_ENABLED:
*
* Whether the HTTP support is configured in
*/
#if 1
#define LIBXML_HTTP_ENABLED
#endif
/**
* LIBXML_VALID_ENABLED:
*
* Whether the DTD validation support is configured in
*/
#if 1
#define LIBXML_VALID_ENABLED
#endif
/**
* LIBXML_HTML_ENABLED:
*
* Whether the HTML support is configured in
*/
#if 1
#define LIBXML_HTML_ENABLED
#endif
/**
* LIBXML_LEGACY_ENABLED:
*
* Whether the deprecated APIs are compiled in for compatibility
*/
#if 0
#define LIBXML_LEGACY_ENABLED
#endif
/**
* LIBXML_C14N_ENABLED:
*
* Whether the Canonicalization support is configured in
*/
#if 1
#define LIBXML_C14N_ENABLED
#endif
/**
* LIBXML_CATALOG_ENABLED:
*
* Whether the Catalog support is configured in
*/
#if 1
#define LIBXML_CATALOG_ENABLED
#endif
/**
* LIBXML_XPATH_ENABLED:
*
* Whether XPath is configured in
*/
#if 1
#define LIBXML_XPATH_ENABLED
#endif
/**
* LIBXML_XPTR_ENABLED:
*
* Whether XPointer is configured in
*/
#if 1
#define LIBXML_XPTR_ENABLED
#endif
/**
* LIBXML_XPTR_LOCS_ENABLED:
*
* Whether support for XPointer locations is configured in
*/
#if 0
#define LIBXML_XPTR_LOCS_ENABLED
#endif
/**
* LIBXML_XINCLUDE_ENABLED:
*
* Whether XInclude is configured in
*/
#if 1
#define LIBXML_XINCLUDE_ENABLED
#endif
/**
* LIBXML_ICONV_ENABLED:
*
* Whether iconv support is available
*/
#if 0
#define LIBXML_ICONV_ENABLED
#endif
/**
* LIBXML_ICU_ENABLED:
*
* Whether icu support is available
*/
#if 0
#define LIBXML_ICU_ENABLED
#endif
/**
* LIBXML_ISO8859X_ENABLED:
*
* Whether ISO-8859-* support is made available in case iconv is not
*/
#if 1
#define LIBXML_ISO8859X_ENABLED
#endif
/**
* LIBXML_DEBUG_ENABLED:
*
* Whether Debugging module is configured in
*/
#if 1
#define LIBXML_DEBUG_ENABLED
#endif
/**
* DEBUG_MEMORY_LOCATION:
*
* Whether the memory debugging is configured in
*/
#if 0
#define DEBUG_MEMORY_LOCATION
#endif
/**
* LIBXML_DEBUG_RUNTIME:
*
* Whether the runtime debugging is configured in
*/
#if 0
#define LIBXML_DEBUG_RUNTIME
#endif
/**
* LIBXML_UNICODE_ENABLED:
*
* Whether the Unicode related interfaces are compiled in
*/
#if 1
#define LIBXML_UNICODE_ENABLED
#endif
/**
* LIBXML_REGEXP_ENABLED:
*
* Whether the regular expressions interfaces are compiled in
*/
#if 1
#define LIBXML_REGEXP_ENABLED
#endif
/**
* LIBXML_AUTOMATA_ENABLED:
*
* Whether the automata interfaces are compiled in
*/
#if 1
#define LIBXML_AUTOMATA_ENABLED
#endif
/**
* LIBXML_EXPR_ENABLED:
*
* Whether the formal expressions interfaces are compiled in
*
* This code is unused and disabled unconditionally for now.
*/
#if 0
#define LIBXML_EXPR_ENABLED
#endif
/**
* LIBXML_SCHEMAS_ENABLED:
*
* Whether the Schemas validation interfaces are compiled in
*/
#if 1
#define LIBXML_SCHEMAS_ENABLED
#endif
/**
* LIBXML_SCHEMATRON_ENABLED:
*
* Whether the Schematron validation interfaces are compiled in
*/
#if 1
#define LIBXML_SCHEMATRON_ENABLED
#endif
/**
* LIBXML_MODULES_ENABLED:
*
* Whether the module interfaces are compiled in
*/
#if 1
#define LIBXML_MODULES_ENABLED
/**
* LIBXML_MODULE_EXTENSION:
*
* the string suffix used by dynamic modules (usually shared libraries)
*/
#define LIBXML_MODULE_EXTENSION ".so"
#endif
/**
* LIBXML_ZLIB_ENABLED:
*
* Whether the Zlib support is compiled in
*/
#if 1
#define LIBXML_ZLIB_ENABLED
#endif
/**
* LIBXML_LZMA_ENABLED:
*
* Whether the Lzma support is compiled in
*/
#if 0
#define LIBXML_LZMA_ENABLED
#endif
#ifdef __GNUC__
/**
* ATTRIBUTE_UNUSED:
*
* Macro used to signal to GCC unused function parameters
*/
#ifndef ATTRIBUTE_UNUSED
# if ((__GNUC__ > 2) || ((__GNUC__ == 2) && (__GNUC_MINOR__ >= 7)))
# define ATTRIBUTE_UNUSED __attribute__((unused))
# else
# define ATTRIBUTE_UNUSED
# endif
#endif
/**
* LIBXML_ATTR_ALLOC_SIZE:
*
* Macro used to indicate to GCC this is an allocator function
*/
#ifndef LIBXML_ATTR_ALLOC_SIZE
# if (!defined(__clang__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3))))
# define LIBXML_ATTR_ALLOC_SIZE(x) __attribute__((alloc_size(x)))
# else
# define LIBXML_ATTR_ALLOC_SIZE(x)
# endif
#else
# define LIBXML_ATTR_ALLOC_SIZE(x)
#endif
/**
* LIBXML_ATTR_FORMAT:
*
* Macro used to indicate to GCC the parameter are printf like
*/
#ifndef LIBXML_ATTR_FORMAT
# if ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)))
# define LIBXML_ATTR_FORMAT(fmt,args) __attribute__((__format__(__printf__,fmt,args)))
# else
# define LIBXML_ATTR_FORMAT(fmt,args)
# endif
#else
# define LIBXML_ATTR_FORMAT(fmt,args)
#endif
#ifndef XML_DEPRECATED
# ifdef IN_LIBXML
# define XML_DEPRECATED
# else
/* Available since at least GCC 3.1 */
# define XML_DEPRECATED __attribute__((deprecated))
# endif
#endif
#else /* ! __GNUC__ */
/**
* ATTRIBUTE_UNUSED:
*
* Macro used to signal to GCC unused function parameters
*/
#define ATTRIBUTE_UNUSED
/**
* LIBXML_ATTR_ALLOC_SIZE:
*
* Macro used to indicate to GCC this is an allocator function
*/
#define LIBXML_ATTR_ALLOC_SIZE(x)
/**
* LIBXML_ATTR_FORMAT:
*
* Macro used to indicate to GCC the parameter are printf like
*/
#define LIBXML_ATTR_FORMAT(fmt,args)
/**
* XML_DEPRECATED:
*
* Macro used to indicate that a function, variable, type or struct member
* is deprecated.
*/
#ifndef XML_DEPRECATED
#define XML_DEPRECATED
#endif
#endif /* __GNUC__ */
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif

View File

@ -109,7 +109,7 @@ If you are using Maven to manage your project, simply add the following dependen
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.4.0</version>
<version>3.5.2</version>
</dependency>
```

View File

@ -15,6 +15,19 @@ When inserting data using parameter binding, it can avoid the resource consumpti
**Tips: It is recommended to use parameter binding for data insertion**
:::note
We only recommend using the following two forms of SQL for parameter binding data insertion:
```sql
a. Subtables already exist:
1. INSERT INTO meters (tbname, ts, current, voltage, phase) VALUES(?, ?, ?, ?, ?)
b. Automatic table creation on insert:
1. INSERT INTO meters (tbname, ts, current, voltage, phase, location, group_id) VALUES(?, ?, ?, ?, ?, ?, ?)
2. INSERT INTO ? USING meters TAGS (?, ?) VALUES (?, ?, ?, ?)
```
:::
Next, we continue to use smart meters as an example to demonstrate the efficient writing functionality of parameter binding with various language connectors:
1. Prepare a parameterized SQL insert statement for inserting data into the supertable `meters`. This statement allows dynamically specifying subtable names, tags, and column values.
@ -28,8 +41,15 @@ Next, we continue to use smart meters as an example to demonstrate the efficient
<Tabs defaultValue="java" groupId="lang">
<TabItem value="java" label="Java">
There are two kinds of interfaces for parameter binding: one is the standard JDBC interface, and the other is an extended interface. The extended interface offers better performance.
```java
{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java:para_bind}}
{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingStdInterfaceDemo.java:para_bind}}
```
```java
{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingExtendInterfaceDemo.java:para_bind}}
```
This is a [more detailed parameter binding example](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java)
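For readers who want a self-contained starting point, here is a minimal, hypothetical sketch of the standard JDBC form shown above. Only standard JDBC calls are used; the host, port, credentials, and subtable name `d_bind_1` are assumptions, and the SQL is the first recommended form from the note above.
```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.Timestamp;

public class StdBindSketch {
    public static void main(String[] args) throws Exception {
        // WebSocket connection URL; host, port, and credentials are assumptions.
        String url = "jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             PreparedStatement ps = conn.prepareStatement(
                     "INSERT INTO meters (tbname, ts, current, voltage, phase) VALUES (?, ?, ?, ?, ?)")) {
            ps.setString(1, "d_bind_1");                                   // subtable name
            ps.setTimestamp(2, new Timestamp(System.currentTimeMillis())); // timestamp column
            ps.setFloat(3, 10.3f);                                         // current
            ps.setInt(4, 219);                                             // voltage
            ps.setFloat(5, 0.31f);                                         // phase
            ps.addBatch();                                                 // repeat bind/addBatch per row
            ps.executeBatch();
        }
    }
}
```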
@ -91,14 +111,24 @@ This is a [more detailed parameter binding example](https://github.com/taosdata/
<TabItem label="Python" value="python">
```python
{{#include docs/examples/python/stmt_native.py}}
{{#include docs/examples/python/stmt2_native.py}}
```
</TabItem>
<TabItem label="Go" value="go">
The example code for binding parameters with stmt2 (Go connector v3.6.0 and above, TDengine v3.3.5.0 and above) is as follows:
```go
{{#include docs/examples/go/stmt2/native/main.go}}
```
The example code for binding parameters with stmt is as follows:
```go
{{#include docs/examples/go/stmt/native/main.go}}
```
</TabItem>
<TabItem label="Rust" value="rust">

View File

@ -17,7 +17,7 @@ TDengine is designed for various writing scenarios, and many of these scenarios
```sql
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
SHOW COMPACTS [compact_id];
SHOW COMPACT [compact_id];
KILL COMPACT compact_id;
```

View File

@ -0,0 +1,376 @@
---
sidebar_label: Flink
title: TDengine Flink Connector
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
Apache Flink is an open-source distributed stream-batch processing framework supported by the Apache Software Foundation. It can be used for many big data scenarios such as stream processing, batch processing, complex event processing, real-time data warehouse construction, and providing real-time data support for machine learning. Flink also offers a wealth of connectors and tools that interface with many different types of data sources for reading and writing, and it provides a series of reliable fault-tolerance mechanisms that keep tasks running stably and continuously even when unexpected situations occur.
With TDengine's Flink connector, Apache Flink can integrate seamlessly with the TDengine database. On the one hand, the results of complex computation and deep analysis can be stored accurately in TDengine, achieving efficient storage and management of data. On the other hand, massive amounts of data can be read from TDengine quickly and stably for comprehensive, in-depth analysis, fully tapping the potential value of the data, providing strong data support and a scientific basis for enterprise decision-making, and greatly improving the efficiency and quality of data processing.
## Prerequisites
Prepare the following environment:
- TDengine cluster has been deployed and is running normally (both enterprise and community versions are available)
- taosAdapter is running normally.
- Apache Flink v1.19.0 or above is installed. For installation, please refer to the official [Apache Flink documentation](https://flink.apache.org/).
## Supported platforms
The Flink connector supports all platforms that can run Flink 1.19 and above.
## Version History
| Flink Connector Version | Major Changes | TDengine Version|
|-------------------------| ------------------------------------ | ---------------- |
| 2.0.1 | Sink supports writing types from RowData implementations.| - |
| 2.0.0 | 1.Support SQL queries on data in TDengine database. <br/> 2. Support CDC subscription to data in TDengine database.<br/> 3. Supports reading and writing to TDengine database using Table SQL. | 3.3.5.1 and higher|
| 1.0.0 | Supports the Sink function to write data from other sources into TDengine.| 3.3.2.0 and higher|
## Exception and error codes
If a task fails, check the Flink task execution log to confirm the reason for the failure, and refer to the following table:
| Error Code | Description | Suggested Actions |
| ---------------- |------------------------------------------------------- | -------------------- |
|0xa000 | connection param error | Connector parameter error.|
|0xa001 | the groupid parameter of CDC is incorrect | The groupid parameter of CDC is incorrect.|
|0xa002 | wrong topic parameter for CDC | The topic parameter for CDC is incorrect.|
|0xa010 | database name configuration error | Database name configuration error.|
|0xa011 | table name configuration error | Table name configuration error.|
|0xa012 | no data was obtained from the data source | Failed to retrieve data from the data source.|
|0xa013 | value.deserializer parameter not set | No serialization method set.|
|0xa014 | list of column names set incorrectly | List of column names for target table not set. |
|0x2301 | connection already closed | The connection has been closed. Check the connection status or create a new connection to execute the relevant instructions.|
|0x2302 | this operation is NOT supported currently | The current interface is not supported, you can switch to other connection methods.|
|0x2303 | invalid variables | The parameter is invalid. Please check the corresponding interface specification and adjust the parameter type and size.|
|0x2304 | statement is closed | Statement has already been closed. Please check if the statement is closed and reused, or if the connection is working properly.|
|0x2305 | resultSet is closed | The ResultSet has been released. Please check if the ResultSet has been released and used again.|
|0x230d | parameter index out of range | parameter out of range, please check the reasonable range of the parameter.|
|0x230e | connection already closed | The connection has been closed. Please check if the connection is closed and used again, or if the connection is working properly.|
|0x230f | unknown SQL type in TDengine | Please check the Data Type types supported by TDengine.|
|0x2315 | unknown taos type in TDengine | Check whether the correct TDengine data type was specified when converting a TDengine data type to a JDBC data type.|
|0x2319 | user is required | Username information is missing when creating a connection.|
|0x231a | password is required | Password information is missing when creating a connection.|
|0x231d | can't create connection with server within | Increase connection time by adding the parameter httpConnectTimeout, or check the connection status with taosAdapter.|
|0x231e | failed to complete the task within the specified time | Increase execution time by adding the parameter messageWaitTimeout, or check the connection with taosAdapter.|
|0x2352 | unsupported encoding | An unsupported character encoding set was specified under the local connection.|
|0x2353 | internal error of database, Please see taoslog for more details | An error occurred while executing prepareStatement on the local connection. Please check the taoslog for problem localization.|
|0x2354 | connection is NULL | Connection has already been closed while executing the command on the local connection. Please check the connection with TDengine.|
|0x2355 | result set is NULL | Local connection to obtain result set, result set exception, please check connection status and retry.|
|0x2356 | invalid num of fields | The meta information obtained from the local connection result set does not match.|
|0x2357 | empty SQL string | Fill in the correct SQL for execution.|
|0x2371 | consumer properties must not be null | When creating a subscription, the parameter is empty. Please fill in the correct parameter.|
|0x2375 | topic reference has been destroyed | During the process of creating a data subscription, the topic reference was released. Please check the connection with TDengine.|
|0x2376 | failed to set consumer topic, Topic name is empty | During the process of creating a data subscription, the subscription topic name is empty. Please check if the specified topic name is filled in correctly.|
|0x2377 | consumer reference has been destroyed | The subscription data transmission channel has been closed, please check the connection with TDengine.|
|0x2378 | consumer create error | Failed to create data subscription. Please check the taos log based on the error message to locate the problem.|
|0x237a | vGroup not found in result set | VGroup not assigned to the current consumer; due to the Rebalance mechanism, the relationship between Consumer and VGroup is not bound.|
## Data type mapping
TDengine currently supports timestamp, number, character, and boolean types, and the corresponding type conversions with Flink RowData Type are as follows:
| TDengine DataType | Flink RowDataType |
| ----------------- | ------------------ |
| TIMESTAMP | TimestampData |
| INT | Integer |
| BIGINT | Long |
| FLOAT | Float |
| DOUBLE | Double |
| SMALLINT | Short |
| TINYINT | Byte |
| BOOL | Boolean |
| BINARY | byte[] |
| NCHAR | StringData |
| JSON | StringData |
| VARBINARY | byte[] |
| GEOMETRY | byte[] |
## Instructions for use
### Flink Semantic Selection Instructions
The reasons for using At Least Once semantics are:
- TDengine currently does not support transactions and cannot perform frequent checkpoint operations and complex transaction coordination.
- Because TDengine uses timestamps as primary keys, downstream operators can filter duplicate data to avoid duplicate calculations.
- Using At Least Once ensures high data processing performance and low data latency.
The setting method is as follows:
```java
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.enableCheckpointing(5000);
env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.AT_LEAST_ONCE);
```
If using Maven to manage a project, simply add the following dependencies in pom.xml.
```xml
<dependency>
<groupId>com.taosdata.flink</groupId>
<artifactId>flink-connector-tdengine</artifactId>
<version>2.0.1</version>
</dependency>
```
The parameters for establishing a connection include URL and Properties.
The URL specification format is:
`jdbc:TAOS-WS://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&timezone={timezone}]`
Parameter description:
- user: TDengine login username; default value is 'root'.
- password: User login password; default value is 'taosdata'.
- database_name: Database name.
- timezone: Time zone.
- httpConnectTimeout: Connection timeout in milliseconds; default value is 60000.
- messageWaitTimeout: Message timeout in milliseconds; default value is 60000.
- useSSL: Whether SSL is used in the connection.
### Source
Source retrieves data from the TDengine database, converts it into a format and type that Flink can handle internally, and reads and distributes it in parallel, providing efficient input for subsequent data processing.
By setting the parallelism of the data source, multiple threads can read data from the data source in parallel, improving the efficiency and throughput of data reading, and fully utilizing cluster resources for large-scale data processing capabilities.
#### Source Properties
The configuration parameters in Properties are as follows:
- TDengineConfigParams.PROPERTY_KEY_USER: TDengine login username; default value is 'root'.
- TDengineConfigParams.PROPERTY_KEY_PASSWORD: User login password; default value is 'taosdata'.
- TDengineConfigParams.VALUE_DESERIALIZER: The deserialization method for the result set received by the downstream operator. If the received result set type is Flink `RowData`, simply set this to `RowData`. You can also inherit `TDengineRecordDeserialization` and implement the `convert` and `getProducedType` methods to customize deserialization based on the SQL `ResultSet`.
- TDengineConfigParams.TD_BATCH_MODE: This parameter is used to push data to downstream operators in batches. If set to true, when creating the `TDengine Source` object, specify the data type as a template form of the `SourceRecords` type.
- TDengineConfigParams.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: Message timeout in milliseconds; default value is 60000.
- TDengineConfigParams.PROPERTY_KEY_ENABLE_COMPRESSION: Whether compression is enabled during transmission. true: enabled; false: disabled. Default is false.
- TDengineConfigParams.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: Whether automatic reconnection is enabled. true: enabled; false: disabled. Default is false.
- TDengineConfigParams.PROPERTY_KEY_RECONNECT_INTERVAL_MS: Automatic reconnection retry interval in milliseconds; default value is 2000. Takes effect only when `PROPERTY_KEY_ENABLE_AUTO_RECONNECT` is true.
- TDengineConfigParams.PROPERTY_KEY_RECONNECT_RETRY_COUNT: Number of automatic reconnection retries; default value is 3. Takes effect only when `PROPERTY_KEY_ENABLE_AUTO_RECONNECT` is true.
- TDengineConfigParams.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: Whether to disable SSL certificate verification. true: verification disabled; false: verification enabled. Default is false.
#### Split by time
Users can split the SQL query into multiple subtasks by time, specifying the start time, end time, split interval, and time field name. The system splits the query by the set interval and fetches data in parallel (each time window is left-closed and right-open).
```java
{{#include docs/examples/flink/Main.java:time_interval}}
```
#### Splitting by Super Table TAG
Users can split the query SQL into multiple query conditions based on the TAG field of the super table, and the system will split them into subtasks corresponding to each query condition, thereby obtaining data in parallel.
```java
{{#include docs/examples/flink/Main.java:tag_split}}
```
#### Classify by table
Sharding is supported by passing in multiple supertables or regular tables with the same table structure. The system splits them one table per task and then fetches data in parallel.
```java
{{#include docs/examples/flink/Main.java:table_split}}
```
#### Use Source connector
An example where the query results are of the RowData type:
<details>
<summary>RowData Source</summary>
```java
{{#include docs/examples/flink/Main.java:source_test}}
```
</details>
Example of batch query results:
<details>
<summary>Batch Source</summary>
```java
{{#include docs/examples/flink/Main.java:source_batch_test}}
```
</details>
Example of custom data type query result:
<details>
<summary>Custom Type Source</summary>
```java
{{#include docs/examples/flink/Main.java:source_custom_type_test}}
```
</details>
- ResultBean is a custom inner class used to define the data type of the Source query results.
- ResultSoureDeserialization is a custom inner class that inherits `TDengineRecordDeserialization` and implements the `convert` and `getProducedType` methods.
### CDC Data Subscription
Flink CDC is mainly used to provide data subscription functionality, which can monitor real-time changes in TDengine database data and transmit these changes in the form of data streams to Flink for processing, while ensuring data consistency and integrity.
Parameter description:
- TDengineCdcParams.BOOTSTRAP_SERVERS: `ip:port` of the TDengine server; if using a WebSocket connection, it is the `ip:port` where taosAdapter is located.
- TDengineCdcParams.CONNECT_USER: TDengine login username; default value is 'root'.
- TDengineCdcParams.CONNECT_PASS: User login password; default value is 'taosdata'.
- TDengineCdcParams.POLL_INTERVAL_MS: Data polling interval; default 500 ms.
- TDengineCdcParams.VALUE_DESERIALIZER: Result set deserialization method. If the received result set type is Flink `RowData`, simply set this to `RowData`. You can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer`, specify the result set bean, and implement deserialization; you can also inherit `com.taosdata.jdbc.tmq.Deserializer` and customize deserialization based on the SQL resultSet.
- TDengineCdcParams.TMQ_BATCH_MODE: This parameter is used to push data to downstream operators in batches. If set to true, when creating the `TDengineCdcSource` object, specify the data type as a template form of the `ConsumerRecords` type.
- TDengineCdcParams.GROUP_ID: Consumer group ID; consumers in the same group share consumption progress. Maximum length: 192.
- TDengineCdcParams.AUTO_OFFSET_RESET: Initial position of the consumer group subscription (`earliest`: subscribe from the beginning; `latest`: subscribe from the latest data). Default is `latest`.
- TDengineCdcParams.ENABLE_AUTO_COMMIT: Whether to enable automatic consumption offset commit (true: commit automatically; false: commit based on the `checkpoint` time). Default is false.
> **Note**: In automatic commit mode, the reader commits offsets as soon as data is fetched, regardless of whether downstream operators have processed the data correctly. This carries a risk of data loss and is mainly suited to efficient stateless operators or scenarios with low data consistency requirements.
- TDengineCdcParams.AUTO_COMMIT_INTERVAL_MS: Interval for automatically committing consumption offsets, in milliseconds; default 5000. Takes effect only when `ENABLE_AUTO_COMMIT` is true.
- TDengineConfigParams.PROPERTY_KEY_ENABLE_COMPRESSION: Whether compression is enabled during transmission. true: enabled; false: disabled. Default is false.
- TDengineConfigParams.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: Whether automatic reconnection is enabled. true: enabled; false: disabled. Default is false.
- TDengineConfigParams.PROPERTY_KEY_RECONNECT_INTERVAL_MS: Automatic reconnection retry interval in milliseconds; default value is 2000. Takes effect only when `PROPERTY_KEY_ENABLE_AUTO_RECONNECT` is true.
- TDengineConfigParams.PROPERTY_KEY_RECONNECT_RETRY_COUNT: Number of automatic reconnection retries; default value is 3. Takes effect only when `PROPERTY_KEY_ENABLE_AUTO_RECONNECT` is true.
- TDengineCdcParams.TMQ_SESSION_TIMEOUT_MS: Timeout after the consumer heartbeat is lost; rebalance logic is then triggered, and upon success that consumer is removed (supported from version 3.3.3.0). Default is 12000, range [6000, 1800000].
- TDengineCdcParams.TMQ_MAX_POLL_INTERVAL_MS: Maximum interval between consumer polls; if exceeded, the consumer is considered offline, rebalance logic is triggered, and upon success that consumer is removed (supported from version 3.3.3.0). Default is 300000, range [1000, INT32_MAX].
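Again as a quick reference, a minimal sketch of assembling these CDC subscription properties. The import path of `TDengineCdcParams`, the server address, and the group ID are assumptions; the key names come from the list above.
```java
import java.util.Properties;
// The package path below is an assumption; only the key names come from the list above.
import com.taosdata.flink.cdc.TDengineCdcParams;

public class CdcPropsSketch {
    static Properties cdcProperties() {
        Properties props = new Properties();
        props.setProperty(TDengineCdcParams.BOOTSTRAP_SERVERS, "localhost:6041"); // ip:port of taosAdapter (assumption)
        props.setProperty(TDengineCdcParams.CONNECT_USER, "root");
        props.setProperty(TDengineCdcParams.CONNECT_PASS, "taosdata");
        props.setProperty(TDengineCdcParams.GROUP_ID, "flink_group_1");           // consumers in a group share progress
        props.setProperty(TDengineCdcParams.AUTO_OFFSET_RESET, "earliest");       // subscribe from the beginning
        props.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "RowData");
        props.setProperty(TDengineCdcParams.ENABLE_AUTO_COMMIT, "false");         // commit on checkpoint instead
        return props;
    }
}
```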
#### Use CDC connector
The CDC connector will create consumers based on the parallelism set by the user, so the user should set the parallelism reasonably according to the resource situation.
An example where the subscription results are of the RowData type:
<details>
<summary>CDC Source</summary>
```java
{{#include docs/examples/flink/Main.java:cdc_source}}
```
</details>
Example of batch query results:
<details>
<summary>CDC Batch Source</summary>
```java
{{#include docs/examples/flink/Main.java:cdc_batch_source}}
```
</details>
Example of custom data type query result:
<details>
<summary>CDC Custom Type</summary>
```java
{{#include docs/examples/flink/Main.java:cdc_custom_type_test}}
```
</details>
- ResultBean is a custom inner class whose field names and data types correspond one-to-one with column names and data types. This allows the deserialization class corresponding to the value.deserializer property to deserialize objects of the ResultBean type.
### Sink
The core function of Sink is to efficiently and accurately write Flink-processed data from different data sources or operators into TDengine. In this process, TDengine's efficient write mechanism plays a crucial role, ensuring fast and stable storage of data.
#### Sink Properties
- TDengineConfigParams.PROPERTY_KEY_USER: TDengine login username; default value is 'root'.
- TDengineConfigParams.PROPERTY_KEY_PASSWORD: User login password; default value is 'taosdata'.
- TDengineConfigParams.PROPERTY_KEY_DBNAME: The database name.
- TDengineConfigParams.TD_SUPERTABLE_NAME: The name of the supertable. The received data must contain a tbname field to determine which subtable to write to.
- TDengineConfigParams.TD_TABLE_NAME: The table name of a subtable or a normal table. Only one of this parameter and `TD_SUPERTABLE_NAME` needs to be set.
- TDengineConfigParams.VALUE_DESERIALIZER: The serialization method for received result sets. If the received result set type is Flink `RowData`, simply set this to `RowData`. You can also inherit `TDengineSinkRecordSequencer` and implement the `serialize` method to customize serialization based on the received data type.
- TDengineConfigParams.TD_BATCH_SIZE: The batch size for a single write to the TDengine database. Writing is triggered when the batch size is reached, or when a checkpoint is set.
- TDengineConfigParams.TD_BATCH_MODE: When set to true for receiving batch data, if the data source is `TDengine Source`, use the `SourceRecords` template type to create a `TDengineSink` object; if the source is `TDengine CDC`, use the `ConsumerRecords` template type.
- TDengineConfigParams.TD_SOURCE_TYPE: Sets the data source type. When the data source is `TDengine Source`, set it to `tdengine_source`; when the source is `TDengine CDC`, set it to `tdengine_cdc`. Takes effect when `TD_BATCH_MODE` is set to true.
- TDengineConfigParams.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: Message timeout in milliseconds; default value is 60000.
- TDengineConfigParams.PROPERTY_KEY_ENABLE_COMPRESSION: Whether compression is enabled during transmission. true: enabled; false: disabled. Default is false.
- TDengineConfigParams.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: Whether automatic reconnection is enabled. true: enabled; false: disabled. Default is false.
- TDengineConfigParams.PROPERTY_KEY_RECONNECT_INTERVAL_MS: Automatic reconnection retry interval in milliseconds; default value is 2000. Takes effect only when `PROPERTY_KEY_ENABLE_AUTO_RECONNECT` is true.
- TDengineConfigParams.PROPERTY_KEY_RECONNECT_RETRY_COUNT: Number of automatic reconnection retries; default value is 3. Takes effect only when `PROPERTY_KEY_ENABLE_AUTO_RECONNECT` is true.
- TDengineConfigParams.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: Whether to disable SSL certificate verification. true: verification disabled; false: verification enabled. Default is false.
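A minimal sketch of the routing-related Sink properties. The import path is an assumption, and the database and supertable names are taken from the usage examples below rather than guaranteed defaults.
```java
import java.util.Properties;
// The package path below is an assumption; only the key names come from the list above.
import com.taosdata.flink.common.TDengineConfigParams;

public class SinkPropsSketch {
    static Properties sinkProperties() {
        Properties props = new Properties();
        props.setProperty(TDengineConfigParams.PROPERTY_KEY_DBNAME, "power_stink");   // target database (from the examples)
        props.setProperty(TDengineConfigParams.TD_SUPERTABLE_NAME, "sink_meters");    // rows must carry a tbname field
        props.setProperty(TDengineConfigParams.VALUE_DESERIALIZER, "RowData");
        props.setProperty(TDengineConfigParams.TD_BATCH_SIZE, "2000");                // flush when the batch size is reached
        return props;
    }
}
```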
Usage example:
Write the sub table data of the meters table in the power database into the corresponding sub table of the sink_meters super table in the power_stink database.
<details>
<summary>Sink RowData</summary>
```java
{{#include docs/examples/flink/Main.java:RowDataToSink}}
```
</details>
Usage example:
Subscribe to the sub table data of the meters super table in the power database and write it to the corresponding sub table of the sink_meters super table in the power_stink database.
<details>
<summary>Cdc Sink</summary>
```java
{{#include docs/examples/flink/Main.java:CdcRowDataToSink}}
```
</details>
### Table SQL
Extract data from multiple different data source databases (such as TDengine, MySQL, Oracle, etc.) using Table SQL, perform custom operator operations (such as data cleaning, format conversion, associating data from different tables, etc.), and then load the processed results into the target data source (such as TDengine, MySQL, etc.).
#### Source connector
Parameter configuration instructions:
| Parameter Name | Type | Parameter Description |
|-----------------------| :-----: | ------------ |
| connector | string | connector identifier, set `tdengine-connector`|
| td.jdbc.url | string | url of the connection |
| td.jdbc.mode | string | connector type: `source`, `sink`|
| table.name | string | original or target table name |
| scan.query | string | SQL statement to retrieve data|
| sink.db.name | string | target database name|
| sink.supertable.name | string | name of the supertable|
| sink.batch.size | integer| batch size written|
| sink.table.name | string | the table name of a sub table or a normal table |
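The options above map directly onto a `CREATE TABLE ... WITH (...)` statement. The following is a minimal, hypothetical sketch of registering a TDengine source table with Flink Table SQL; the URL, schema, and query are assumptions.
```java
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class TableSourceSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // Register a source table backed by the TDengine connector (option names from the table above).
        tEnv.executeSql(
            "CREATE TABLE meters_src (" +
            "  ts TIMESTAMP(3), `current` FLOAT, voltage INT, phase FLOAT" +
            ") WITH (" +
            "  'connector' = 'tdengine-connector'," +
            "  'td.jdbc.url' = 'jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata'," +
            "  'td.jdbc.mode' = 'source'," +
            "  'table.name' = 'meters'," +
            "  'scan.query' = 'SELECT ts, `current`, voltage, phase FROM meters'" +
            ")");
        tEnv.executeSql("SELECT * FROM meters_src").print();
    }
}
```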
Usage example:
Write the sub table data of the meters table in the power database into the corresponding sub table of the sink_meters super table in the power_stink database.
<details>
<summary>Table Source</summary>
```java
{{#include docs/examples/flink/Main.java:source_table}}
```
</details>
#### Table CDC connector
Parameter configuration instructions:
| Parameter Name | Type | Parameter Description |
|-------------------| :-----: |--------------------------------------------------------------------------------------|
| connector | string | connector identifier, set `tdengine-connector` |
| user | string | username, default root |
| password | string | password, default taosdata |
| bootstrap.servers | string | server address |
| topic | string | subscribe to topic |
| td.jdbc.mode | string | connector type: `cdc`, `sink` |
| group.id | string | consumption group ID, sharing consumption progress within the same consumption group |
| auto.offset.reset | string | initial position for consumer group subscription. <br/> `earliest`: subscribe from the beginning <br/> `latest` subscribe from the latest data <br/>default `latest`|
| poll.interval_ms | integer | pull data interval, default 500ms |
| sink.db.name | string | target database name |
| sink.supertable.name | string | name of the supertable |
| sink.batch.size | integer | batch size written |
| sink.table.name | string | the table name of a sub table or a normal table |
Usage example:
Subscribe to the sub table data of the meters super table in the power database and write it to the corresponding sub table of the sink_meters super table in the power_stink database.
<details>
<summary>Table CDC</summary>
```java
{{#include docs/examples/flink/Main.java:cdc_table}}
```
</details>

View File

@ -13,9 +13,9 @@ Through the Python connector of TDengine, Superset can support TDengine data sou
Prepare the following environment:
- TDengine is installed and running normally (both Enterprise and Community versions are available)
- taosAdapter is running normally, refer to [taosAdapter](../../../reference/components/taosAdapter)
- taosAdapter is running normally, refer to [taosAdapter](../../../tdengine-reference/components/taosadapter/)
- Apache Superset version 2.1.0 or above is already installed; refer to [Apache Superset](https://superset.apache.org/)
## Install TDengine Python Connector
The Python connector of TDengine comes with a connection driver that supports Superset in versions 2.1.18 and later, which will be automatically installed in the Superset directory and provide data source services.

View File

@ -43,7 +43,7 @@ After modifying configuration file parameters, you need to restart the *taosd* s
|resolveFQDNRetryTime | Cancelled after 3.x |Not supported |Number of retries when FQDN resolution fails|
|timeToGetAvailableConn | Cancelled after 3.3.4.x |Maximum waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|maxShellConns | Cancelled after 3.x |Supported, effective after restart|Maximum number of connections allowed|
|maxRetryWaitTime | |Supported, effective after restart|Maximum timeout for reconnection, default value is 10s|
|maxRetryWaitTime | |Supported, effective after restart|Maximum timeout for reconnection, calculated from the time of retry, range 0-86400000, in milliseconds, default value 10000|
|shareConnLimit |Added in 3.3.4.0 |Supported, effective after restart|Number of requests a connection can share, range 1-512, default value 10|
|readTimeout |Added in 3.3.4.0 |Supported, effective after restart|Minimum timeout for a single request, range 64-604800, in seconds, default value 900|
@ -77,12 +77,7 @@ After modifying configuration file parameters, you need to restart the *taosd* s
|minReservedMemorySize | |Not supported |The minimum reserved system available memory size, all memory except reserved can be used for queries, unit: MB, default reserved size is 20% of system physical memory, value range 1024-1000000000|
|singleQueryMaxMemorySize| |Not supported |The memory limit that a single query can use on a single node (dnode), exceeding this limit will return an error, unit: MB, default value: 0 (no limit), value range 0-1000000000|
|filterScalarMode | |Not supported |Force scalar filter mode, 0: off; 1: on, default value 0|
|queryPlannerTrace | |Supported, effective immediately |Internal parameter, whether the query plan outputs detailed logs|
|queryNodeChunkSize | |Supported, effective immediately |Internal parameter, chunk size of the query plan|
|queryUseNodeAllocator | |Supported, effective immediately |Internal parameter, allocation method of the query plan|
|queryMaxConcurrentTables| |Not supported |Internal parameter, concurrency number of the query plan|
|queryRsmaTolerance | |Not supported |Internal parameter, tolerance time for determining which level of rsma data to query, in milliseconds|
|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages|
|pqSortMemThreshold | |Not supported |Internal parameter, memory threshold for sorting|
### Region Related
@ -190,7 +185,8 @@ The effective value of charset is UTF-8.
|Parameter Name |Supported Version |Dynamic Modification|Description|
|-----------------------|-------------------------|--------------------|------------|
|supportVnodes | |Supported, effective immediately |Maximum number of vnodes supported by a dnode, range 0-4096, default value is twice the number of CPU cores + 5|
|numOfCommitThreads | |Supported, effective after restart|Maximum number of commit threads, range 0-1024, default value 4|
|numOfCommitThreads | |Supported, effective after restart|Maximum number of commit threads, range 1-1024, default value 4|
|numOfCompactThreads | |Supported, effective after restart|Maximum number of compact threads, range 1-16, default value 2|
|numOfMnodeReadThreads | |Supported, effective after restart|Number of Read threads for mnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|numOfVnodeQueryThreads | |Supported, effective after restart|Number of Query threads for vnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|numOfVnodeFetchThreads | |Supported, effective after restart|Number of Fetch threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|

View File

@ -268,7 +268,22 @@ An exporter used by Prometheus that exposes hardware and operating system metric
### Getting the VGroup ID of a table
You can access the HTTP interface `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` to get the VGroup ID of a table.
You can send a POST request to the HTTP interface `http://<fqdn>:<port>/rest/sql/<db>/vgid` to get the VGroup ID of a table.
The body should be a JSON array of multiple table names.
Example: Get the VGroup ID for the database power and tables d_bind_1 and d_bind_2.
```shell
curl --location 'http://127.0.0.1:6041/rest/sql/power/vgid' \
--user 'root:taosdata' \
--data '["d_bind_1","d_bind_2"]'
```
response:
```json
{"code":0,"vgIDs":[153,152]}
```
## Memory Usage Optimization Methods

View File

@ -403,7 +403,7 @@ Specify the configuration parameters for tag and data columns in `super_tables`
- **min**: The minimum value for the data type of the column/tag. Generated values will be greater than or equal to the minimum value.
- **max**: The maximum value for the data type of the column/tag. Generated values will be less than the minimum value.
- **max**: The maximum value for the data type of the column/tag. Generated values will be less than the maximum value.
- **scalingFactor**: Floating-point precision enhancement factor, only effective when the data type is float/double, valid values range from 1 to 1000000 positive integers. Used to enhance the precision of generated floating points, especially when min or max values are small. This attribute enhances the precision after the decimal point by powers of 10: a scalingFactor of 10 means enhancing the precision by 1 decimal place, 100 means 2 places, and so on.

View File

@ -65,7 +65,7 @@ database_option: {
- MINROWS: The minimum number of records in a file block, default is 100.
- KEEP: Indicates the number of days data files are kept, default value is 3650, range [1, 365000], and must be greater than or equal to 3 times the DURATION parameter value. The database will automatically delete data that has been saved for longer than the KEEP value to free up storage space. KEEP can use unit-specified formats, such as KEEP 100h, KEEP 10d, etc., supports m (minutes), h (hours), and d (days) three units. It can also be written without a unit, like KEEP 50, where the default unit is days. The enterprise version supports multi-tier storage feature, thus, multiple retention times can be set (multiple separated by commas, up to 3, satisfying keep 0 \<= keep 1 \<= keep 2, such as KEEP 100h,100d,3650d); the community version does not support multi-tier storage feature (even if multiple retention times are configured, it will not take effect, KEEP will take the longest retention time).
- KEEP_TIME_OFFSET: Effective from version 3.2.0.0. The delay execution time for deleting or migrating data that has been saved for longer than the KEEP value, default value is 0 (hours). After the data file's save time exceeds KEEP, the deletion or migration operation will not be executed immediately, but will wait an additional interval specified by this parameter, to avoid peak business periods.
- STT_TRIGGER: Indicates the number of file merges triggered by disk files. The open-source version is fixed at 1, the enterprise version can be set from 1 to 16. For scenarios with few tables and high-frequency writing, this parameter is recommended to use the default configuration; for scenarios with many tables and low-frequency writing, this parameter is recommended to be set to a larger value.
- STT_TRIGGER: Indicates the number of file merges triggered by disk files. For scenarios with few tables and high-frequency writing, this parameter is recommended to use the default configuration; for scenarios with many tables and low-frequency writing, this parameter is recommended to be set to a larger value.
- SINGLE_STABLE: Indicates whether only one supertable can be created in this database, used in cases where the supertable has a very large number of columns.
- 0: Indicates that multiple supertables can be created.
- 1: Indicates that only one supertable can be created.
@ -144,10 +144,6 @@ You can view cacheload through show \<db_name>.vgroups;
If cacheload is very close to cachesize, then cachesize may be too small. If cacheload is significantly less than cachesize, then cachesize is sufficient. You can decide whether to modify cachesize based on this principle. The specific modification value can be determined based on the available system memory, whether to double it or increase it several times.
4. stt_trigger
Please stop database writing before modifying the stt_trigger parameter.
:::note
Other parameters are not supported for modification in version 3.0.0.0

View File

@ -491,15 +491,15 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
:::
## UNION ALL Clause
## UNION Clause
```text title=Syntax
SELECT ...
UNION ALL SELECT ...
[UNION ALL SELECT ...]
UNION [ALL] SELECT ...
[UNION [ALL] SELECT ...]
```
TDengine supports the UNION ALL operator. This means that if multiple SELECT clauses return result sets with the exact same structure (column names, column types, number of columns, order), these result sets can be combined together using UNION ALL. Currently, only the UNION ALL mode is supported, which means that duplicates are not removed during the merging process. In the same SQL statement, a maximum of 100 UNION ALLs are supported.
TDengine supports the UNION [ALL] operator. This means that if multiple SELECT clauses return result sets with the exact same structure (column names, column types, number of columns, order), these result sets can be combined together using UNION [ALL].
## SQL Examples

View File

@ -943,6 +943,7 @@ CHAR(expr1 [, expr2] [, expr3] ...)
- NULL values in input parameters will be skipped.
- If the input parameters are of string type, they will be converted to numeric type for processing.
- If the character corresponding to the input parameter is a non-printable character, the return value will still contain the character corresponding to that parameter, but it may not be displayed.
- This function can have at most 2^31 - 1 input parameters.
**Examples**:
@ -2170,7 +2171,7 @@ ignore_negative: {
**Usage Instructions**:
- Can be used with the columns associated with the selection. For example: select _rowts, DERIVATIVE() from.
- Can be used with the columns associated with the selection. For example: select _rowts, DERIVATIVE(col1, 1s, 1) from tb1.
### DIFF

View File

@ -148,6 +148,7 @@ When using time windows, note:
- The window width of the aggregation period is specified by the keyword INTERVAL, with the shortest interval being 10 milliseconds (10a); it also supports an offset (the offset must be less than the interval), which is the offset of the time window division compared to "UTC moment 0". The SLIDING statement is used to specify the forward increment of the aggregation period, i.e., the duration of each window slide forward.
- When using the INTERVAL statement, unless in very special cases, it is required to configure the timezone parameter in the taos.cfg configuration files of both the client and server to the same value to avoid frequent cross-time zone conversions by time processing functions, which can cause severe performance impacts.
- The returned results have a strictly monotonically increasing time-series.
- When using AUTO as the window offset, if the WHERE time condition is complex, such as multiple AND/OR/IN combinations, AUTO may not take effect. In such cases, you can manually specify the window offset to resolve the issue.
- When using AUTO as the window offset, if the window width unit is d (day), n (month), w (week), y (year), such as: INTERVAL(1d, AUTO), INTERVAL(3w, AUTO), the TSMA optimization cannot take effect. If TSMA is manually created on the target table, the statement will report an error and exit; in this case, you can explicitly specify the Hint SKIP_TSMA or not use AUTO as the window offset.
### State Window

View File

@ -45,7 +45,7 @@ ALTER ALL DNODES dnode_option
For configuration parameters that support dynamic modification, you can use the ALTER DNODE or ALTER ALL DNODES syntax to modify the values of configuration parameters in a dnode. Starting from version 3.3.4.0, the modified configuration parameters will be automatically persisted and will remain effective even after the database service is restarted.
To check whether a configuration parameter supports dynamic modification, please refer to the following page: [taosd Reference](../01-components/01-taosd.md)
To check whether a configuration parameter supports dynamic modification, please refer to the following page: [taosd Reference](/tdengine-reference/components/taosd/)
The value is the parameter's value and needs to be in character format. For example, to change the log output level of dnode 1 to debug:
@ -130,7 +130,7 @@ ALTER LOCAL local_option
You can use the above syntax to modify the client's configuration parameters, and there is no need to restart the client. The changes take effect immediately.
To check whether a configuration parameter supports dynamic modification, please refer to the following page:[taosc Reference](../01-components/02-taosc.md)
To check whether a configuration parameter supports dynamic modification, please refer to the following page:[taosc Reference](/tdengine-reference/components/taosc/)
## View Client Configuration

View File

@ -304,9 +304,10 @@ Displays information about all topics in the current database.
```sql
SHOW TRANSACTIONS;
SHOW TRANSACTION [transaction_id];
```
Displays information about transactions currently being executed in the system (these transactions are only for metadata level, not for regular tables).
Displays information about one or all transactions currently being executed in the system (these transactions operate only at the metadata level, not on regular tables).
## SHOW USERS

View File

@ -60,7 +60,7 @@ CREATE [OR REPLACE] AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 64;
```
For information on how to develop custom functions, please refer to [UDF Usage Instructions](/developer-guide/user-defined-functions/).
## Manage UDF
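As a sketch of the management statements this section covers, the `l2norm` function created above could be listed and removed as follows (statement forms assumed from standard TDengine SQL):

```sql
-- List the user-defined functions known to the cluster.
SHOW FUNCTIONS;
-- Drop the aggregate function when it is no longer needed.
DROP FUNCTION l2norm;
```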

View File

@ -4,7 +4,11 @@ title: Time-Range Small Materialized Aggregates (TSMAs)
slug: /tdengine-reference/sql-manual/manage-tsmas
---
In scenarios with large amounts of data, it is often necessary to query summary results for a certain period. As historical data increases or the time range expands, query time will also increase accordingly. By using materialized aggregation, the calculation results can be stored in advance, allowing subsequent queries to directly read the aggregated results without scanning the original data, such as the SMA (Small Materialized Aggregates) information within the current block.
The SMA information within a block has a small granularity. If the query time range covers days, months, or even years, the number of blocks to scan will be large. Therefore, TSMA (Time-Range Small Materialized Aggregates) allows users to specify a time window for materialized aggregation. By pre-calculating the data within a fixed time window and storing the results, queries can read the pre-calculated results directly to improve performance.
![TSMA Introduction](./assets/TSMA_intro.png)
## Creating TSMA
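Before the formal syntax, a minimal sketch of creating a TSMA, assuming the demo `meters` supertable with `current` and `voltage` columns:

```sql
-- Pre-aggregate avg(current) and max(voltage) over fixed 1-hour windows.
CREATE TSMA tsma_meters_1h ON meters
FUNCTION(AVG(current), MAX(voltage)) INTERVAL(1h);
```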

(Binary image file added: assets/TSMA_intro.png, 109 KiB)

View File

@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: C/C++
title: C/C++ Client Library
slug: /tdengine-reference/client-libraries/cpp
@ -508,8 +509,7 @@ For the OpenTSDB text protocol, the parsing of timestamps follows its official p
- **Interface Description**: Used for polling to consume data. Each consumer can only call this interface in a single thread.
- tmq: [Input] Points to a valid ws_tmq_t structure pointer, which represents a TMQ consumer object.
- timeout: [Input] Polling timeout in milliseconds, a negative number indicates a default timeout of 1 second.
- **Return Value**: Non-`NULL`: Success, returns a pointer to a WS_RES structure, which contains the received message. `NULL`: indicates no data; the error code can be obtained through `ws_errno(NULL)`. Please refer to the reference manual for the specific error message. WS_RES results are consistent with taos_query results, and information in WS_RES can be obtained through various query interfaces, such as schema, etc.
- `int32_t ws_tmq_consumer_close(ws_tmq_t *tmq)`
- **Interface Description**: Used to close the ws_tmq_t structure. Must be used in conjunction with ws_tmq_consumer_new.
- tmq: [Input] Points to a valid ws_tmq_t structure pointer, which represents a TMQ consumer object.
@ -1194,7 +1194,7 @@ In addition to using SQL or parameter binding APIs to insert data, you can also
- **Interface Description**: Used to poll for consuming data, each consumer can only call this interface in a single thread.
- tmq: [Input] Points to a valid tmq_t structure pointer, representing a TMQ consumer object.
- timeout: [Input] Polling timeout in milliseconds, a negative number indicates a default timeout of 1 second.
- **Return Value**: Non-`NULL`: Success, returns a pointer to a TAOS_RES structure containing the received messages. `NULL`: indicates no data; the error code can be obtained through `taos_errno(NULL)`. Please refer to the reference manual for the specific error message. TAOS_RES results are consistent with taos_query results, and information in TAOS_RES can be obtained through various query interfaces, such as schema, etc.
- `int32_t tmq_consumer_close(tmq_t *tmq)`
- **Interface Description**: Used to close a tmq_t structure. Must be used in conjunction with tmq_consumer_new.

View File

@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: Java
title: Java Client Library
slug: /tdengine-reference/client-libraries/java
@ -30,33 +31,36 @@ The JDBC driver implementation for TDengine strives to be consistent with relati
## Version History
| taos-jdbcdriver Version | Major Changes | TDengine Version |
| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------ |
| 3.5.2 | Fixed WebSocket result set free bug. | - |
| 3.5.1 | Fixed the getObject issue in data subscription. | - |
| 3.5.0 | 1. Optimized the performance of WebSocket connection parameter binding, supporting parameter binding queries using binary data. <br/> 2. Optimized the performance of small queries in WebSocket connection. <br/> 3. Added support for setting time zone and app info on WebSocket connection. | 3.3.5.0 and higher |
| 3.4.0 | 1. Replaced fastjson library with jackson. <br/> 2. WebSocket uses a separate protocol identifier. <br/> 3. Optimized background thread usage to avoid user misuse leading to timeouts. | - |
| 3.3.4 | Fixed getInt error when data type is float. | - |
| 3.3.3 | Fixed memory leak caused by closing WebSocket statement. | - |
| 3.3.2 | 1. Optimized parameter binding performance under WebSocket connection. <br/> 2. Improved support for mybatis. | - |
| 3.3.0 | 1. Optimized data transmission performance under WebSocket connection. <br/> 2. Supports skipping SSL verification, off by default. | 3.3.2.0 and higher |
| 3.2.11 | Fixed a bug in closing result set in Native connection. | - |
| 3.2.10 | 1. REST/WebSocket connections support data compression during transmission. <br/> 2. WebSocket automatic reconnection mechanism, off by default. <br/> 3. Connection class provides methods for schemaless writing. <br/> 4. Optimized data fetching performance for native connections. <br/> 5. Fixed some known issues. <br/> 6. Metadata retrieval functions can return a list of supported functions. | - |
| 3.2.9 | Fixed bug in closing WebSocket prepareStatement. | - |
| 3.2.8 | 1. Optimized auto-commit. <br/> 2. Fixed manual commit bug in WebSocket. <br/> 3. Optimized WebSocket prepareStatement using a single connection. <br/> 4. Metadata supports views. | - |
| 3.2.7 | 1. Supports VARBINARY and GEOMETRY types. <br/> 2. Added timezone setting support for native connections. <br/> 3. Added WebSocket automatic reconnection feature. | 3.2.0.0 and higher |
| 3.2.5 | Data subscription adds committed() and assignment() methods. | 3.1.0.3 and higher |
| 3.2.4 | Data subscription adds enable.auto.commit parameter under WebSocket connection, as well as unsubscribe() method. | - |
| 3.2.3 | Fixed ResultSet data parsing failure in some cases. | - |
| 3.2.2 | New feature: Data subscription supports seek function. | 3.0.5.0 and higher |
| 3.2.1 | 1. WebSocket connection supports schemaless and prepareStatement writing. <br/> 2. Consumer poll returns result set as ConsumerRecord, which can be accessed through value() method. | 3.0.3.0 and higher |
| 3.2.0 | Connection issues, not recommended for use. | - |
| 3.1.0 | WebSocket connection supports subscription function. | - |
| 3.0.1 - 3.0.4 | Fixed data parsing errors in result sets under some conditions. 3.0.1 compiled in JDK 11 environment, other versions recommended for JDK 8. | - |
| 3.0.0 | Supports TDengine 3.0 | 3.0.0.0 and higher |
| 2.0.42 | Fixed wasNull interface return value in WebSocket connection. | - |
| 2.0.41 | Fixed username and password encoding method in REST connection. | - |
| 2.0.39 - 2.0.40 | Added REST connection/request timeout settings. | - |
| 2.0.38 | JDBC REST connection adds batch fetching function. | - |
| 2.0.37 | Added support for json tag. | - |
| 2.0.36 | Added support for schemaless writing. | - |
## Exceptions and Error Codes
@ -75,47 +79,47 @@ The error codes that the JDBC connector may report include 4 types:
Please refer to the specific error codes:
| Error Code | Description | Suggested Actions |
| ---------- | --------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- |
| 0x2301 | connection already closed | The connection is already closed, check the connection status, or recreate the connection to execute related commands. |
| 0x2302 | this operation is NOT supported currently! | The current interface is not supported, consider switching to another connection method. |
| 0x2303 | invalid variables | Invalid parameters, please check the interface specifications and adjust the parameter types and sizes. |
| 0x2304 | statement is closed | The statement is already closed, check if the statement was used after being closed, or if the connection is normal. |
| 0x2305 | resultSet is closed | The resultSet has been released, check if the resultSet was used after being released. |
| 0x2306 | Batch is empty! | Add parameters to prepareStatement before executing executeBatch. |
| 0x2307 | Can not issue data manipulation statements with executeQuery() | Use executeUpdate() for update operations, not executeQuery(). |
| 0x2308 | Can not issue SELECT via executeUpdate() | Use executeQuery() for query operations, not executeUpdate(). |
| 0x230d | parameter index out of range | Parameter out of bounds, check the reasonable range of parameters. |
| 0x230e | connection already closed | The connection is already closed, check if the Connection was used after being closed, or if the connection is normal. |
| 0x230f | unknown sql type in tdengine | Check the Data Type types supported by TDengine. |
| 0x2310 | can't register JDBC-JNI driver | Cannot register JNI driver, check if the url is correctly filled. |
| 0x2312 | url is not set | Check if the REST connection url is correctly filled. |
| 0x2314 | numeric value out of range | Check if the correct interface was used for numeric types in the result set. |
| 0x2315 | unknown taos type in tdengine | When converting TDengine data types to JDBC data types, check if the correct TDengine data type was specified. |
| 0x2317 | | Incorrect request type used in REST connection. |
| 0x2318 | | Data transmission error occurred in REST connection, check the network situation and retry. |
| 0x2319 | user is required | Username information is missing when creating a connection. |
| 0x231a | password is required | Password information is missing when creating a connection. |
| 0x231c | httpEntity is null, sql: | An exception occurred in REST connection execution. |
| 0x231d | can't create connection with server within | Increase the httpConnectTimeout parameter to extend the connection time, or check the connection with taosAdapter. |
| 0x231e | failed to complete the task within the specified time | Increase the messageWaitTimeout parameter to extend the execution time, or check the connection with taosAdapter. |
| 0x2350 | unknown error | Unknown exception, please provide feedback to the developers on github. |
| 0x2352 | Unsupported encoding | An unsupported character encoding set was specified in the local connection. |
| 0x2353 | internal error of database, please see taoslog for more details | An error occurred while executing prepareStatement in local connection, check taos log for troubleshooting. |
| 0x2354 | JNI connection is NULL | The Connection was already closed when executing commands in local connection. Check the connection with TDengine. |
| 0x2355 | JNI result set is NULL | The result set is abnormal in local connection, check the connection and retry. |
| 0x2356 | invalid num of fields | The meta information of the result set obtained in local connection does not match. |
| 0x2357 | empty sql string | Fill in the correct SQL for execution. |
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation error in local connection, check taos log for troubleshooting. |
| 0x2371 | consumer properties must not be null! | Parameters are null when creating a subscription, fill in the correct parameters. |
| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains empty values, fill in the correct parameters. |
| 0x2373 | failed to set consumer property, | The parameter value contains empty values, fill in the correct parameters. |
| 0x2375 | topic reference has been destroyed | During the data subscription process, the topic reference was released. Check the connection with TDengine. |
| 0x2376 | failed to set consumer topic, topic name is empty | During the data subscription process, the subscription topic name is empty. Check if the specified topic name is correctly filled. |
| 0x2377 | consumer reference has been destroyed | The data transmission channel for the subscription has been closed, check the connection with TDengine. |
| 0x2378 | consumer create error | Data subscription creation failed, check the error information and taos log for troubleshooting. |
| 0x2379 | seek offset must not be a negative number | The seek interface parameter must not be negative, use the correct parameters. |
| 0x237a | vGroup not found in result set | VGroup not assigned to the current consumer, due to the Rebalance mechanism causing the Consumer and VGroup to be unbound. |
- [TDengine Java Connector Error Code](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
<!-- - [TDengine_ERROR_CODE](../error-code) -->
@ -244,13 +248,13 @@ For WebSocket connections, the configuration parameters in the URL are as follow
- user: Login username for TDengine, default value 'root'.
- password: User login password, default value 'taosdata'.
- charset: Specifies the character set for parsing string data when batch fetching is enabled.
- batchErrorIgnore: true: Continues executing the following SQL if one SQL fails during the execution of Statement's executeBatch. false: Does not execute any statements after a failed SQL. Default value: false.
- httpConnectTimeout: Connection timeout in ms, default value 60000.
- messageWaitTimeout: Message timeout in ms, default value 60000.
- useSSL: Whether SSL is used in the connection.
- timezone: Client timezone, default is the current system timezone. It is recommended not to set this parameter, as using the system time zone provides better performance.
**Note**: Some configuration items (such as: locale, charset) do not take effect in WebSocket connections.
**REST Connection**
Using JDBC REST connection does not depend on the client driver. Compared to native JDBC connections, you only need to:
@ -263,14 +267,13 @@ For REST connections, the configuration parameters in the URL are as follows:
- user: Login username for TDengine, default value 'root'.
- password: User login password, default value 'taosdata'.
- charset: Specifies the character set for parsing string data when batch fetching is enabled.
- batchErrorIgnore: true: Continues executing the following SQL if one SQL fails during the execution of Statement's executeBatch. false: Does not execute any statements after a failed SQL. Default value: false.
- httpConnectTimeout: Connection timeout in ms, default value 60000.
- httpSocketTimeout: Socket timeout in ms, default value 60000.
- useSSL: Whether SSL is used in the connection.
- httpPoolSize: REST concurrent request size, default 20.
**Note**: Some configuration items (such as: locale, charset and timezone) do not take effect in REST connections.
:::note
@ -294,7 +297,9 @@ The configuration parameters in properties are as follows:
- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: Effective only when using native JDBC connections. Client configuration file directory path, default value on Linux OS is `/etc/taos`, on Windows OS is `C:/TDengine/cfg`.
- TSDBDriver.PROPERTY_KEY_CHARSET: Character set used by the client, default value is the system character set.
- TSDBDriver.PROPERTY_KEY_LOCALE: Effective only when using native JDBC connections. Client locale, default value is the current system locale.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE:
- Native connections: Client time zone, default value is the current system time zone. Effective globally. Due to historical reasons, we only support part of the POSIX standard, such as UTC-8 (representing Shanghai, China), GMT-8, Asia/Shanghai.
- WebSocket connections: Client time zone, default value is the current system time zone. Effective on the connection. Only IANA time zones are supported, such as Asia/Shanghai. It is recommended not to set this parameter, as using the system time zone provides better performance.
- TSDBDriver.HTTP_CONNECT_TIMEOUT: Connection timeout, in ms, default value is 60000. Effective only in REST connections.
- TSDBDriver.HTTP_SOCKET_TIMEOUT: Socket timeout, in ms, default value is 60000. Effective only in REST connections and when batchfetch is set to false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: Message timeout, in ms, default value is 60000. Effective only under WebSocket connections.
@ -303,12 +308,14 @@ The configuration parameters in properties are as follows:
- TSDBDriver.PROPERTY_KEY_ENABLE_COMPRESSION: Whether to enable compression during transmission. Effective only when using REST/WebSocket connections. true: enabled, false: not enabled. Default is false.
- TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: Whether to enable auto-reconnect. Effective only when using WebSocket connections. true: enabled, false: not enabled. Default is false.
> **Note**: Enabling auto-reconnect is only effective for simple SQL execution, schema-less writing, and data subscription. It is ineffective for parameter binding. Auto-reconnect is only effective for connections established through parameters specifying the database, and ineffective for later `use db` statements to switch databases.
- TSDBDriver.PROPERTY_KEY_RECONNECT_INTERVAL_MS: Auto-reconnect retry interval, in milliseconds, default value 2000. Effective only when PROPERTY_KEY_ENABLE_AUTO_RECONNECT is true.
- TSDBDriver.PROPERTY_KEY_RECONNECT_RETRY_COUNT: Auto-reconnect retry count, default value 3, effective only when PROPERTY_KEY_ENABLE_AUTO_RECONNECT is true.
- TSDBDriver.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: Disable SSL certificate validation. Effective only when using WebSocket connections. true: enabled, false: not enabled. Default is false.
- TSDBDriver.PROPERTY_KEY_APP_NAME: App name, can be used for display in the `show connections` query result. Effective only when using WebSocket connections. Default value is java.
- TSDBDriver.PROPERTY_KEY_APP_IP: App IP, can be used for display in the `show connections` query result. Effective only when using WebSocket connections. Default value is empty.
Additionally, for native JDBC connections, other parameters such as log level and SQL length can be set through the URL and Properties.
**Priority of Configuration Parameters**
@ -489,16 +496,16 @@ For example: if the password is specified as taosdata in the URL and as taosdemo
List of interface methods that return `true` for supported features, others not explicitly mentioned return `false`.
| Interface Method | Description |
| ---------------------------------------------- | ----------------------------------------------------------------------------------------------- |
| `boolean nullsAreSortedAtStart()` | Determines if `NULL` values are sorted at the start |
| `boolean storesLowerCaseIdentifiers()` | Determines if the database stores identifiers in lowercase |
| `boolean supportsAlterTableWithAddColumn()` | Determines if the database supports adding columns with `ALTER TABLE` |
| `boolean supportsAlterTableWithDropColumn()` | Determines if the database supports dropping columns with `ALTER TABLE` |
| `boolean supportsColumnAliasing()` | Determines if the database supports column aliasing |
| `boolean supportsGroupBy()` | Determines if the database supports `GROUP BY` statements |
| `boolean isCatalogAtStart()` | Determines if the catalog name appears at the start of the fully qualified name in the database |
| `boolean supportsCatalogsInDataManipulation()` | Determines if the database supports catalog names in data manipulation statements |
### Connection Features

View File

@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: Go
title: Go Client Library
slug: /tdengine-reference/client-libraries/go
@ -21,24 +22,25 @@ Supports Go 1.14 and above.
## Version History
| driver-go Version | Major Changes | TDengine Version |
|-------------------|-------------------------------------------------------------------------------------------------|--------------------|
| v3.6.0 | stmt2 native interface, DSN supports passwords containing special characters (url.QueryEscape). | 3.3.5.0 and higher |
| v3.5.8 | Fixed null pointer exception. | - |
| v3.5.7 | taosWS and taosRestful support passing request id. | - |
| v3.5.6 | Improved websocket query and insert performance. | 3.3.2.0 and higher |
| v3.5.5 | Restful supports skipping SSL certificate check. | - |
| v3.5.4 | Compatible with TDengine 3.3.0.0 tmq raw data. | - |
| v3.5.3 | Refactored taosWS. | - |
| v3.5.2 | Websocket compression and optimized tmq subscription performance. | 3.2.3.0 and higher |
| v3.5.1 | Native stmt query and geometry type support. | 3.2.1.0 and higher |
| v3.5.0 | Support tmq get assignment and seek offset. | 3.0.5.0 and higher |
| v3.3.1 | Schemaless protocol insert based on websocket. | 3.0.4.1 and higher |
| v3.1.0 | Provided Kafka-like subscription API. | - |
| v3.0.4 | Added request id related interfaces. | 3.0.2.2 and higher |
| v3.0.3 | Websocket-based statement insert. | - |
| v3.0.2 | Websocket-based data query and insert. | 3.0.1.5 and higher |
| v3.0.1 | Websocket-based message subscription. | - |
| v3.0.0 | Adapted to TDengine 3.0 query and insert. | 3.0.0.0 and higher |
## Exceptions and Error Codes
@ -136,6 +138,8 @@ Full form of DSN:
username:password@protocol(address)/dbname?param=value
```
When the password contains special characters, it needs to be escaped using url.QueryEscape.
##### Native Connection
Import the driver:
@ -493,6 +497,43 @@ The `af` package provides more interfaces using native connections for parameter
* **Interface Description**: Closes the statement.
* **Return Value**: Error information.
From version 3.6.0, the `stmt2` interface for binding parameters is provided.
* `func (conn *Connector) Stmt2(reqID int64, singleTableBindOnce bool) *Stmt2`
* **Interface Description**: Returns a Stmt2 object bound to this connection.
* **Parameter Description**:
* `reqID`: Request ID.
* `singleTableBindOnce`: Indicates whether a single child table is bound only once during a single execution.
* **Return Value**: Stmt2 object.
* `func (s *Stmt2) Prepare(sql string) error`
* **Interface Description**: Prepares an SQL.
* **Parameter Description**:
* `sql`: The statement for parameter binding.
* **Return Value**: Error information.
* `func (s *Stmt2) Bind(params []*stmt.TaosStmt2BindData) error`
* **Interface Description**: Binds data to the prepared statement.
* **Parameter Description**:
* `params`: The data to bind.
* **Return Value**: Error information.
* `func (s *Stmt2) Execute() error`
* **Interface Description**: Executes the batch.
* **Return Value**: Error information.
* `func (s *Stmt2) GetAffectedRows() int`
* **Interface Description**: Gets the number of affected rows (only valid for insert statements).
* **Return Value**: Number of affected rows.
* `func (s *Stmt2) UseResult() (driver.Rows, error)`
* **Interface Description**: Retrieves the result set (only valid for query statements).
* **Return Value**: Result set Rows object, error information.
* `func (s *Stmt2) Close() error`
* **Interface Description**: Closes the statement.
* **Return Value**: Error information.
The `ws/stmt` package provides interfaces for parameter binding via WebSocket
* `func (c *Connector) Init() (*Stmt, error)`

View File

@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: Rust
title: Rust Client Library
slug: /tdengine-reference/client-libraries/rust

View File

@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: Python
title: Python Client Library
slug: /tdengine-reference/client-libraries/python
@ -49,12 +50,14 @@ Supports Python 3.0 and above.
- The platforms supported by native connections are consistent with those supported by the TDengine client driver.
- WebSocket/REST connections support all platforms that can run Python.
## Version History
Python Connector historical versions (it is recommended to use the latest version of `taospy`):
|Python Connector Version | Major Changes | TDengine Version|
| -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- |
|2.7.21 | Native supports STMT2 writing | - |
|2.7.19 | Support Apache Superset connection to TDengine Cloud data source | - |
|2.7.18 | Support Apache SuperSet BI Tools. | - |
|2.7.16 | Add subscription configuration (session.timeout.ms, max.poll.interval.ms). | - |
|2.7.15 | Added support for VARBINARY and GEOMETRY types. | - |
@ -136,7 +139,7 @@ TDengine currently supports timestamp, numeric, character, boolean types, and th
| [tmq_consumer.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq_consumer.py) | tmq subscription |
| [native_all_type_query.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/native_all_type_query.py) | Example supporting all types |
| [native_all_type_stmt.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/native_all_type_stmt.py) | Parameter binding example supporting all types |
| [test_stmt2.py](https://github.com/taosdata/taos-connector-python/blob/main/tests/test_stmt2.py) | Example of STMT2 writing |
Example program source code can be found at:
1. [More native example programs](https://github.com/taosdata/taos-connector-python/tree/main/examples)
@ -429,51 +432,40 @@ TaosResult object can be iterated over to retrieve queried data.
- **Exceptions**: Throws `SchemalessError` if operation fails.
#### Parameter Binding
- `def statement2(self, sql=None, option=None)`
- **Interface Description**: Creates an STMT2 object using the connection object.
- **Parameter Description**:
- `sql`: Precompiled SQL statement; if it is not empty, the `prepare` function will be called on it.
- `option`: Pass in a `TaosStmt2Option` class instance.
- **Return Value**: STMT2 object.
- **Exception**: Throws `ConnectionError` on failure.
- `def prepare(self, sql)`
- **Interface Description**: Binds a precompiled SQL statement.
- **Parameter Description**:
- `sql`: Precompiled SQL statement.
- **Exception**: Throws `StatementError` on failure.
- `def bind_param(self, tbnames, tags, datas)`
- **Interface Description**: Binds data as independent arrays.
- **Parameter Description**:
- `tbnames`: Array of table names to bind; data type is list.
- `tags`: Array of tag column values to bind; data type is list.
- `datas`: Array of data column values to bind; data type is list.
- **Exception**: Throws `StatementError` on failure.
- `def bind_param_with_tables(self, tables)`
- **Interface Description**: Binds data organized as independent tables. Each independent table carries the table name, tag values, and data column attributes in the table object.
- **Parameter Description**:
- `tables`: Array of `BindTable` independent table objects.
- **Exception**: Throws `StatementError` on failure.
- `def execute(self) -> int:`
- **Interface Description**: Executes and writes all the bound data.
- **Return Value**: Number of affected rows.
- **Exception**: Throws `QueryError` on failure.
- `def result(self)`
- **Interface Description**: Gets the parameter binding query result set.
- **Return Value**: Returns a TaosResult object.
- `def close(self)`
- **Interface Description**: Closes the STMT2 object.
#### Data Subscription

View File

@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: Node.js
title: Node.js Client Library
slug: /tdengine-reference/client-libraries/node

View File

@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: C#
title: C# Client Library
slug: /tdengine-reference/client-libraries/csharp
@ -22,13 +23,14 @@ import RequestId from "../../assets/resources/_request_id.mdx";
## Version History
| Connector Version | Major Changes | TDengine Version |
|-------------------|------------------------------------------------------------|--------------------|
| 3.1.5 | Fix WebSocket encoding error for Chinese character length. | - |
| 3.1.4 | Improved WebSocket query and insert performance. | 3.3.2.0 and higher |
| 3.1.3 | Supported WebSocket auto-reconnect. | - |
| 3.1.2 | Fixed schemaless resource release. | - |
| 3.1.1 | Supported varbinary and geometry types. | - |
| 3.1.0 | WebSocket uses a native C# implementation. | 3.2.1.0 and higher |
## Exceptions and Error Codes

View File

@ -124,7 +124,7 @@ In addition to this, the WebSocket connection method also supports 32-bit applic
| v1.1.0 | 1. Supports view functionality. <br/>2. Supports VARBINARY/GEOMETRY data types. <br/>3. Supports ODBC 32-bit WebSocket connection method (Enterprise edition only). <br/>4. Supports ODBC data source configuration dialog settings for compatibility adaptation options for industrial software like KingSCADA, Kepware, etc. (Enterprise edition only). | 3.3.3.0 and higher |
| v1.0.2 | Supports CP1252 character encoding. | 3.2.3.0 and higher |
| v1.0.1 | 1. Supports DSN settings for BI mode, in BI mode TDengine database does not return system database and supertable subtable information. <br/>2. Refactored character set conversion module, improving read and write performance. <br/> 3. Default connection method in ODBC data source configuration dialog changed to "WebSocket". <br/>4. Added "Test Connection" control in ODBC data source configuration dialog. <br/>5. ODBC data source configuration supports Chinese/English interface. | - |
| v1.0.0.0 | Initial release, supports interacting with TDengine database to read and write data, refer to the "API Reference" section for details. | 3.2.2.0 and higher |
## Data Type Mapping

View File

@ -252,7 +252,7 @@ Description:
- code: (`int`) 0 represents success.
- column_meta: (`[][3]any`) Column information, each column is described by three values: column name (string), column type (string), and type length (int).
- rows: (`int`) Number of data return rows.
- data: (`[][]any`) Specific data content (the time format only supports RFC3339; by default the result set is for timezone 0, and when tz is specified, times in the corresponding time zone are returned).
Column types use the following strings:
@ -434,7 +434,6 @@ curl http://<fqdn>:<port>/rest/login/<username>/<password>
Here, `fqdn` is the FQDN or IP address of the TDengine database, `port` is the port number of the TDengine service, `username` is the database username, and `password` is the database password. The return is in JSON format, with the fields meaning as follows:
- status: Flag of the request result.
- code: Return code.
- desc: Authorization code.

View File

@ -534,4 +534,6 @@ This document details the server error codes that may be encountered when using
| 0x80004000 | Invalid message | The subscribed data is illegal, generally does not occur | Check the client-side error logs for details |
| 0x80004001 | Consumer mismatch | The vnode requested for subscription and the reassigned vnode are inconsistent, usually occurs when new consumers join the same consumer group | Internal error, not exposed to users |
| 0x80004002 | Consumer closed | The consumer no longer exists | Check if it has already been closed |
| 0x80004017 | Invalid status, please subscribe topic first | The tmq status is invalid: data was polled without calling subscribe first | Call subscribe before polling data |
| 0x80004100 | Stream task not exist | The stream computing task does not exist | Check the server-side error logs |

View File

@ -328,8 +328,35 @@ In addition to precomputation, TDengine also supports various downsampling stora
### Multi-Level Storage and Object Storage
By default, TDengine saves all data in the /var/lib/taos directory, and the data files of each vnode are saved in a different subdirectory under it. In order to expand the storage space, minimize the bottleneck of file reading, and improve the data throughput rate, TDengine can be configured via the system parameter "dataDir" to use multiple mounted hard disks at the same time. In addition, TDengine also provides tiered data storage, i.e., storage on different storage media according to the timestamps of data files. For example, the latest data is stored on SSD, data older than a week is stored on local hard disk, and data older than four weeks is stored on a network storage device. This reduces storage costs while ensuring efficient data access. The movement of data across storage media is done automatically by the system and is completely transparent to applications. Tiered storage is also configured through the system parameter "dataDir".
dataDir format is as follows:
```
dataDir data_path [tier_level] [primary] [disable_create_new_file]
```
Where:

- `data_path` is the folder path of the mount point.
- `tier_level` is the storage tier of the media; the higher the tier, the older the data files stored on it. Multiple hard disks can be mounted at the same storage tier, and data files on the same tier are distributed across all hard disks within that tier. TDengine supports up to 3 tiers of storage, so tier_level values are 0, 1, and 2. When configuring dataDir, exactly one mount path must be given without specifying tier_level; it is called the special mount disk (path). This mount path defaults to level 0 storage media and contains special file links, which must not be removed, otherwise the written data will be irreparably damaged.
- `primary` indicates whether the data dir is the primary mount point. Enter 0 for false or 1 for true; the default value is 1. A TDengine cluster can have only one `primary` mount point, which must be on tier 0.
- `disable_create_new_file` indicates whether creating new file sets on the specified mount point is prohibited. Enter 0 for false or 1 for true; the default value is 0. Tier 0 storage must have at least one mount point with disable_create_new_file set to 0; tier 1 and tier 2 storage do not have this restriction.
Suppose there is a physical node with six mountable hard disks /mnt/disk1, /mnt/disk2, ..., /mnt/disk6, where disk1 and disk2 need to be designated as level 0 storage media, disk3 and disk4 as level 1 storage media, and disk5 and disk6 as level 2 storage media. Disk1 is the special mount disk; you can configure it in /etc/taos/taos.cfg as follows:
```
dataDir /mnt/disk1/taos 0 1 0
dataDir /mnt/disk2/taos 0 0 0
dataDir /mnt/disk3/taos 1 0 0
dataDir /mnt/disk4/taos 1 0 1
dataDir /mnt/disk5/taos 2 0 0
dataDir /mnt/disk6/taos 2 0 0
```
The mounted disks can also be non-local network disks, as long as the system can access them.
You can use the following command to dynamically modify dataDir to control whether disable_create_new_file is enabled for the current directory.
```
alter dnode 1 "/mnt/disk2/taos 1";
```
Note: Tiered storage is only supported in the Enterprise Edition.
Additionally, TDengine offers tiered data storage functionality, allowing users to store data from different time periods in directories on different storage devices. This facilitates the separation of "hot" data (frequently accessed) and "cold" data (less frequently accessed), making full use of various storage resources while saving costs. For example, data that is recently collected and requires frequent access can be stored on high-performance solid-state drives due to their high read performance requirements. Data that exceeds a certain age and has lower query demands can be stored on mechanically driven hard disks, which are relatively cheaper.
To further reduce storage costs, TDengine also supports storing time-series data in object storage systems. Through its innovative design, in most cases, the performance of querying time-series data from object storage systems is close to half that of local disks, and in some scenarios, the performance can even be comparable to local disks. Additionally, TDengine allows users to perform delete and update operations on time-series data stored in object storage.

View File

@ -297,3 +297,22 @@ Reporting this error indicates that the first connection to the cluster was succ
Therefore, first check whether all ports on the server and cluster (default 6030 for native connections and 6041 for HTTP connections) are open; next, check whether the client's hosts file has configured the fqdn and IP information for all dnodes in the cluster.
If the issue still cannot be resolved, it is necessary to contact Taos technical personnel for support.
### 32 Why is the original database lost and the cluster ID changed when the data directory dataDir of the database remains unchanged on the same server?
Background: When the TDengine server process (taosd) starts, if there are no valid data file subdirectories (such as mnode, dnode, and vnode) under the data directory (dataDir, which is specified in the configuration file taos.cfg), these directories will be created automatically. When a new mnode directory is created, a new cluster ID will be allocated, generating a new cluster.
Cause analysis: The data directory dataDir of taosd can point to multiple different mount points. If these mount points are not configured for automatic mounting in the fstab file, then after the server restarts, dataDir will only exist as a normal directory on the local disk and will not point to the mounted disk as expected. At this point, if the taosd service is started, it will create new directories under dataDir, generating a new cluster.
Impact of the problem: After the server is restarted, the original database is lost (note: it is not really lost; the original data disk is simply not mounted and cannot be seen for the time being) and the cluster ID changes, resulting in the inability to access the original database. For enterprise users who have been authorized by cluster ID, they will also find that the machine code of the cluster server has not changed but the original authorization has expired. If the problem is not monitored, or not found and handled in time, the user will not notice that the original database has been lost, resulting in losses and increased operation and maintenance costs.
Problem solving: Configure automatic mounting of the dataDir directory in the fstab file so that dataDir always points to the expected mount point and directory. Then, restarting the server will recover the original database and cluster. In a subsequent version, we will add a check so that taosd exits during the startup phase when it detects that dataDir has changed between runs, and provides a corresponding error prompt.
### 33 How to solve msvcp140.dll loss when running TDengine on the Windows platform?
1. Reinstall Microsoft Visual C++ Redistributable: as msvcp140.dll is part of the Microsoft Visual C++ Redistributable, reinstalling this package usually resolves most issues. You can download the corresponding version from the official Microsoft website for installation.
2. Manually download and replace the msvcp140.dll file online: you can download the msvcp140.dll file from a reliable source and copy it to the corresponding directory in the system. Ensure that the downloaded file matches your system architecture (32-bit or 64-bit) and that the source is secure.
### 34 Which is faster: querying data from a supertable with a TAG filter, or querying a child table directly?
Querying a child table directly is faster. Querying a supertable with a TAG filter is designed for convenience: it can filter data from multiple child tables at the same time. If the goal is performance and the target child table is already known, querying that child table directly achieves higher performance.
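A sketch of the two query styles, assuming the standard demo schema in which the supertable `meters` has a `groupId` tag and `d1001` is one of its child tables:

```sql
-- Fastest: the target child table is already known, query it directly.
SELECT AVG(current) FROM d1001;

-- Convenient: filter many child tables of the supertable by tag at once.
SELECT AVG(current) FROM meters WHERE groupId = 2;
```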
### 35 How to view data compression ratio indicators?
Currently, TDengine only provides compression ratios based on tables, not databases or the entire system. To view the compression ratios, execute the `SHOW TABLE DISTRIBUTED table_name;` command in the taos CLI client. The table_name can be a supertable, regular table, or subtable. For details [Click Here](https://docs.tdengine.com/tdengine-reference/sql-manual/show-commands/#show-table-distributed)

View File

@ -25,6 +25,14 @@ Download links for TDengine 3.x version installation packages are as follows:
import Release from "/components/ReleaseV3";
## 3.3.5.2
<Release type="tdengine" version="3.3.5.2" />
## 3.3.5.0
<Release type="tdengine" version="3.3.5.0" />
## 3.3.4.8
<Release type="tdengine" version="3.3.4.8" />

View File

@ -2,7 +2,7 @@
title: TDengine 3.3.4.8 Release Notes
sidebar_label: 3.3.4.8
description: Version 3.3.4.8 Notes
slug: /release-history/release-notes/3-3-4-8
---
## New Features

View File

@ -0,0 +1,85 @@
---
title: TDengine 3.3.5.0 Release Notes
sidebar_label: 3.3.5.0
description: Version 3.3.5.0 Notes
slug: /release-history/release-notes/3-3-5-0
---
## Features
1. feat: refactor MQTT to improve stability and performance
2. feat: refactor taosX incremental backup-restore
3. feat: add stmt2 apis in JDBC via websocket connection
4. feat: add stmt2 api in Rust connector
5. feat: add error codes in error prompts in taos-CLI
6. feat: Superset can connect to TDengine via the Python connector
7. feat: configurable grafana dashboards in explorer management
8. feat: add taosX-agent in-memory cache queue capacity option
## Enhancements
1. enh: adjust the reporting mechanism of telemetry.
2. enh: support for SQL-based statistics of disk space for a specified DB.
3. enh: add memory management for SQL queries on the server side
4. enh: interval clause allows the use of the AUTO keyword to specify the window offset.
5. enh: reduce the impact on data write performance during data migration across multi-level storage
6. enh: migrate from Angular to React for Grafana 11.3+
7. enh: refactor the taosAdapter WebSocket API for slightly better performance
8. enh: add health state in taosX task status
9. enh: taosX add configurations to handle exceptions
10. enh: support setting options for client connections, including time zone, character set, user IP, and user name.
11. enh: taosdump supports retrying after a connection timeout or broken connection
12. enh: allow creating indexes on tags that are already subscribed
13. enh: taosX now supports literal special characters in passwords
14. enh: improve data write performance when Last Cache is activated.
15. enh: compact command supports automatic execution, concurrency setting, and progress observation.
16. enh: support updating global configuration parameters through SQL statements and persisting them.
17. enh: update the default compression method for all data types to improve the compression ratio in most scenarios.
18. enh: taosBenchmark --nodrop fix for mac/windows
19. enh: prohibit the simultaneous execution of DB compaction and replica change operations (Enterprise).
20. enh: taosdump supports primary key tables
21. enh: display user IP and name in the results of the SHOW QUERIES and SHOW CONNECTIONS statements.
22. enh: (JDBC) support batch insertion into multiple tables
23. enh: support for dynamically modifying the dataDir parameter for multi-level storage.
24. enh: prefer db file under data_dir
25. enh: require users to set strong passwords, which must be 8 to 16 characters in length and include at least three of the following character types: uppercase letters, lowercase letters, numbers, and special characters.
26. enh: improve the speed at which clients acquire the new Leader.
27. enh: support negative regex pattern in opc point selector
## Fixes
1. fix: the potential for deadlocks when updating checkpoints in stream computing under high-load scenarios.
2. fix: error when writing tmq data into the target if terrno is already set
3. fix: taosd cannot start when there is data corruption in a block within the WAL
4. fix: taosBenchmark fails when taosd is disconnected in replica 2/3 mode
5. fix: log files being lost when they are switched frequently.
6. fix: the stream computing stops due to the data update within the window.
7. fix: libtaosws.so sets an incorrect error code when the connection is terminated while fetching data.
8. fix: taosX opc error in case of @-prefixed name
9. fix: fix permission denied with show vgroups sql in cloud
10. fix: fix sql syntax error when migrating from large stables with compress options
11. fix: incorrect memory estimation for vnode usage
12. fix: failed to perform UNION ALL query on constant strings of the varchar type.
13. fix: leader transfer during the execution of transaction may cause deadlock.
14. fix: Rust connector invalid pointer address in ws_stmt_get_tag_fields
15. fix: union statement fails when executing with subqueries containing multiple NULLs.
16. fix: the pause operation of stream computing might fail.
17. fix: when writing data into a sub-table with a table name length of 192 characters using an SQL statement, errors may occur if the table name is enclosed in backticks (`).
18. fix: when performing a join query on super tables across different databases, if each database contains only one vnode, the query will return an error.
19. fix: insufficient disk space causes taosX to panic
20. fix: when writing data to a super table, using bound and unbound values simultaneously triggers an exception.
21. fix: non-existent metrics cause a panic when connecting with the agent
22. fix: when creating an index on a tag with a large character length, taosd may crash.
23. fix: when the input parameters of the functions first, last, last_row, and char exceed 127, taosd may crash. https://github.com/taosdata/TDengine/issues/29241
24. fix: when the number of rows in the result set of the LIMIT statement exceeds the size of a single data block, the returned count does not match the expectation.
25. fix: when synchronizing data between clusters, if the target task is deleted, the source cluster may run out of memory
26. fix: metadata read-write lock misconfiguration leads to a very small chance of blocking writes.
27. fix: when importing CSV files with the INSERT INTO statement on the Windows platform, a missing newline character at the end of the file may cause an infinite read loop.
28. fix: after the tags of a table are updated, stream computing fails to recognize and apply the new values.
29. fix: fix kafka timeout issue and improve performance and stability
30. fix: in sql queries, when both 'is null' and invalid 'in' filter conditions are included simultaneously, the query results are incorrect. https://github.com/taosdata/TDengine/issues/29067
31. fix: sql queries containing both 'IN' and 'BETWEEN' filter conditions result in incorrect query results. https://github.com/taosdata/TDengine/issues/28989
32. fix: when performing multiplication or division operations between timestamp and numeric types, the results are incorrect. https://github.com/taosdata/TDengine/issues/28339
33. fix: data type conversion error in the IN statement leads to incorrect query results. https://github.com/taosdata/TDengine/issues/29047 https://github.com/taosdata/TDengine/issues/28902
34. fix: the error in filtering results when constant conditions are combined with OR operators. https://github.com/taosdata/TDengine/issues/28904
35. fix: when performing subtraction operations on timestamp types, negative values are not considered. https://github.com/taosdata/TDengine/issues/28906
36. fix: tag values may display incorrectly when using the GROUP BY tag syntax
37. fix: a gcc < 10 bug causes taosX compile errors

View File

@ -0,0 +1,43 @@
---
title: TDengine 3.3.5.2 Release Notes
sidebar_label: 3.3.5.2
description: Version 3.3.5.2 Notes
slug: /release-history/release-notes/3.3.5.2
---
## Features
1. feat: taosX now supports multiple stables with templates for MQTT
## Enhancements
1. enh: improve the taosX error message if the database is invalid
2. enh: use poetry group dependencies and reduce dependencies on install [#251](https://github.com/taosdata/taos-connector-python/issues/251)
3. enh: improve backup and restore using taosX
4. enh: during multi-level storage data migration, if the migration takes too long, it may cause the Vnode to switch leader
5. enh: adjust the systemctl strategy for managing the taosd process: if three consecutive restarts fail within 60 seconds, the next restart is delayed until 900 seconds later
## Fixes
1. fix: the maxRetryWaitTime parameter controls the client's maximum reconnection timeout when the cluster cannot provide services, but it did not take effect when a Sync timeout error was encountered
2. fix: supports immediate subscription to the new tag value after modifying the tag value of the sub-table
3. fix: the tmq_consumer_poll function for data subscription does not return an error code when the call fails
4. fix: taosd may crash when more than 100 views are created and the show views command is executed
5. fix: when using stmt2 to insert data, if not all data columns are bound, the insertion operation will fail
6. fix: when using stmt2 to insert data, if the database name or table name is enclosed in backticks, the insertion operation will fail
7. fix: when closing a vnode, if there are ongoing file merge tasks, taosd may crash
8. fix: frequent execution of the “drop table with tb_uid” statement may lead to a deadlock in taosd
9. fix: the potential deadlock during the switching of log files
10. fix: prohibit the creation of databases with the same names as system databases (information_schema, performance_schema)
11. fix: when the inner query of a nested query comes from a super table, the sorting information cannot be pushed up
12. fix: incorrect error reporting when attempting to write Geometry data types that do not conform to topological specifications through the STMT interface
13. fix: when using the percentile function and session window in a query statement, if an error occurs, taosd may crash
14. fix: the issue of being unable to dynamically modify system parameters
15. fix: random transaction errors in replication
16. fix: when the same consumer unsubscribes and immediately attempts to subscribe to a different topic, the subscription API returns an error
17. fix: fix CVE-2022-28948 security issue in go connector
18. fix: when a subquery in a view contains an ORDER BY clause with an alias, and the query function itself also has an alias, querying the view will result in an error
19. fix: when changing a database from a single replica to multiple replicas, if some metadata generated by earlier versions is no longer used in the new version, the modification operation will fail
20. fix: column names were not correctly copied when using SELECT * FROM subqueries
21. fix: when applying the max/min function to string-type data, the results are inaccurate and taosd may crash
22. fix: stream computing does not support the use of the HAVING clause, but no error is reported during creation
23. fix: the version information displayed by taos shell for the server is inaccurate, such as being unable to correctly distinguish between the community edition and the enterprise edition
24. fix: in certain specific query scenarios, when JOIN and CAST are used together, taosd may crash

View File

@ -3,6 +3,9 @@ title: Release Notes
slug: /release-history/release-notes
---
[3.3.5.0](./3-3-5-0/)
[3.3.5.2](./3.3.5.2)
[3.3.4.8](./3-3-4-8/)
[3.3.4.3](./3-3-4-3/)

View File

@ -19,7 +19,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.4.0</version>
<version>3.5.2</version>
</dependency>
<dependency>
<groupId>org.locationtech.jts</groupId>

View File

@ -1,6 +1,4 @@
package com.taosdata.example;
import com.alibaba.fastjson.JSON;
import com.taosdata.jdbc.AbstractStatement;
import java.sql.*;

View File

@ -104,8 +104,9 @@ public class JdbcDemo {
private void executeQuery(String sql) {
long start = System.currentTimeMillis();
try (Statement statement = connection.createStatement()) {
ResultSet resultSet = statement.executeQuery(sql);
try (Statement statement = connection.createStatement();
ResultSet resultSet = statement.executeQuery(sql)) {
long end = System.currentTimeMillis();
printSql(sql, true, (end - start));
Util.printResult(resultSet);

View File

@ -47,7 +47,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.4.0</version>
<version>3.5.2</version>
</dependency>
</dependencies>

View File

@ -18,7 +18,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.4.0</version>
<version>3.5.2</version>
</dependency>
<!-- druid -->
<dependency>

View File

@ -17,7 +17,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.4.0</version>
<version>3.5.2</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>

View File

@ -47,7 +47,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.4.0</version>
<version>3.5.2</version>
</dependency>
<dependency>

View File

@ -70,7 +70,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.4.0</version>
<version>3.5.2</version>
</dependency>
<dependency>

View File

@ -67,7 +67,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.4.0</version>
<version>3.5.2</version>
<!-- <scope>system</scope>-->
<!-- <systemPath>${project.basedir}/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar</systemPath>-->
</dependency>

View File

@ -58,12 +58,13 @@ static int DemoInsertData() {
taos_cleanup();
return -1;
}
taos_free_result(result);
// you can check affectedRows here
int rows = taos_affected_rows(result);
fprintf(stdout, "Successfully inserted %d rows into power.meters.\n", rows);
taos_free_result(result);
// close & clean
taos_close(taos);
taos_cleanup();

View File

@ -0,0 +1,579 @@
package com.taosdata.flink.example;
import com.taosdata.flink.cdc.TDengineCdcSource;
import com.taosdata.flink.common.TDengineCdcParams;
import com.taosdata.flink.common.TDengineConfigParams;
import com.taosdata.flink.sink.TDengineSink;
import com.taosdata.flink.source.TDengineSource;
import com.taosdata.flink.source.entity.SourceSplitSql;
import com.taosdata.flink.source.entity.SplitType;
import com.taosdata.flink.source.entity.TimestampSplitInfo;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.tmq.ConsumerRecords;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.core.execution.JobClient;
import org.apache.flink.shaded.curator5.com.google.common.base.Strings;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.data.RowData;
import org.junit.Assert;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.text.SimpleDateFormat;
import java.time.Duration;
import java.time.ZoneId;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import javax.xml.transform.Source;
import org.apache.flink.streaming.api.CheckpointingMode;
import static org.apache.flink.streaming.api.CheckpointingMode.AT_LEAST_ONCE;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.table.data.GenericRowData;
// assumed package path, alongside the connector's other source.entity classes
import com.taosdata.flink.source.entity.SourceRecords;
import com.taosdata.jdbc.tmq.ConsumerRecord;
import java.util.Iterator;
import java.util.concurrent.atomic.AtomicInteger;
public class Main {
static String jdbcUrl = "jdbc:TAOS-WS://localhost:6041?user=root&password=taosdata";
// shared counter used by several of the examples below
static AtomicInteger totalVoltage = new AtomicInteger(0);
static void prepare() throws ClassNotFoundException, SQLException {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
String insertQuery = "INSERT INTO " +
"power.d1001 USING power.meters TAGS('California.SanFrancisco', 1) " +
"VALUES " +
"('2024-12-19 19:12:45.642', 50.30000, 201, 0.31000) " +
"('2024-12-19 19:12:46.642', 82.60000, 202, 0.33000) " +
"('2024-12-19 19:12:47.642', 92.30000, 203, 0.31000) " +
"('2024-12-19 18:12:45.642', 50.30000, 201, 0.31000) " +
"('2024-12-19 18:12:46.642', 82.60000, 202, 0.33000) " +
"('2024-12-19 18:12:47.642', 92.30000, 203, 0.31000) " +
"('2024-12-19 17:12:45.642', 50.30000, 201, 0.31000) " +
"('2024-12-19 17:12:46.642', 82.60000, 202, 0.33000) " +
"('2024-12-19 17:12:47.642', 92.30000, 203, 0.31000) " +
"power.d1002 USING power.meters TAGS('Alabama.Montgomery', 2) " +
"VALUES " +
"('2024-12-19 19:12:45.642', 50.30000, 204, 0.25000) " +
"('2024-12-19 19:12:46.642', 62.60000, 205, 0.33000) " +
"('2024-12-19 19:12:47.642', 72.30000, 206, 0.31000) " +
"('2024-12-19 18:12:45.642', 50.30000, 204, 0.25000) " +
"('2024-12-19 18:12:46.642', 62.60000, 205, 0.33000) " +
"('2024-12-19 18:12:47.642', 72.30000, 206, 0.31000) " +
"('2024-12-19 17:12:45.642', 50.30000, 204, 0.25000) " +
"('2024-12-19 17:12:46.642', 62.60000, 205, 0.33000) " +
"('2024-12-19 17:12:47.642', 72.30000, 206, 0.31000) ";
Class.forName("com.taosdata.jdbc.ws.WebSocketDriver");
try (Connection connection = DriverManager.getConnection(jdbcUrl, properties);
Statement stmt = connection.createStatement()) {
stmt.executeUpdate("DROP TOPIC IF EXISTS topic_meters");
stmt.executeUpdate("DROP database IF EXISTS power");
// create database
int rowsAffected = stmt.executeUpdate("CREATE DATABASE IF NOT EXISTS power vgroups 5");
stmt.executeUpdate("use power");
// you can check rowsAffected here
System.out.println("Create database power successfully, rowsAffected: " + rowsAffected);
// create table
rowsAffected = stmt.executeUpdate("CREATE STABLE IF NOT EXISTS meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);");
// you can check rowsAffected here
System.out.println("Create stable power.meters successfully, rowsAffected: " + rowsAffected);
stmt.executeUpdate("CREATE TOPIC topic_meters as SELECT ts, `current`, voltage, phase, location, groupid, tbname FROM meters");
int affectedRows = stmt.executeUpdate(insertQuery);
// you can check affectedRows here
System.out.println("Successfully inserted " + affectedRows + " rows to power.meters.");
stmt.executeUpdate("DROP database IF EXISTS power_sink");
// create database
stmt.executeUpdate("CREATE DATABASE IF NOT EXISTS power_sink vgroups 5");
stmt.executeUpdate("use power_sink");
// you can check rowsAffected here
System.out.println("Create database power successfully, rowsAffected: " + rowsAffected);
// create table
stmt.executeUpdate("CREATE STABLE IF NOT EXISTS sink_meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);");
// you can check rowsAffected here
stmt.executeUpdate("CREATE TABLE IF NOT EXISTS sink_normal (ts timestamp, current float, voltage int, phase float);");
// you can check rowsAffected here
} catch (Exception ex) {
// please refer to the JDBC specifications for detailed exceptions info
System.out.printf("Failed to create database power or stable meters, %sErrMessage: %s%n",
ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "",
ex.getMessage());
// Print stack trace for context in examples. Use logging in production.
throw ex;
}
}
public static void main(String[] args) throws Exception {
prepare();
if (args != null && args.length > 0 && args[0].equals("source")) {
testSource();
} else if (args != null && args.length > 0 && args[0].equals("table")) {
testTableToSink();
} else if (args != null && args.length > 0 && args[0].equals("cdc")) {
testCustomTypeCdc();
}else if (args != null && args.length > 0 && args[0].equals("table-cdc")) {
testCdcTableToSink();
}
}
static SourceSplitSql getTimeSplit() {
// ANCHOR: time_interval
SourceSplitSql splitSql = new SourceSplitSql();
splitSql.setSql("select ts, `current`, voltage, phase, groupid, location, tbname from meters")
.setSplitType(SplitType.SPLIT_TYPE_TIMESTAMP)
.setTimestampSplitInfo(new TimestampSplitInfo(
"2024-12-19 16:12:48.000",
"2024-12-19 19:12:48.000",
"ts",
Duration.ofHours(1),
new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"),
ZoneId.of("Asia/Shanghai")));
// ANCHOR_END: time_interval
return splitSql;
}
static SourceSplitSql getTagSplit() throws Exception {
// ANCHOR: tag_split
SourceSplitSql splitSql = new SourceSplitSql();
splitSql.setSql("select ts, current, voltage, phase, groupid, location from meters where voltage > 100")
.setTagList(Arrays.asList("groupid >100 and location = 'Shanghai'",
"groupid >50 and groupid < 100 and location = 'Guangzhou'",
"groupid >0 and groupid < 50 and location = 'Beijing'"))
.setSplitType(SplitType.SPLIT_TYPE_TAG);
// ANCHOR_END: tag_split
return splitSql;
}
static SourceSplitSql getTableSplit() {
// ANCHOR: table_split
SourceSplitSql splitSql = new SourceSplitSql();
splitSql.setSelect("ts, current, voltage, phase, groupid, location")
.setTableList(Arrays.asList("d1001", "d1002"))
.setOther("order by ts limit 100")
.setSplitType(SplitType.SPLIT_TYPE_TABLE);
// ANCHOR_END: table_split
return splitSql;
}
//ANCHOR: source_test
static void testSource() throws Exception {
Properties connProps = new Properties();
connProps.setProperty(TDengineConfigParams.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
connProps.setProperty(TDengineConfigParams.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connProps.setProperty(TDengineConfigParams.VALUE_DESERIALIZER, "RowData");
connProps.setProperty(TDengineConfigParams.TD_JDBC_URL, "jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata");
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(3);
SourceSplitSql splitSql = new SourceSplitSql();
splitSql.setSql("select ts, `current`, voltage, phase, groupid, location, tbname from meters")
.setSplitType(SplitType.SPLIT_TYPE_TIMESTAMP)
.setTimestampSplitInfo(new TimestampSplitInfo(
"2024-12-19 16:12:48.000",
"2024-12-19 19:12:48.000",
"ts",
Duration.ofHours(1),
new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"),
ZoneId.of("Asia/Shanghai")));
TDengineSource<RowData> source = new TDengineSource<>(connProps, splitSql, RowData.class);
DataStreamSource<RowData> input = env.fromSource(source, WatermarkStrategy.noWatermarks(), "tdengine-source");
DataStream<String> resultStream = input.map((MapFunction<RowData, String>) rowData -> {
StringBuilder sb = new StringBuilder();
sb.append("ts: " + rowData.getTimestamp(0, 0) +
", current: " + rowData.getFloat(1) +
", voltage: " + rowData.getInt(2) +
", phase: " + rowData.getFloat(3) +
", location: " + new String(rowData.getBinary(4)));
sb.append("\n");
return sb.toString();
});
resultStream.print();
env.execute("tdengine flink source");
}
//ANCHOR_END: source_test
//ANCHOR: source_custom_type_test
void testCustomTypeSource() throws Exception {
System.out.println("testTDengineSourceByTimeSplit start");
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connProps.setProperty(TDengineConfigParams.VALUE_DESERIALIZER, "com.taosdata.flink.entity.ResultSoureDeserialization");
connProps.setProperty(TDengineConfigParams.TD_JDBC_URL, "jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata");
SourceSplitSql splitSql = new SourceSplitSql();
splitSql.setSql("select ts, `current`, voltage, phase, groupid, location, tbname from meters")
.setSplitType(SplitType.SPLIT_TYPE_TIMESTAMP)
// split by time range
.setTimestampSplitInfo(new TimestampSplitInfo(
"2024-12-19 16:12:48.000",
"2024-12-19 19:12:48.000",
"ts",
Duration.ofHours(1),
new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"),
ZoneId.of("Asia/Shanghai")));
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(3);
TDengineSource<ResultBean> source = new TDengineSource<>(connProps, splitSql, ResultBean.class);
DataStreamSource<ResultBean> input = env.fromSource(source, WatermarkStrategy.noWatermarks(), "tdengine-source");
DataStream<String> resultStream = input.map((MapFunction<ResultBean, String>) rowData -> {
StringBuilder sb = new StringBuilder();
sb.append("ts: " + rowData.getTs() +
", current: " + rowData.getCurrent() +
", voltage: " + rowData.getVoltage() +
", phase: " + rowData.getPhase() +
", groupid: " + rowData.getGroupid() +
", location: " + rowData.getLocation() +
", tbname: " + rowData.getTbname());
sb.append("\n");
totalVoltage.addAndGet(rowData.getVoltage());
return sb.toString();
});
resultStream.print();
env.execute("flink tdengine source");
}
//ANCHOR_END: source_custom_type_test
//ANCHOR: source_batch_test
void testBatchSource() throws Exception {
Properties connProps = new Properties();
connProps.setProperty(TDengineConfigParams.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
connProps.setProperty(TDengineConfigParams.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connProps.setProperty(TDengineConfigParams.VALUE_DESERIALIZER, "RowData");
connProps.setProperty(TDengineConfigParams.TD_BATCH_MODE, "true");
connProps.setProperty(TDengineConfigParams.TD_JDBC_URL, "jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata");
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(3);
Class<SourceRecords<RowData>> typeClass = (Class<SourceRecords<RowData>>) (Class<?>) SourceRecords.class;
SourceSplitSql sql = new SourceSplitSql("select ts, `current`, voltage, phase, tbname from meters");
TDengineSource<SourceRecords<RowData>> source = new TDengineSource<>(connProps, sql, typeClass);
DataStreamSource<SourceRecords<RowData>> input = env.fromSource(source, WatermarkStrategy.noWatermarks(), "tdengine-source");
DataStream<String> resultStream = input.map((MapFunction<SourceRecords<RowData>, String>) records -> {
StringBuilder sb = new StringBuilder();
Iterator<RowData> iterator = records.iterator();
while (iterator.hasNext()) {
GenericRowData row = (GenericRowData) iterator.next();
sb.append("ts: " + row.getTimestamp(0, 0) +
", current: " + row.getFloat(1) +
", voltage: " + row.getInt(2) +
", phase: " + row.getFloat(3) +
", location: " + new String(row.getBinary(4)));
sb.append("\n");
totalVoltage.addAndGet(row.getInt(2));
}
return sb.toString();
});
resultStream.print();
env.execute("flink tdengine source");
}
//ANCHOR_END: source_batch_test
//ANCHOR: cdc_source
void testTDengineCdc() throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(3);
env.enableCheckpointing(100, AT_LEAST_ONCE);
env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
Properties config = new Properties();
config.setProperty(TDengineCdcParams.CONNECT_TYPE, "ws");
config.setProperty(TDengineCdcParams.BOOTSTRAP_SERVERS, "localhost:6041");
config.setProperty(TDengineCdcParams.AUTO_OFFSET_RESET, "earliest");
config.setProperty(TDengineCdcParams.MSG_WITH_TABLE_NAME, "true");
config.setProperty(TDengineCdcParams.AUTO_COMMIT_INTERVAL_MS, "1000");
config.setProperty(TDengineCdcParams.GROUP_ID, "group_1");
config.setProperty(TDengineCdcParams.ENABLE_AUTO_COMMIT, "true");
config.setProperty(TDengineCdcParams.CONNECT_USER, "root");
config.setProperty(TDengineCdcParams.CONNECT_PASS, "taosdata");
config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "RowData");
config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER_ENCODING, "UTF-8");
TDengineCdcSource<RowData> tdengineSource = new TDengineCdcSource<>("topic_meters", config, RowData.class);
DataStreamSource<RowData> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source");
DataStream<String> resultStream = input.map((MapFunction<RowData, String>) rowData -> {
StringBuilder sb = new StringBuilder();
sb.append("ts: " + rowData.getTimestamp(0, 0) +
", current: " + rowData.getFloat(1) +
", voltage: " + rowData.getInt(2) +
", phase: " + rowData.getFloat(3) +
", location: " + new String(rowData.getBinary(4)));
sb.append("\n");
totalVoltage.addAndGet(rowData.getInt(2));
return sb.toString();
});
resultStream.print();
JobClient jobClient = env.executeAsync("Flink test cdc Example");
Thread.sleep(5000L);
// A job submitted from the Flink UI cannot be cancelled here; stop it from the UI page instead.
jobClient.cancel().get();
}
//ANCHOR_END: cdc_source
//ANCHOR: cdc_batch_source
void testTDengineCdcBatch() throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(3);
Properties config = new Properties();
config.setProperty(TDengineCdcParams.CONNECT_TYPE, "ws");
config.setProperty(TDengineCdcParams.BOOTSTRAP_SERVERS, "localhost:6041");
config.setProperty(TDengineCdcParams.AUTO_OFFSET_RESET, "earliest");
config.setProperty(TDengineCdcParams.MSG_WITH_TABLE_NAME, "true");
config.setProperty(TDengineCdcParams.AUTO_COMMIT_INTERVAL_MS, "1000");
config.setProperty(TDengineCdcParams.GROUP_ID, "group_1");
config.setProperty(TDengineCdcParams.CONNECT_USER, "root");
config.setProperty(TDengineCdcParams.CONNECT_PASS, "taosdata");
config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "RowData");
config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER_ENCODING, "UTF-8");
config.setProperty(TDengineCdcParams.TMQ_BATCH_MODE, "true");
Class<ConsumerRecords<RowData>> typeClass = (Class<ConsumerRecords<RowData>>) (Class<?>) ConsumerRecords.class;
TDengineCdcSource<ConsumerRecords<RowData>> tdengineSource = new TDengineCdcSource<>("topic_meters", config, typeClass);
DataStreamSource<ConsumerRecords<RowData>> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source");
DataStream<String> resultStream = input.map((MapFunction<ConsumerRecords<RowData>, String>) records -> {
Iterator<ConsumerRecord<RowData>> iterator = records.iterator();
StringBuilder sb = new StringBuilder();
while (iterator.hasNext()) {
GenericRowData row = (GenericRowData) iterator.next().value();
sb.append("ts: " + row.getTimestamp(0, 0) +
", current: " + row.getFloat(1) +
", voltage: " + row.getInt(2) +
", phase: " + row.getFloat(3) +
", location: " + new String(row.getBinary(4)));
sb.append("\n");
totalVoltage.addAndGet(row.getInt(2));
}
return sb.toString();
});
resultStream.print();
JobClient jobClient = env.executeAsync("Flink test cdc Example");
Thread.sleep(5000L);
jobClient.cancel().get();
}
//ANCHOR_END: cdc_batch_source
//ANCHOR: cdc_custom_type_test
static void testCustomTypeCdc() throws Exception {
System.out.println("testCustomTypeTDengineCdc start");
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(3);
env.enableCheckpointing(100, AT_LEAST_ONCE);
env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
env.getCheckpointConfig().setTolerableCheckpointFailureNumber(4);
Properties config = new Properties();
config.setProperty(TDengineCdcParams.CONNECT_TYPE, "ws");
config.setProperty(TDengineCdcParams.BOOTSTRAP_SERVERS, "localhost:6041");
config.setProperty(TDengineCdcParams.AUTO_OFFSET_RESET, "earliest");
config.setProperty(TDengineCdcParams.MSG_WITH_TABLE_NAME, "true");
config.setProperty(TDengineCdcParams.AUTO_COMMIT_INTERVAL_MS, "1000");
config.setProperty(TDengineCdcParams.GROUP_ID, "group_1");
config.setProperty(TDengineCdcParams.CONNECT_USER, "root");
config.setProperty(TDengineCdcParams.CONNECT_PASS, "taosdata");
config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "com.taosdata.flink.entity.ResultDeserializer");
config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER_ENCODING, "UTF-8");
TDengineCdcSource<ResultBean> tdengineSource = new TDengineCdcSource<>("topic_meters", config, ResultBean.class);
DataStreamSource<ResultBean> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source");
DataStream<String> resultStream = input.map((MapFunction<ResultBean, String>) rowData -> {
StringBuilder sb = new StringBuilder();
sb.append("ts: " + rowData.getTs() +
", current: " + rowData.getCurrent() +
", voltage: " + rowData.getVoltage() +
", phase: " + rowData.getPhase() +
", groupid: " + rowData.getGroupid() +
", location: " + rowData.getLocation() +
", tbname: " + rowData.getTbname());
sb.append("\n");
totalVoltage.addAndGet(rowData.getVoltage());
return sb.toString();
});
resultStream.print();
JobClient jobClient = env.executeAsync("Flink test cdc Example");
Thread.sleep(5000L);
jobClient.cancel().get();
}
//ANCHOR_END: cdc_custom_type_test
//ANCHOR: RowDataToSink
static void testRowDataToSink() throws Exception {
Properties connProps = new Properties();
connProps.setProperty(TDengineConfigParams.VALUE_DESERIALIZER, "RowData");
connProps.setProperty(TDengineConfigParams.TD_JDBC_URL, "jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata");
SourceSplitSql sql = new SourceSplitSql("select ts, `current`, voltage, phase, tbname from meters");
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(1);
env.enableCheckpointing(1000, CheckpointingMode.AT_LEAST_ONCE);
TDengineSource<RowData> source = new TDengineSource<>(connProps, sql, RowData.class);
DataStreamSource<RowData> input = env.fromSource(source, WatermarkStrategy.noWatermarks(), "tdengine-source");
Properties sinkProps = new Properties();
sinkProps.setProperty(TDengineConfigParams.VALUE_DESERIALIZER, "RowData");
sinkProps.setProperty(TDengineConfigParams.TD_SOURCE_TYPE, "tdengine_source");
sinkProps.setProperty(TDengineConfigParams.TD_DATABASE_NAME, "power_sink");
sinkProps.setProperty(TDengineConfigParams.TD_SUPERTABLE_NAME, "sink_meters");
sinkProps.setProperty(TDengineConfigParams.TD_JDBC_URL, "jdbc:TAOS-WS://localhost:6041/power_sink?user=root&password=taosdata");
sinkProps.setProperty(TDengineConfigParams.TD_BATCH_SIZE, "2000");
// Arrays.asList The list of target table field names needs to be consistent with the data order
TDengineSink<RowData> sink = new TDengineSink<>(sinkProps,
Arrays.asList("ts", "current", "voltage", "phase", "groupid", "location", "tbname"));
input.sinkTo(sink);
env.execute("flink tdengine source");
}
//ANCHOR_END: RowDataToSink
//ANCHOR: CdcRowDataToSink
static void testCdcToSink() throws Exception {
System.out.println("testTDengineCdcToTdSink start");
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(3);
env.enableCheckpointing(500, CheckpointingMode.AT_LEAST_ONCE);
env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
env.getCheckpointConfig().setCheckpointTimeout(5000);
Properties config = new Properties();
config.setProperty(TDengineCdcParams.CONNECT_TYPE, "ws");
config.setProperty(TDengineCdcParams.BOOTSTRAP_SERVERS, "localhost:6041");
config.setProperty(TDengineCdcParams.AUTO_OFFSET_RESET, "earliest");
config.setProperty(TDengineCdcParams.GROUP_ID, "group_1");
config.setProperty(TDengineCdcParams.CONNECT_USER, "root");
config.setProperty(TDengineCdcParams.CONNECT_PASS, "taosdata");
config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "RowData");
config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER_ENCODING, "UTF-8");
TDengineCdcSource<RowData> tdengineSource = new TDengineCdcSource<>("topic_meters", config, RowData.class);
DataStreamSource<RowData> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source");
Properties sinkProps = new Properties();
sinkProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
sinkProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
sinkProps.setProperty(TDengineConfigParams.VALUE_DESERIALIZER, "RowData");
sinkProps.setProperty(TDengineConfigParams.TD_DATABASE_NAME, "power_sink");
sinkProps.setProperty(TDengineConfigParams.TD_SUPERTABLE_NAME, "sink_meters");
sinkProps.setProperty(TDengineConfigParams.TD_JDBC_URL, "jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata");
sinkProps.setProperty(TDengineConfigParams.TD_BATCH_SIZE, "2000");
TDengineSink<RowData> sink = new TDengineSink<>(sinkProps, Arrays.asList("ts", "current", "voltage", "phase", "location", "groupid", "tbname"));
input.sinkTo(sink);
JobClient jobClient = env.executeAsync("Flink test cdc Example");
Thread.sleep(6000L);
jobClient.cancel().get();
System.out.println("testTDengineCdcToTdSink finish");
}
//ANCHOR_END: CdcRowDataToSink
//ANCHOR: source_table
static void testTableToSink() throws Exception {
System.out.println("testTableToSink start");
EnvironmentSettings fsSettings = EnvironmentSettings.newInstance().inStreamingMode().build();
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(3);
env.enableCheckpointing(1000, CheckpointingMode.AT_LEAST_ONCE);
StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, fsSettings);
String tdengineSourceTableDDL = "CREATE TABLE `meters` (" +
" ts TIMESTAMP," +
" `current` FLOAT," +
" voltage INT," +
" phase FLOAT," +
" location VARBINARY," +
" groupid INT," +
" tbname VARBINARY" +
") WITH (" +
" 'connector' = 'tdengine-connector'," +
" 'td.jdbc.url' = 'jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata'," +
" 'td.jdbc.mode' = 'source'," +
" 'table-name' = 'meters'," +
" 'scan.query' = 'SELECT ts, `current`, voltage, phase, location, groupid, tbname FROM `meters`'" +
")";
String tdengineSinkTableDDL = "CREATE TABLE `sink_meters` (" +
" ts TIMESTAMP," +
" `current` FLOAT," +
" voltage INT," +
" phase FLOAT," +
" location VARBINARY," +
" groupid INT," +
" tbname VARBINARY" +
") WITH (" +
" 'connector' = 'tdengine-connector'," +
" 'td.jdbc.mode' = 'sink'," +
" 'td.jdbc.url' = 'jdbc:TAOS-WS://localhost:6041/power_sink?user=root&password=taosdata'," +
" 'sink.db.name' = 'power_sink'," +
" 'sink.supertable.name' = 'sink_meters'" +
")";
tableEnv.executeSql(tdengineSourceTableDDL);
tableEnv.executeSql(tdengineSinkTableDDL);
tableEnv.executeSql("INSERT INTO sink_meters SELECT ts, `current`, voltage, phase, location, groupid, tbname FROM `meters`");
}
//ANCHOR_END: source_table
//ANCHOR: cdc_table
static void testCdcTableToSink() throws Exception {
EnvironmentSettings fsSettings = EnvironmentSettings.newInstance().inStreamingMode().build();
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(5);
env.enableCheckpointing(1000, CheckpointingMode.AT_LEAST_ONCE);
StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, fsSettings);
String tdengineSourceTableDDL = "CREATE TABLE `meters` (" +
" ts TIMESTAMP," +
" `current` FLOAT," +
" voltage INT," +
" phase FLOAT," +
" location VARBINARY," +
" groupid INT," +
" tbname VARBINARY" +
") WITH (" +
" 'connector' = 'tdengine-connector'," +
" 'bootstrap.servers' = 'localhost:6041'," +
" 'td.jdbc.mode' = 'cdc'," +
" 'group.id' = 'group_22'," +
" 'auto.offset.reset' = 'earliest'," +
" 'enable.auto.commit' = 'false'," +
" 'topic' = 'topic_meters'" +
")";
String tdengineSinkTableDDL = "CREATE TABLE `sink_meters` (" +
" ts TIMESTAMP," +
" `current` FLOAT," +
" voltage INT," +
" phase FLOAT," +
" location VARBINARY," +
" groupid INT," +
" tbname VARBINARY" +
") WITH (" +
" 'connector' = 'tdengine-connector'," +
" 'td.jdbc.mode' = 'cdc'," +
" 'td.jdbc.url' = 'jdbc:TAOS-WS://localhost:6041/power_sink?user=root&password=taosdata'," +
" 'sink.db.name' = 'power_sink'," +
" 'sink.supertable.name' = 'sink_meters'" +
")";
tableEnv.executeSql(tdengineSourceTableDDL);
tableEnv.executeSql(tdengineSinkTableDDL);
TableResult tableResult = tableEnv.executeSql("INSERT INTO sink_meters SELECT ts, `current`, voltage, phase, location, groupid, tbname FROM `meters`");
Thread.sleep(5000L);
tableResult.getJobClient().get().cancel().get();
}
//ANCHOR_END: cdc_table
}

View File

@ -2,7 +2,7 @@ module goexample
go 1.17
require github.com/taosdata/driver-go/v3 v3.5.6
require github.com/taosdata/driver-go/v3 v3.6.0
require (
github.com/google/uuid v1.3.0 // indirect

View File

@ -18,8 +18,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/taosdata/driver-go/v3 v3.5.6 h1:LDVtMyT3B9p2VREsd5KKM91D4Y7P4kSdh2SQumXi8bk=
github.com/taosdata/driver-go/v3 v3.5.6/go.mod h1:H2vo/At+rOPY1aMzUV9P49SVX7NlXb3LAbKw+MCLrmU=
github.com/taosdata/driver-go/v3 v3.6.0 h1:4dRXMl01DhIS5xBXUvtkkB+MjL8g64zN674xKd+ojTE=
github.com/taosdata/driver-go/v3 v3.6.0/go.mod h1:H2vo/At+rOPY1aMzUV9P49SVX7NlXb3LAbKw+MCLrmU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -0,0 +1,84 @@
package main
import (
"database/sql/driver"
"fmt"
"log"
"math/rand"
"time"
"github.com/taosdata/driver-go/v3/af"
"github.com/taosdata/driver-go/v3/common"
"github.com/taosdata/driver-go/v3/common/stmt"
)
func main() {
host := "127.0.0.1"
numOfSubTable := 10
numOfRow := 10
db, err := af.Open(host, "root", "taosdata", "", 0)
if err != nil {
log.Fatalln("Failed to connect to " + host + "; ErrMessage: " + err.Error())
}
defer db.Close()
// prepare database and table
_, err = db.Exec("CREATE DATABASE IF NOT EXISTS power")
if err != nil {
log.Fatalln("Failed to create database power, ErrMessage: " + err.Error())
}
_, err = db.Exec("USE power")
if err != nil {
log.Fatalln("Failed to use database power, ErrMessage: " + err.Error())
}
_, err = db.Exec("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))")
if err != nil {
log.Fatalln("Failed to create stable meters, ErrMessage: " + err.Error())
}
// prepare statement
sql := "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)"
reqID := common.GetReqID()
stmt2 := db.Stmt2(reqID, false)
err = stmt2.Prepare(sql)
if err != nil {
log.Fatalln("Failed to prepare sql, sql: " + sql + ", ErrMessage: " + err.Error())
}
for i := 1; i <= numOfSubTable; i++ {
// generate column data
current := time.Now()
columns := make([][]driver.Value, 4)
for j := 0; j < numOfRow; j++ {
columns[0] = append(columns[0], current.Add(time.Millisecond*time.Duration(j)))
columns[1] = append(columns[1], rand.Float32()*30)
columns[2] = append(columns[2], rand.Int31n(300))
columns[3] = append(columns[3], rand.Float32())
}
// generate bind data
tableName := fmt.Sprintf("d_bind_%d", i)
tags := []driver.Value{int32(i), []byte(fmt.Sprintf("location_%d", i))}
bindData := []*stmt.TaosStmt2BindData{
{
TableName: tableName,
Tags: tags,
Cols: columns,
},
}
// bind params
err = stmt2.Bind(bindData)
if err != nil {
log.Fatalln("Failed to bind params, ErrMessage: " + err.Error())
}
// execute batch
err = stmt2.Execute()
if err != nil {
log.Fatalln("Failed to exec, ErrMessage: " + err.Error())
}
// get affected rows
affected := stmt2.GetAffectedRows()
// you can check exeResult here
fmt.Printf("Successfully inserted %d rows to %s.\n", affected, tableName)
}
err = stmt2.Close()
if err != nil {
log.Fatal("failed to close statement, err:", err)
}
}

View File

@ -22,7 +22,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.4.0</version>
<version>3.5.2</version>
</dependency>
<!-- ANCHOR_END: dep-->

View File

@ -0,0 +1,87 @@
package com.taos.example;
import com.taosdata.jdbc.ws.TSWSPreparedStatement;
import java.sql.*;
import java.util.ArrayList;
import java.util.Random;
// ANCHOR: para_bind
public class WSParameterBindingExtendInterfaceDemo {
// modify host to your own
private static final String host = "127.0.0.1";
private static final Random random = new Random(System.currentTimeMillis());
private static final int numOfSubTable = 10, numOfRow = 10;
public static void main(String[] args) throws SQLException {
String jdbcUrl = "jdbc:TAOS-WS://" + host + ":6041";
try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) {
init(conn);
String sql = "INSERT INTO ? USING power.meters TAGS(?,?) VALUES (?,?,?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("d_bind_" + i);
// set tags
pstmt.setTagInt(0, i);
pstmt.setTagString(1, "location_" + i);
// set column ts
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
// set column current
ArrayList<Float> currentList = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
currentList.add(random.nextFloat() * 30);
pstmt.setFloat(1, currentList);
// set column voltage
ArrayList<Integer> voltageList = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
voltageList.add(random.nextInt(300));
pstmt.setInt(2, voltageList);
// set column phase
ArrayList<Float> phaseList = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
phaseList.add(random.nextFloat());
pstmt.setFloat(3, phaseList);
// add column
pstmt.columnDataAddBatch();
}
// execute column
pstmt.columnDataExecuteBatch();
// you can check exeResult here
System.out.println("Successfully inserted " + (numOfSubTable * numOfRow) + " rows to power.meters.");
}
} catch (Exception ex) {
// please refer to the JDBC specifications for detailed exceptions info
System.out.printf("Failed to insert to table meters using stmt, %sErrMessage: %s%n",
ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "",
ex.getMessage());
// Print stack trace for context in examples. Use logging in production.
ex.printStackTrace();
throw ex;
}
}
private static void init(Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
stmt.execute("CREATE DATABASE IF NOT EXISTS power");
stmt.execute("USE power");
stmt.execute(
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
}
}
}
// ANCHOR_END: para_bind

View File

@ -1,12 +1,10 @@
package com.taos.example;
import com.taosdata.jdbc.ws.TSWSPreparedStatement;
import java.sql.*;
import java.util.Random;
// ANCHOR: para_bind
public class WSParameterBindingBasicDemo {
public class WSParameterBindingStdInterfaceDemo {
// modify host to your own
private static final String host = "127.0.0.1";
@ -19,31 +17,29 @@ public class WSParameterBindingBasicDemo {
try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) {
init(conn);
String sql = "INSERT INTO ? USING power.meters TAGS(?,?) VALUES (?,?,?,?)";
// If you are certain that the child table exists, you can avoid binding the tag column to improve performance.
String sql = "INSERT INTO power.meters (tbname, groupid, location, ts, current, voltage, phase) VALUES (?,?,?,?,?,?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
long current = System.currentTimeMillis();
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("d_bind_" + i);
// set tags
pstmt.setTagInt(0, i);
pstmt.setTagString(1, "location_" + i);
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setFloat(2, random.nextFloat() * 30);
pstmt.setInt(3, random.nextInt(300));
pstmt.setFloat(4, random.nextFloat());
pstmt.setString(1, "d_bind_" + i);
pstmt.setInt(2, i);
pstmt.setString(3, "location_" + i);
pstmt.setTimestamp(4, new Timestamp(current + j));
pstmt.setFloat(5, random.nextFloat() * 30);
pstmt.setInt(6, random.nextInt(300));
pstmt.setFloat(7, random.nextFloat());
pstmt.addBatch();
}
int[] exeResult = pstmt.executeBatch();
// you can check exeResult here
System.out.println("Successfully inserted " + exeResult.length + " rows to power.meters.");
}
int[] exeResult = pstmt.executeBatch();
// you can check exeResult here
System.out.println("Successfully inserted " + exeResult.length + " rows to power.meters.");
}
} catch (Exception ex) {
// please refer to the JDBC specifications for detailed exceptions info

View File

@ -118,9 +118,14 @@ public class TestAll {
}
@Test
public void testWsStmtBasic() throws Exception {
public void testWsStmtStd() throws Exception {
dropDB("power");
WSParameterBindingBasicDemo.main(args);
WSParameterBindingStdInterfaceDemo.main(args);
}
@Test
public void testWsStmtExtend() throws Exception {
dropDB("power");
WSParameterBindingExtendInterfaceDemo.main(args);
}
@Test

View File

@ -0,0 +1,71 @@
import taos
from datetime import datetime
import random
numOfSubTable = 10
numOfRow = 10
conn = None
stmt2 = None
host="localhost"
port=6030
try:
# 1 connect
conn = taos.connect(
user="root",
password="taosdata",
host=host,
port=port,
)
# 2 create db and table
conn.execute("CREATE DATABASE IF NOT EXISTS power")
conn.execute("USE power")
conn.execute(
"CREATE TABLE IF NOT EXISTS `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))"
)
# 3 prepare
sql = "INSERT INTO ? USING meters (groupid, location) TAGS(?,?) VALUES (?,?,?,?)"
stmt2 = conn.statement2(sql)
tbnames = []
tags = []
datas = []
for i in range(numOfSubTable):
# tbnames
tbnames.append(f"d_bind_{i}")
# tags
tags.append([i, f"location_{i}"])
# datas
current = int(datetime.now().timestamp() * 1000)
timestamps = []
currents = []
voltages = []
phases = []
for j in range(numOfRow):
timestamps.append(current + i*1000 + j)
currents.append(float(random.random() * 30))
voltages.append(random.randint(100, 300))
phases.append(float(random.random()))
data = [timestamps, currents, voltages, phases]
datas.append(data)
# 4 bind param
stmt2.bind_param(tbnames, tags, datas)
# 5 execute
stmt2.execute()
# show
print(f"Successfully inserted with stmt2 to power.meters. child={numOfSubTable} rows={numOfRow} \n")
except Exception as err:
print(f"Failed to insert to table meters using stmt2, ErrMessage:{err}")
raise err
finally:
if stmt2:
stmt2.close()
if conn:
conn.close()

View File

@ -63,7 +63,7 @@ toc_max_heading_level: 4
1. Database: a database provides efficient storage and retrieval of time-series data. In industrial and IoT scenarios, the volume of time-series data generated by devices is staggering. On the storage side, the database must persist the data to disk with maximum compression to reduce storage cost. On the read side, it must support real-time queries as well as efficient queries over historical data. Traditional storage options include relational databases such as MySQL and Oracle, Hadoop-ecosystem stores such as HBase, and dedicated time-series databases such as InfluxDB, OpenTSDB, and Prometheus.
2. Data Subscription: many time-series applications need to subscribe to the relevant real-time data the moment it arrives, so they can learn the latest state of the monitored objects, and run real-time analysis with AI or other tools. At the same time, for privacy and security, applications may subscribe only to data they are authorized to access. A time-series data platform therefore must provide data subscription to help applications obtain the latest data in real time.
2. Data Subscription: many time-series applications need to subscribe to the relevant real-time data the moment it arrives, so they can learn the latest state of the monitored objects and run real-time analysis with AI or other tools. At the same time, for privacy and security, applications may subscribe only to data they are authorized to access. A time-series data platform therefore must provide data subscription to help applications obtain the latest data in real time.
3. ETL (Extract, Transform, Load): in real-world IoT and industrial scenarios, collecting time-series data requires dedicated ETL tools to extract, clean, and transform the data before it can be written into the database, so that data quality is guaranteed. Different collection systems often follow different standards: for example, temperature may be recorded in Celsius in one system and Fahrenheit in another; systems may sit in different time zones and require conversion; time resolutions may differ as well. Data aggregated from such different systems must be transformed before it can be written into the database.
@ -135,4 +135,4 @@ toc_max_heading_level: 4
18. Support for private deployment: for security and other reasons, many enterprises prefer on-premises deployment, yet traditional enterprises often lack strong IT operations teams, so installation, deployment, and maintenance must be simple, fast, and highly maintainable.
In short, a time-series big data platform should be efficient, scalable, real-time, reliable, flexible, open, simple, and easy to maintain. In recent years, many enterprises have migrated their time-series data from traditional big data platforms or relational databases to dedicated time-series big data platforms, to ensure that massive time-series data is processed quickly and effectively and to support continued business growth.

View File

@ -65,6 +65,8 @@ TDengine can subscribe to data from an MQTT broker via the MQTT connector and write it to TDengine
In **Subscription Topics and QoS Configuration**, enter the topic name to consume and its QoS, in the format `{topic_name}::{qos}` (for example, `my_topic::0`). MQTT protocol 5.0 supports shared subscriptions: multiple clients can subscribe to the same topic for load balancing, using the format `$share/{group_name}/{topic_name}::{qos}`, where `$share` is a fixed prefix indicating that shared subscription is enabled and `group_name` is the group name, similar to a Kafka consumer group.
In **Topic Parsing**, enter the MQTT topic parsing rules. The format is the same as an MQTT topic: each topic level is parsed into a variable name, and `_` means the current level is ignored during parsing. For example, for the MQTT topic `a/+/c`, if the parsing rule is set to `v1/v2/_`, the first-level value `a` is assigned to variable `v1`, the second-level value (the wildcard `+` matches any value) is assigned to variable `v2`, and the third-level value `c` is ignored and not assigned to any variable. Variables obtained from topic parsing can also take part in transformations and calculations in the `payload parsing` section below.
In **Data Compression**, configure the compression algorithm for the message body. After receiving a message, taosX decompresses the body with the corresponding algorithm to obtain the original data. Options are none (no compression), gzip, snappy, lz4, and zstd; the default is none.
In **Character Encoding**, configure the encoding of the message body. After receiving a message, taosX decodes the body with the corresponding encoding to obtain the original data. Options are UTF_8, GBK, GB18030, and BIG5; the default is UTF_8.
@ -138,7 +140,11 @@ JSON data supports JSONObject or JSONArray; with the json parser you can
#### 6.4 Table Mapping
In the **Target Super Table** drop-down list, select a target super table, or click the **Create Super Table** button on the right
In the **Target Super Table** drop-down list, select a target super table, or click the **Create Super Table** button on the right to create a new one.
When the super table needs to be generated dynamically from the message, select **Create Template**. The super table name, column names, and column types can all use template variables. When data arrives, the program evaluates the template variables and generates the corresponding super table template; if the super table does not exist in the database, it is created from this template, and for an existing super table, any columns derived from template variables that are missing are created automatically.
![mqtt-17.png](./mqtt-17.png)
In **Mapping**, enter the sub-table name in the target super table, for example `t_{id}`. Fill in the mapping rules as required; mapping supports default values.
@ -148,6 +154,16 @@ JSON data supports JSONObject or JSONArray; with the json parser you can
![mqtt-13.png](./mqtt-13.png)
If a super-table column is a template variable, a pivot operation is performed during sub-table mapping: the values of the template variable are expanded into column names, and the column values come from the corresponding mapped columns.
For example:
![mqtt-18.png](./mqtt-18.png)
The preview result is:
![mqtt-19.png](./mqtt-19.png)
### 7. Advanced Options
In **Message Waiting Queue Size**, enter the size of the buffer queue for received MQTT messages. When the queue is full, newly arriving data is discarded. It can be set to 0, meaning no buffering.

View File

@ -94,6 +94,8 @@ JSON parsing supports JSONObject or JSONArray. The following JSON sample data can
![JSON parsing](./pic/transform-02.png)
> Note: JSON property names must not contain `.`; if they do, the name must be escaped with an alias.
##### Regex Regular Expressions<a name="regex"></a>
You can use the **named capture groups** of a regular expression to extract multiple fields from any string (text) field. As shown in the figure, fields such as the client IP, timestamp, and requested URL are extracted from an nginx log.
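A sketch of such a rule for the common combined log format (the group names are illustrative):

```
^(?<ip>[\d.]+) \S+ \S+ \[(?<ts>[^\]]+)\] "(?<method>\S+) (?<url>\S+)
```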
@ -150,7 +152,11 @@ let v3 = data["voltage"].split(",");
The voltage parsed by the json rule is a string carrying a unit. To store the voltage and current values as int for statistical analysis, the voltage must be split further; the date is also expected to be split into date and time before being written.
As shown below, a split rule can split the source field `ts` into date and time, and a regex can extract the voltage value and unit from the field `voltage`. A split rule requires a **separator** and a **split count**; the resulting fields are named `{original_field}_{sequence_number}`. Regex rules work the same way as during parsing, using **named capture groups** to name the extracted fields.
As shown below:
* The field `ts` is split into date and time with a split rule. The split rule requires a **separator** and a **split count**, and the resulting fields are named `{original_field}_{sequence_number}`.
* The field `voltage` uses the regular expression `^(?<voltage>[0-9]+)(?<voltage_unit>[a-zA-Z]+)$` to extract the voltage value and unit. Regex rules work the same way as during parsing, using **named capture groups** to name the extracted fields.
* The field `location` uses a convert transformation with a JSON map object, where each key is an original value of the field and each value is the converted value. As shown, the `location` value `"beijing.chaoyang.datun"` is converted to `"beijing.chaoyang.datunludong"`
![Split and extract](./pic/transform-04.png)
@ -158,6 +164,14 @@ let v3 = data["voltage"].split(",");
The filter feature sets filter conditions; only data rows that satisfy the conditions are written to the target table. The result of a filter expression must be of boolean type. Before writing filter conditions, you must determine the type of each parsed field; based on that type, you can use judgment functions and comparison operators (`>`, `>=`, `<=`, `<`, `==`, `!=`).
For timestamp filtering, the following function can be used, where ts is a field formatted as an RFC 3339 date-time string, and t1 and t2 are offsets in seconds relative to the current time; the accepted time range is now + t1 to now + t2.
```
between_time_range(ts, t1, t2)
// for example, to ingest only data within the last 7 days, the filter condition is:
between_time_range(ts, -604800, 0)
```
#### Field Types and Conversion
Only when the type of each parsed field is known can the correct syntax be used for data filtering.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 53 KiB

After

Width:  |  Height:  |  Size: 60 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 50 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 46 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 32 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 56 KiB

After

Width:  |  Height:  |  Size: 170 KiB

View File

@ -66,7 +66,7 @@ pidfile = /usr/local/taos/taosanode/taosanode.pid
# uWSGI log files
logto = /var/log/taos/taosanode/taosanode.log
# wWSGI monitor port
# uWSGI monitor port
stats = 127.0.0.1:8387
# python virtual environment directory, used by Anode
@ -86,7 +86,7 @@ log-level = DEBUG
**Tip**
Do not set the `daemonize` parameter; it causes uWSGI to conflict with systemctl and prevents Anode from starting properly.
The sample configuration file `taosanode.ini` above contains only the basic parameters needed for Anode to provide services; for other uWSGI parameters and their descriptions, see the [uWSGIS official documentation](https://uwsgi-docs-zh.readthedocs.io/zh-cn/latest/Options.html).
The sample configuration file `taosanode.ini` above contains only the basic parameters needed for Anode to provide services; for other uWSGI parameters and their descriptions, see the [uWSGI official documentation](https://uwsgi-docs-zh.readthedocs.io/zh-cn/latest/Options.html).
The main Anode runtime configuration items are:
- app-log: log file produced by the Anode service; users can relocate it as needed
@ -110,7 +110,7 @@ SHOW ANODES;
taos> show anodes;
id | url | status | create_time | update_time |
==================================================================================================================
1 | 192.168.0.1:6090 | ready | 2024-11-28 18:44:27.089 | 2024-11-28 18:44:27.089 |
1 | 192.168.0.1:6090 | ready | 2024-11-28 18:44:27.089 | 2024-11-28 18:44:27.089 |
Query OK, 1 row(s) in set (0.037205s)
```

View File

@ -41,7 +41,7 @@ algo=expr1
"}
```
1. `column_expr`: the time-series data column to forecast. As with anomaly detection, only numeric columns are supported.
1. `column_expr`: the time-series data column to forecast; only numeric columns are supported.
2. `options`: parameters of the forecast function, a string in which the algorithm and its parameters are passed as comma-separated K=V pairs. The strings need no single quotes, double quotes, or escape characters, and must not contain Chinese or other wide characters. Forecasting supports the control parameters `conf`, `every`, `rows`, `start`, `rows`, with the following meanings:
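A hedged sketch of a call, assuming a numeric column val in table foo and the built-in arima algorithm:

```SQL
--- forecast the next 10 rows of val at a 95% confidence level
SELECT _frowts, FORECAST(val, "algo=arima,rows=10,conf=95") FROM foo;
```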
### Parameter Description

View File

@ -99,7 +99,7 @@ def test_myfc(self):
s = loader.get_service("myfc")
# set the input data used for forecasting
s.set_input_list(self.get_input_list())
s.set_input_list(self.get_input_list(), None)
# check that the forecast results are all 1
r = s.set_params(
{"fc_rows": 10, "start_ts": 171000000, "time_step": 86400 * 30, "start_p": 0}

View File

@ -44,10 +44,10 @@ class _MyAnomalyDetectionService(AbstractAnomalyDetectionService):
def set_params(self, params):
"""This algorithm needs no input parameters; it simply overrides this parent-class method and performs no parameter handling"""
pass
return super().set_params(params)
```
Save the file in the `./lib/taosanalytics/algo/ad/` directory and restart the taosanode service. Execute `SHOW ANODES FULL` in the TDengine CLI taos to see the newly added algorithm; applications can then invoke the detection algorithm through SQL statements.
Save the file in the `./lib/taosanalytics/algo/ad/` directory and restart the taosanode service. Execute `SHOW ANODES FULL` in the TDengine CLI taos to see the newly added algorithm; the algorithm can then be invoked through SQL statements.
```SQL
--- run anomaly detection on column col, invoking the newly added detector class by setting the algo parameter to myad
@ -65,7 +65,7 @@ def test_myad(self):
s = loader.get_service("myad")
# set the input data to be checked
s.set_input_list(AnomalyDetectionTest.input_list)
s.set_input_list(AnomalyDetectionTest.input_list, None)
r = s.execute()

View File

@ -89,7 +89,7 @@ TDengine provides rich application development interfaces; to help users get started quickly
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.4.0</version>
<version>3.5.2</version>
</dependency>
```

View File

@@ -15,6 +15,19 @@ import TabItem from "@theme/TabItem";
**Tip: parameter binding is the recommended way to write data**
:::note
Only the following two forms of SQL are recommended for parameter-binding writes:
```sql
-- 1. When the subtable is known to exist:
1. INSERT INTO meters (tbname, ts, current, voltage, phase) VALUES(?, ?, ?, ?, ?)
-- 2. With automatic table creation:
1. INSERT INTO meters (tbname, ts, current, voltage, phase, location, group_id) VALUES(?, ?, ?, ?, ?, ?, ?)
2. INSERT INTO ? USING meters TAGS (?, ?) VALUES (?, ?, ?, ?)
```
:::
Continuing with the smart-meter example, this section shows how each language connector uses parameter binding for efficient writes:
1. Prepare a parameterized SQL INSERT statement for writing data to the supertable `meters`. The statement allows the subtable name, tags, and column values to be supplied dynamically.
2. Loop to generate multiple subtables and their data rows. For each subtable:
@@ -26,10 +39,16 @@ import TabItem from "@theme/TabItem";
## WebSocket connection
<Tabs defaultValue="java" groupId="lang">
<TabItem value="java" label="Java">
Parameter binding can be used through two interfaces: the standard JDBC interface and an extended interface; the extended interface performs better.
```java
{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingStdInterfaceDemo.java:para_bind}}
```
```java
{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingExtendInterfaceDemo.java:para_bind}}
```
Here is a [more detailed parameter-binding example](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java).
@@ -87,13 +106,24 @@
<TabItem label="Python" value="python">
```python
{{#include docs/examples/python/stmt2_native.py}}
```
</TabItem>
<TabItem label="Go" value="go">
Sample code for binding parameters with stmt2 (Go connector v3.6.0 and above, TDengine v3.3.5.0 and above):
```go
{{#include docs/examples/go/stmt2/native/main.go}}
```
Sample code for binding parameters with stmt:
```go
{{#include docs/examples/go/stmt/native/main.go}}
```
</TabItem>
<TabItem label="Rust" value="rust">


@@ -19,7 +19,7 @@ TDengine targets many write scenarios, and in many of them, TDengine's storage…
```SQL
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY']
COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY']
SHOW COMPACT [compact_id]
KILL COMPACT compact_id
```
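A hedged, concrete instance of the statements above (database name and time range are illustrative):
```sql
-- compact database db over a given time range, then check progress
COMPACT DATABASE db start with '2024-01-01 00:00:00' end with '2024-06-30 23:59:59';
SHOW COMPACT;
```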


@@ -145,3 +145,47 @@ In the taosX configuration file (default /etc/taos/taosx.toml), the monitor-related configuration…
#### Limitations
Monitoring-related settings take effect only when taosX is run in server mode.
## Integrating monitoring dashboards into explorer
explorer supports embedding existing grafana dashboards.
### Configuring grafana
Edit grafana.ini and change the settings below. Setting root_url may affect existing grafana usage habits, but it is required for the explorer integration so that explorer can act as a proxy for the service.
``` toml
[server]
# If you use reverse proxy and sub path specify full url (with sub path)
root_url = http://ip:3000/grafana
# Serve Grafana from subpath specified in `root_url` setting. By default it is set to `false` for compatibility reasons.
serve_from_sub_path = true
[security]
# set to true if you want to allow browsers to render Grafana in a <frame>, <iframe>, <embed> or <object>. default is false.
allow_embedding = true
```
### Configuring Explorer
Edit explorer.toml; the ip in the dashboard url should be an internal address at which the grafana service can be reached from the explorer server.
``` toml
[grafana]
# The token of the Grafana server, which is used to access the Grafana server.
token = ""
# The URL of the Grafana dashboard, which is used to display the monitoring data of the TDengine cluster.
# You can configure multiple Grafana dashboards.
[grafana.dashboards]
TDengine3 = "http://ip:3000/d/000000001/tdengine3?theme=light&kiosk=tv"
taosX = "http://ip:3000/d/000000002/taosx?theme=light&kiosk=tv"
```
As shown below (grafana v8.5.27), obtain an API key. Be sure to create a read-only apikey; otherwise there is a security risk.
![Obtaining a grafana apikey](./pic/grafana-apikey.png)
As shown below (grafana v8.5.27), obtain the dashboard url, and append the parameters theme=light&kiosk=tv to it.
![Obtaining the grafana dashboard url](./pic/grafana-dashboard.png)


@@ -35,119 +35,67 @@ taosdump -i /file/path -h localhost -P 6030
# 2. Backup and restore with TDengine Enterprise
## 2.1. Overview
Building on TDengine's data subscription capability, TDengine Enterprise implements incremental data backup and restore. Users can back up and restore a TDengine cluster through taosExplorer.
The backup and restore feature of TDengine Enterprise involves the following concepts:
1. Incremental data backup: based on TDengine's data subscription, all data changes of the **backup object** (inserts, updates, deletes, metadata changes, and so on) are recorded to produce backup files.
2. Data restore: the backup files produced by incremental backup are used to restore the **backup object** to a specified point in time.
3. Backup object: the object being backed up, either a **database** or a **supertable**.
4. Backup plan: a periodic backup task the user creates for a backup object. The plan starts at a specified time and runs a backup task at every **backup interval**; each run produces a **backup point**.
5. Backup point: each backup task produces a set of backup files corresponding to one point in time, called a **backup point**. The first backup point is called the **initial backup point**.
6. Restore task: the user picks a backup point of a backup plan and creates a restore task, which replays the data changes in the **backup files** one by one, starting from the **initial backup point** and ending at the chosen backup point.
![backup-zh-00.png](./pic/backup-00-concept.png "Data backup and restore")
Taking the figure above as an example:
1. The user creates a **backup plan** that starts at 2024-08-27 00:00:00 and runs a **backup task** once a day.
2. The first backup task runs at 2024-08-27 00:00:00 and produces the **initial backup point**.
3. After that, a backup task runs once a day, producing more **backup points**.
4. The user can pick any **backup point** and create a **restore task**.
5. The restore task starts from the initial backup point and applies backup points one by one until it reaches the specified backup point.
## 2.2. Data backup
Open the taosExplorer service in a browser, typically at port 6060 of the IP address where the TDengine cluster runs, e.g. http://localhost:6060. In the taosExplorer page, go to "System management - Backup", and on the "Backup plan" tab click "Create backup plan" and fill in the details of the plan.
The information to fill in includes:
1. Database: name of the database to back up. A backup plan can back up only one database/supertable.
2. Supertable: name of the supertable to back up. If left empty, the whole database is backed up.
3. Next execution time: date and time of the first run of the backup task.
4. Backup interval: the time between backup points. Note: the backup interval must be greater than the database's WAL_RETENTION_PERIOD value (see the example after this list).
5. Error retry count: for errors that can be resolved by retrying, the system retries this many times.
6. Error retry interval: the time between retries.
7. Directory: the directory where backup files are stored.
8. Backup file size: the size limit of a backup file. When a backup file reaches this limit, a new one is created automatically.
9. File compression level: the compression level of the backup files. Options: fastest speed, best compression ratio, or a balance of both.
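A minimal sketch of the adjustment referenced in item 4 (the database name and period value are illustrative):
```sql
-- WAL_RETENTION_PERIOD is in seconds; the backup interval must exceed it
ALTER DATABASE test_db WAL_RETENTION_PERIOD 3600;
```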
Once created, the backup plan starts running with the configured parameters, and it appears in the list under "Backup plan".
A backup plan supports the following operations:
1. View: show the details of the backup plan.
2. Modify: change the plan's configuration; the currently running backup task stops first, then restarts under the new configuration.
3. Copy: create a new plan using the selected one as a template; apart from the database and supertable, which the user must choose, all settings are copied.
4. Delete: delete the backup plan, optionally together with the associated backup files.
5. Metrics: view the plan's statistics.
6. View backup points: view all backup points associated with the plan.
## 2.3. Backup files
The "Backup files" list shows the details of the backup files.
## 2.4. Data restore
In the "Backup files" list, select a backup point to create a restore task that restores the database to the specified time.
The "Restore tasks" list shows the restore tasks that have been created. A restore task can be terminated.
# 3. Troubleshooting common errors


@@ -60,11 +60,15 @@ dataDir /mnt/data6 2 0
## Object storage
This section describes how to use object storage in TDengine Enterprise, covering services such as Amazon S3, Azure Blob Storage, Huawei OBS, Tencent Cloud COS, Alibaba Cloud OSS, and MinIO.
**Note** When used together with multi-tier storage, the data kept on each storage tier may, according to the configured rules, be backed up to remote object storage with the local data files deleted.
### S3 object storage
This feature is built on a generic S3 SDK, with the access parameters adapted for compatibility with the various S3 platforms; with suitable configuration, most colder time-series data can be stored in an S3 service.
#### Configuration
In the configuration file /etc/taos/taos.cfg, add the parameters for S3 access:
@@ -78,7 +82,7 @@ dataDir /mnt/data6 2 0
| s3MigrateIntervalSec | Interval, in seconds, that triggers automatic upload of local data files to S3. Minimum: 600; maximum: 100000. Default: 3600 |
| s3MigrateEnabled | Whether to migrate data to S3 automatically. Default: 0 (automatic migration off); set to 1 to enable. |
#### Checking parameter validity
After completing the S3 configuration in taos.cfg, you can verify that the configured S3 service is usable with the checks3 option of the taosd command:
@@ -88,7 +92,7 @@ taosd --checks3
If the configured S3 service is unreachable, this command prints the corresponding error messages while it runs.
#### Creating a database that uses S3
Once configuration is done, you can start the TDengine cluster and create a database that uses S3, for example:
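The concrete statement is elided by this diff; a minimal sketch, assuming the documented `s3_keeplocal` database option and illustrative values:
```sql
-- a database whose data older than 3 days becomes eligible for S3 migration
create database demo_db duration 1d s3_keeplocal 3d;
```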
@@ -112,11 +116,11 @@ s3migrate database <db_name>;
| 2 | s3_chunkpages | 131072 | 131072 | 1048576 | Size threshold of uploaded objects; like the tsdb_pagesize parameter, it cannot be modified; unit: TSDB pages |
| 3 | s3_compact | 1 | 0 | 1 | Whether a TSDB file group is automatically compacted when first uploaded to S3 |
#### Estimating object-storage reads and writes
The cost of an object-storage service depends on both the amount of data stored and the number of requests; the upload and download paths are described below.
##### Data upload
When TSDB time-series data exceeds the time specified by the `s3_keeplocal` parameter, the related data files are split into chunks with a default size of 512 MB (`s3_chunkpages * tsdb_pagesize`, i.e. 131072 pages at the default 4 KB page size). Except for the last chunk, which remains on the local file system, the chunks are uploaded to the object storage service.
@@ -128,7 +132,7 @@ s3migrate database <db_name>;
Other file types such as head, stt, and sma remain on the local file system to speed up queries that rely on precomputed data.
##### Data download
When a query needs data that lives in object storage, TSDB does not download the whole data file; it computes the location of the required data within the file and downloads only that data into the TSDB page cache before returning it to the query engine. Subsequent queries check the page cache first; if the data is already cached it is used directly, avoiding repeated downloads from object storage and effectively reducing the download count.
@@ -140,15 +144,15 @@ s3migrate database <db_name>;
The page cache lives in memory, so after a node restarts, queried data must be downloaded again. The cache uses an LRU (Least Recently Used) policy: when space runs out, the least recently used data is evicted. The cache size can be tuned with the `s3PageCacheSize` parameter; generally, the larger the cache, the fewer the downloads.
### Azure Blob storage
This section describes how to use Microsoft Azure Blob storage in TDengine Enterprise. The feature can be used in two ways: through the S3 gateway provided by the Flexify service, or without depending on Flexify. With suitable configuration, most colder time-series data can be stored in the Azure Blob service.
#### The Flexify service
Flexify is an application in the Azure Marketplace that lets S3-compatible applications store data in Azure Blob Storage through the standard S3 API. Multiple Flexify services can be used to set up several S3 gateways to the same Blob storage.
For deployment, see the [Flexify](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/flexify.azure-s3-api?tab=Overview) application page.
##### Configuration
In the configuration file /etc/taos/taos.cfg, add the parameters for S3 access:
@@ -164,7 +168,7 @@ s3BucketName td-test
- If an operation fails on one S3 service, it switches to another; if all services fail, the last error code produced is returned
- At most 10 S3 service configurations are supported
#### Without the Flexify service
The usage is the same as for S3; the difference lies in the configuration of the following three parameters


@@ -17,7 +17,7 @@ create user user_name pass'password' [sysinfo {1|0}] [createdb {1|0}]
The parameters are described below.
- user_name: the user name, at most 23 bytes long.
- password: the password, 8 to 16 characters long and containing at least three of the following classes: uppercase letters, lowercase letters, digits, and special characters. The special characters are `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .` (since version 3.3.5.0)
- sysinfo: whether the user may view system information. 1 means yes, 0 means no. System information includes server configuration and information about the server's nodes, such as dnodes, query nodes (qnodes), and storage-related details. Defaults to allowing the user to view system information.
- createdb: whether the user may create databases. 1 means yes, 0 means no. Default: 0. // supported since TDengine Enterprise 3.3.2.0
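A hedged example combining these parameters (user name and password are illustrative; the password contains three character classes):
```sql
-- create a user who cannot view system information but may create databases
CREATE USER test_user PASS 'Ab1!efgh' SYSINFO 0 CREATEDB 1;
```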
