commit 240a8b5d5b

@@ -1,49 +0,0 @@
version: 1.0.{build}
image:
  - Visual Studio 2015
  - macos
environment:
  matrix:
    - ARCH: amd64
    - ARCH: x86
matrix:
  exclude:
    - image: macos
      ARCH: x86
for:
  -
    matrix:
      only:
        - image: Visual Studio 2015
    clone_folder: c:\dev\TDengine
    clone_depth: 1

    init:
      - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%

    before_build:
      - cd c:\dev\TDengine
      - md build

    build_script:
      - cd build
      - cmake -G "NMake Makefiles" .. -DBUILD_JDBC=false
      - nmake install
  -
    matrix:
      only:
        - image: macos
    clone_depth: 1

    build_script:
      - mkdir debug
      - cd debug
      - cmake .. > /dev/null
      - make > /dev/null
notifications:
  - provider: Email
    to:
      - sangshuduo@gmail.com
    on_build_success: true
    on_build_failure: true
    on_build_status_changed: true
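The matrix above expands to three build jobs once the exclusion is applied; a quick sketch of the expansion logic (illustrative shell only, not part of any CI config):

```bash
# 2 images x 2 ARCH values, minus the excluded (macos, x86) pair => 3 jobs.
for image in "Visual Studio 2015" "macos"; do
  for arch in amd64 x86; do
    [ "$image" = "macos" ] && [ "$arch" = "x86" ] && continue  # matrix.exclude
    echo "job: image=$image ARCH=$arch"
  done
done
```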
@@ -1,13 +0,0 @@
# Use the latest 2.1 version of CircleCI pipeline process engine. See: https://circleci.com/docs/2.0/configuration-reference
version: 2.1
# Use a package of configuration called an orb.
orbs:
  # Declare a dependency on the welcome-orb
  welcome: circleci/welcome-orb@0.4.1
# Orchestrate or schedule a set of jobs
workflows:
  # Name the workflow "welcome"
  welcome:
    # Run the welcome/run job in its own container
    jobs:
      - welcome/run
.drone.yml (266 changed lines)
@@ -1,266 +0,0 @@
---
kind: pipeline
name: test_amd64

platform:
  os: linux
  arch: amd64

steps:
  - name: build
    image: gcc
    commands:
      - apt-get update
      - apt-get install -y cmake build-essential
      - mkdir debug
      - cd debug
      - cmake ..
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: test_arm64_bionic

platform:
  os: linux
  arch: arm64
steps:
  - name: build
    image: arm64v8/ubuntu:bionic
    commands:
      - apt-get update
      - apt-get install -y cmake build-essential
      - mkdir debug
      - cd debug
      - cmake .. -DCPUTYPE=aarch64 > /dev/null
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: test_arm64_focal

platform:
  os: linux
  arch: arm64

steps:
  - name: build
    image: arm64v8/ubuntu:focal
    commands:
      - echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
      - apt-get update
      - apt-get install -y -qq cmake build-essential
      - mkdir debug
      - cd debug
      - cmake .. -DCPUTYPE=aarch64 > /dev/null
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: test_arm64_centos7

platform:
  os: linux
  arch: arm64

steps:
  - name: build
    image: arm64v8/centos:7
    commands:
      - yum install -y gcc gcc-c++ make cmake git
      - mkdir debug
      - cd debug
      - cmake .. -DCPUTYPE=aarch64 > /dev/null
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: test_arm64_centos8

platform:
  os: linux
  arch: arm64

steps:
  - name: build
    image: arm64v8/centos:8
    commands:
      - dnf install -y gcc gcc-c++ make cmake epel-release git libarchive
      - mkdir debug
      - cd debug
      - cmake .. -DCPUTYPE=aarch64 > /dev/null
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: test_arm_bionic

platform:
  os: linux
  arch: arm

steps:
  - name: build
    image: arm32v7/ubuntu:bionic
    commands:
      - apt-get update
      - apt-get install -y cmake build-essential
      - mkdir debug
      - cd debug
      - cmake .. -DCPUTYPE=aarch32 > /dev/null
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: build_trusty

platform:
  os: linux
  arch: amd64

steps:
  - name: build
    image: ubuntu:trusty
    commands:
      - apt-get update
      - apt-get install -y gcc cmake3 build-essential git binutils-2.26

      - mkdir debug
      - cd debug
      - cmake ..
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: build_xenial

platform:
  os: linux
  arch: amd64

steps:
  - name: build
    image: ubuntu:xenial
    commands:
      - apt-get update
      - apt-get install -y gcc cmake build-essential
      - mkdir debug
      - cd debug
      - cmake ..
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: build_bionic
platform:
  os: linux
  arch: amd64

steps:
  - name: build
    image: ubuntu:bionic
    commands:
      - apt-get update
      - apt-get install -y gcc cmake build-essential
      - mkdir debug
      - cd debug
      - cmake ..
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
---
kind: pipeline
name: build_centos7
platform:
  os: linux
  arch: amd64

steps:
  - name: build
    image: ansible/centos7-ansible
    commands:
      - yum install -y gcc gcc-c++ make cmake
      - mkdir debug
      - cd debug
      - cmake ..
      - make -j4
trigger:
  event:
    - pull_request
when:
  branch:
    - develop
    - master
    - 2.0
    - 3.0
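All of the removed pipelines share the same build recipe and only vary the image and CPUTYPE. A rough local equivalent of the `test_arm64_bionic` pipeline, assuming Docker with qemu/binfmt emulation for non-native architectures:

```bash
# Run the same build inside the pipeline's image, mounting the source tree.
docker run --rm --platform linux/arm64 -v "$PWD":/src -w /src arm64v8/ubuntu:bionic bash -c '
  apt-get update &&
  apt-get install -y cmake build-essential &&
  mkdir -p debug && cd debug &&
  cmake .. -DCPUTYPE=aarch64 > /dev/null &&
  make -j4
'
```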
@@ -0,0 +1,26 @@
# reference
# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners

# merge team
# @guanshengliang  Shengliang Guan
# @zitsen          Linhe Huo
# @wingwing2005    Ya Qiang Li
# @feici02         WANG Xu
# @hzcheng         Hongze Cheng
# @dapan1121       Pan Wei
# @sheyanjie-qq    She Yanjie
# @pigzhou         ZacharyZhou

*           @taosdata/merge
/.github/   @feici02
/cmake/     @guanshengliang
/contrib/   @guanshengliang
/deps/      @guanshengliang
/docs/      @guanshengliang @zitsen
/examples/  @guanshengliang @zitsen
/include/   @guanshengliang @hzcheng @dapan1121
/packaging/ @feici02
/source/    @guanshengliang @hzcheng @dapan1121
/tests/     @guanshengliang @zitsen
/tools/     @guanshengliang @zitsen
/utils/     @guanshengliang
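In a CODEOWNERS file the last matching pattern wins, so the catch-all `*` assigns `@taosdata/merge` and the later directory rules override it. A small sketch of that resolution order (hypothetical path; shell used for illustration only):

```bash
path="docs/en/intro.md"                    # hypothetical file
owners="@taosdata/merge"                   # the "*" catch-all matches first
case "$path" in
  docs/*) owners="@guanshengliang @zitsen" ;;  # the later /docs/ rule overrides it
esac
echo "$path -> $owners"
```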
@@ -0,0 +1,88 @@
name: TDengine Build

on:
  pull_request:
    branches:
      - 'main'
      - '3.0'
      - '3.1'
    paths-ignore:
      - 'docs/**'
      - 'packaging/**'
      - 'tests/**'
      - '*.md'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
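# With this concurrency group, a new push to the same workflow/ref pair cancels
# the still-running run for the previous push, so only the latest commit of a
# pull request is built.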
jobs:
  build:
    name: Build and test on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os:
          - ubuntu-20.04
          - ubuntu-22.04
          - ubuntu-24.04
          - macos-13
          - macos-14
          - macos-15

    steps:
      - name: Checkout the repository
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.18

      - name: Install dependencies on Linux
        if: runner.os == 'Linux'
        run: |
          sudo apt update -y
          sudo apt install -y build-essential cmake \
            libgeos-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev \
            zlib1g-dev pkg-config libssl-dev gawk

      - name: Install dependencies on macOS
        if: runner.os == 'macOS'
        run: |
          brew update
          brew install argp-standalone gflags pkg-config snappy zlib geos jansson gawk openssl

      - name: Build and install TDengine
        run: |
          mkdir debug && cd debug
          cmake .. -DBUILD_TOOLS=true \
            -DBUILD_KEEPER=true \
            -DBUILD_HTTP=false \
            -DBUILD_TEST=true \
            -DBUILD_DEPENDENCY_TESTS=false
          make -j 4
          sudo make install
          which taosd
          which taosadapter
          which taoskeeper

      - name: Start taosd
        run: |
          cp /etc/taos/taos.cfg ./
          sudo echo "supportVnodes 256" >> taos.cfg
          nohup sudo taosd -c taos.cfg &

      - name: Start taosadapter
        run: nohup sudo taosadapter &

      - name: Run tests with taosBenchmark
        run: |
          taosBenchmark -t 10 -n 10 -y
          taos -s "select count(*) from test.meters"

      - name: Clean up
        if: always()
        run: |
          if pgrep taosd; then sudo pkill taosd; fi
          if pgrep taosadapter; then sudo pkill taosadapter; fi
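The test step seeds 10 child tables with 10 rows each, so `test.meters` should hold 100 rows. A sketch of the same check as a standalone script (assumes taosd and taosadapter are already installed and running; the output parsing is deliberately simplistic):

```bash
#!/usr/bin/env bash
set -e

# -t: number of child tables, -n: rows per table, -y: answer yes to prompts.
taosBenchmark -t 10 -n 10 -y

# 10 tables x 10 rows = 100 rows expected in the default test.meters super table.
out=$(taos -s "select count(*) from test.meters")
echo "$out"
echo "$out" | grep -q '100' || { echo "expected 100 rows in test.meters" >&2; exit 1; }
```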
@@ -1,4 +1,4 @@
name: TaosKeeper CI
name: taosKeeper CI

on:
  push:
@@ -156,6 +156,9 @@ pcre2_grep_test.sh
pcre2_chartables.c
geos-config
config.h
!contrib/xml2-cmake
!contrib/xml2-cmake/linux_x86_64/include/config.h
!contrib/xml2-cmake/CMakeLists.txt
pcre2.h
zconf.h
version.h
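Because later `!` negations re-include paths that an earlier pattern ignored, `git check-ignore -v` is a handy way to confirm which rule wins for a given file (the second path below is only an illustration):

```bash
# With -v, check-ignore reports the rule that decided each path; the vendored
# file should resolve to a negating "!" pattern (and exit non-zero = not ignored).
git check-ignore -v contrib/xml2-cmake/CMakeLists.txt || echo "not ignored, as intended"

# A generated header elsewhere should still match the bare config.h rule.
git check-ignore -v contrib/build/config.h   # hypothetical generated path
```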
.lgtm.yml (402 changed lines)
@@ -1,402 +0,0 @@
##########################################################################################
# Customize file classifications.                                                       #
# Results from files under any classifier will be excluded from LGTM                    #
# statistics.                                                                            #
##########################################################################################

##########################################################################################
# Use the `path_classifiers` block to define changes to the default classification of   #
# files.                                                                                 #
##########################################################################################

path_classifiers:
  # docs:
  # Identify the top-level file called `generate_javadoc.py` as documentation-related.
  test:
    # Override LGTM's default classification of test files by excluding all files.
    - exclude: /
    # Classify all files in the top-level directories tests/ and testsuites/ as test code.
    - tests
    # - testsuites
    # Classify all files with suffix `.test` as test code.
    # Note: use only forward slash / as a path separator.
    # Use ** to indicate an arbitrary parent path.
    # Use * to indicate any sequence of characters excluding /.
    # Always enclose the expression in double quotes if it includes *.
    # - "**/*.test"
    # Refine the classifications above by excluding files in test/util/.
    # - exclude: test/util
  # The default behavior is to tag all files created during the
  # build as `generated`. Results are hidden for generated code. You can tag
  # further files as being generated by adding them to the `generated` section.
  generated:
    # Exclude all `*.c` files under the `ui/` directory from classification as
    # generated code.
    # - exclude: ui/**/*.c
    # By default, all files not checked into the repository are considered to be
    # 'generated'.
  # The default behavior is to tag library code as `library`. Results are hidden
  # for library code. You can tag further files as being library code by adding them
  # to the `library` section.
  library:
    - exclude: deps/
  # The default behavior is to tag template files as `template`. Results are hidden
  # for template files. You can tag further files as being template files by adding
  # them to the `template` section.
  template:
    #- exclude: path/to/template/code/**/*.c
  # Define your own category, for example: 'some_custom_category'.
  some_custom_category:
    # Classify all files in the top-level directory tools/ (or the top-level file
    # called tools).
    # - tools

#########################################################################################
# Use the `queries` block to change the default display of query results.              #
#########################################################################################

# queries:
  # Start by hiding the results of all queries.
  # - exclude: "*"
  # Then include all queries tagged 'security' and 'correctness', and with a severity of
  # 'error'.
  # - include:
  #     tags:
  #       - "security"
  #       - "correctness"
  #     severity: "error"
  # Specifically hide the results of two queries.
  # - exclude: cpp/use-of-goto
  # - exclude: java/equals-on-unrelated-types
  # Refine by including the `java/command-line-injection` query.
  # - include: java/command-line-injection

#########################################################################################
# Define changes to the default code extraction process.                               #
# Each block configures the extraction of a single language, and modifies actions in a #
# named step. Every named step includes automatic default actions,                     #
# except for the 'prepare' step. The steps are performed in the following sequence:    #
#   prepare                                                                            #
#   after_prepare                                                                      #
#   configure (C/C++ only)                                                             #
#   python_setup (Python only)                                                         #
#   before_index                                                                       #
#   index                                                                              #
##########################################################################################

#########################################################################################
# Environment variables available to the steps:                                        #
#########################################################################################

# LGTM_SRC
#   The root of the source tree.
# LGTM_WORKSPACE
#   An existing (initially empty) folder outside the source tree.
#   Used for temporary download and setup commands.

#########################################################################################
# Use the extraction block to define changes to the default code extraction process    #
# for one or more languages. The settings for each language are defined in a child     #
# block, with one or more steps.                                                       #
#########################################################################################

extraction:
  # Define settings for C/C++ analysis
  #####################################
  cpp:
    # The `prepare` step exists for customization on LGTM.com only.
    prepare:
      # # The `packages` section is valid for LGTM.com only. It names Ubuntu packages to
      # # be installed.
      packages:
        - cmake
    # Add an `after-prepare` step if you need to run commands after the prepare step.
    # Each command should be listed on a separate line.
    # This step is useful for C/C++ analysis where you want to prepare the environment
    # for the `configure` step without changing the default behavior for that step.
    # after_prepare:
    #   - export GNU_MAKE=make
    #   - export GIT=true
    # The `configure` step generates build configuration files which the `index` step
    # then uses to build the codebase.
    configure:
      command:
        - mkdir build
        - cd build
        - cmake ..
    # - ./prepare_deps
    # Optional step. You should add a `before_index` step if you need to run commands
    # before the `index` step.
    # before_index:
    #   - export BOOST_DIR=$LGTM_SRC/boost
    #   - export GTEST_DIR=$LGTM_SRC/googletest
    #   - export HUNSPELL_DIR=$LGTM_SRC/hunspell
    #   - export CRYPTOPP_DIR=$LGTM_SRC/cryptopp
    # The `index` step builds the code and extracts information during the build
    # process.
    index:
      # Override the autobuild process by specifying a list of custom build commands
      # to use instead.
      build_command:
        - cd build
        - make
      # - $GNU_MAKE -j2 -s
      # Specify that all project or solution files should be used for extraction.
      # Default: false.
      # all_solutions: true
      # Specify a list of one or more project or solution files for extraction.
      # Default: LGTM chooses the file closest to the root of the repository (this may
      # fail if there are multiple candidates).
      # solution:
      #   - myProject.sln
      # Specify MSBuild settings
      # msbuild:
      # Specify a list of additional arguments to MSBuild. Default: empty.
      # arguments: /p:Platform=x64 /p:Configuration=Release
      # Specify the MSBuild configuration to use, for example, debug or release.
      # Default: read from the solution file or files.
      # configuration:
      # Specify the platform to target, for example: x86, x64, or Any CPU.
      # Default: read from the solution file or files.
      # platform:
      # Specify the MSBuild target. Default: rebuild.
      # target:
      # Specify whether or not to perform a NuGet restore for extraction. Default: true.
      # nuget_restore: false
      # Specify a version of Microsoft Visual Studio to use for MSBuild or any custom
      # build commands (build_command). For example:
      #   10 for Visual Studio 2010
      #   12 for Visual Studio 2012
      #   14 for Visual Studio 2015
      #   15 for Visual Studio 2017
      # Default: read from project files.
      # vstools_version: 10

  # Define settings for C# analysis
  ##################################
  # csharp:
    # The `prepare` step exists for customization on LGTM.com only.
    # prepare:
    #   packages:
    #     - example_package
    # Add an `after-prepare` step if you need to run commands after the `prepare` step.
    # Each command should be listed on a separate line.
    # after_prepare:
    #   - export PATH=$LGTM_WORKSPACE/tools:$PATH
    # The `index` step builds the code and extracts information during the build
    # process.
    # index:
    # Specify that all project or solution files should be used for extraction.
    # Default: false.
    # all_solutions: true
    # Specify a list of one or more project or solution files for extraction.
    # Default: LGTM chooses the file closest to the root of the repository (this may
    # fail if there are multiple candidates).
    # solution:
    #   - myProject.sln
    # Override the autobuild process by specifying a list of custom build commands
    # to use instead.
    # build_command:
    #   - ./example-compile-all.sh
    # By default, LGTM analyzes the code by building it. You can override this,
    # and tell LGTM not to build the code. Beware that this can lead
    # to less accurate results.
    # buildless: true
    # Specify .NET Core settings.
    # dotnet:
    # Specify additional arguments to `dotnet build`.
    # Default: empty.
    # arguments: "example_arg"
    # Specify the version of .NET Core SDK to use.
    # Default: The version installed on the build machine.
    # version: 2.1
    # Specify MSBuild settings.
    # msbuild:
    # Specify a list of additional arguments to MSBuild. Default: empty.
    # arguments: /P:WarningLevel=2
    # Specify the MSBuild configuration to use, for example, debug or release.
    # Default: read from the solution file or files.
    # configuration: release
    # Specify the platform to target, for example: x86, x64, or Any CPU.
    # Default: read from the solution file or files.
    # platform: x86
    # Specify the MSBuild target. Default: rebuild.
    # target: notest
    # Specify whether or not to perform a NuGet restore for extraction. Default: true.
    # nuget_restore: false
    # Specify a version of Microsoft Visual Studio to use for MSBuild or any custom
    # build commands (build_command). For example:
    #   10 for Visual Studio 2010
    #   12 for Visual Studio 2012
    #   14 for Visual Studio 2015
    #   15 for Visual Studio 2017
    # Default: read from project files
    # vstools_version: 10
    # Specify additional options for the extractor,
    # for example --fast to perform a faster extraction that produces a smaller
    # database.
    # extractor: "--fast"

  # Define settings for Go analysis
  ##################################
  # go:
    # The `prepare` step exists for customization on LGTM.com only.
    # prepare:
    #   packages:
    #     - example_package
    # Add an `after-prepare` step if you need to run commands after the `prepare` step.
    # Each command should be listed on a separate line.
    # after_prepare:
    #   - export PATH=$LGTM_WORKSPACE/tools:$PATH
    # The `index` step builds the code and extracts information during the build
    # process.
    # index:
    # Override the autobuild process by specifying a list of custom build commands
    # to use instead.
    # build_command:
    #   - ./compile-all.sh

  # Define settings for Java analysis
  ####################################
  # java:
    # The `prepare` step exists for customization on LGTM.com only.
    # prepare:
    #   packages:
    #     - example_package
    # Add an `after-prepare` step if you need to run commands after the prepare step.
    # Each command should be listed on a separate line.
    # after_prepare:
    #   - export PATH=$LGTM_WORKSPACE/tools:$PATH
    # The `index` step extracts information from the files in the codebase.
    # index:
    # Specify Gradle settings.
    # gradle:
    # Specify the required Gradle version.
    # Default: determined automatically.
    # version: 4.4
    # Override the autobuild process by specifying a list of custom build commands
    # to use instead.
    # build_command: ./compile-all.sh
    # Specify the Java version required to build the project.
    # java_version: 11
    # Specify whether to extract Java .properties files
    # Default: false
    # properties_files: true
    # Specify Maven settings.
    # maven:
    # Specify the path (absolute or relative) of a Maven settings file to use.
    # Default: Maven uses a settings file in the default location, if it exists.
    # settings_file: /opt/share/settings.xml
    # Specify the path of a Maven toolchains file.
    # Default: Maven uses a toolchains file in the default location, if it exists.
    # toolchains_file: /opt/share/toolchains.xml
    # Specify the required Maven version.
    # Default: the Maven version is determined automatically, where feasible.
    # version: 3.5.2
    # Specify how XML files should be extracted:
    # all = extract all XML files.
    # default = only extract XML files named `AndroidManifest.xml`, `pom.xml`, and `web.xml`.
    # disabled = do not extract any XML files.
    # xml_mode: all

  # Define settings for JavaScript analysis
  ##########################################
  # javascript:
    # The `prepare` step exists for customization on LGTM.com only.
    # prepare:
    #   packages:
    #     - example_package
    # Add an `after-prepare` step if you need to run commands after the prepare step.
    # Each command should be listed on a separate line.
    # after_prepare:
    #   - export PATH=$LGTM_WORKSPACE/tools:$PATH
    # The `index` step extracts information from the files in the codebase.
    # index:
    # Specify a list of files and folders to extract.
    # Default: The project root directory.
    # include:
    #   - src/js
    # Specify a list of files and folders to exclude from extraction.
    # exclude:
    #   - thirdparty/lib
    # You can add additional file types for LGTM to extract, by mapping file
    # extensions (including the leading dot) to file types. The usual
    # include/exclude patterns apply, so, for example, `.jsm` files under
    # `thirdparty/lib` will not be extracted.
    # filetypes:
    #   ".jsm": "js"
    #   ".tmpl": "html"
    # Specify a list of glob patterns to include/exclude files from extraction; this
    # is applied on top of the include/exclude paths from above; patterns are
    # processed in the same way as for path classifiers above.
    # Default: include all files with known extensions (such as .js, .ts and .html),
    # but exclude files ending in `-min.js` or `.min.js` and folders named `node_modules`
    # or `bower_components`
    # filters:
    # exclude any *.ts files anywhere.
    #   - exclude: "**/*.ts"
    # but include *.ts files under src/js/typescript.
    #   - include: "src/js/typescript/**/*.ts"
    # Specify how TypeScript files should be extracted:
    # none = exclude all TypeScript files.
    # basic = extract syntactic information from TypeScript files.
    # full = extract syntactic and type information from TypeScript files.
    # Default: full.
    # typescript: basic
    # By default, LGTM doesn't extract any XML files. You can override this by
    # using the `xml_mode` property and setting it to `all`.
    # xml_mode: all

  # Define settings for Python analysis
  ######################################
  # python:
  #   # The `prepare` step exists for customization on LGTM.com only.
  #   # prepare:
  #   #   # The `packages` section is valid for LGTM.com only. It names packages to
  #   #   # be installed.
  #   #   packages: libpng-dev
  #   # This step is useful for Python analysis where you want to prepare the
  #   # environment for the `python_setup` step without changing the default behavior
  #   # for that step.
  #   after_prepare:
  #     - export PATH=$LGTM_WORKSPACE/tools:$PATH
  #   # This sets up the Python interpreter and virtual environment, ready for the
  #   # `index` step to extract the codebase.
  #   python_setup:
  #     # Specify packages that should NOT be installed despite being mentioned in the
  #     # requirements.txt file.
  #     # Default: no package marked for exclusion.
  #     exclude_requirements:
  #       - pywin32
  #     # Specify a list of pip packages to install.
  #     # If any of these packages cannot be installed, the extraction will fail.
  #     requirements:
  #       - Pillow
  #     # Specify a list of requirements text files to use to set up the environment,
  #     # or false for none. Default: any requirements.txt, test-requirements.txt,
  #     # and similarly named files identified in the codebase are used.
  #     requirements_files:
  #       - required-packages.txt
  #     # Specify a setup.py file to use to set up the environment, or false for none.
  #     # Default: any setup.py files identified in the codebase are used in preference
  #     # to any requirements text files.
  #     setup_py: new-setup.py
  #   # Override the version of the Python interpreter used for setup and extraction
  #   # Default: Python 3.
  #   version: 2
  #   # Optional step. You should add a `before_index` step if you need to run commands
  #   # before the `index` step.
  #   before_index:
  #     - antlr4 -Dlanguage=Python3 Grammar.g4
  #   # The `index` step extracts information from the files in the codebase.
  #   index:
  #     # Specify a list of files and folders to exclude from extraction.
  #     # Default: Git submodules and Subversion externals.
  #     exclude:
  #       - legacy-implementation
  #       - thirdparty/libs
  #     filters:
  #       - exclude: "**/documentation/examples/snippets/*.py"
  #       - include: "**/documentation/examples/test_application/*"
  #     include:
  #       - example/to/include
@@ -75,4 +75,4 @@ available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.ht
[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
https://www.contributor-covenant.org/faq
Jenkinsfile2 (13 changed lines)
@@ -7,7 +7,8 @@ file_zh_changed = ''
file_en_changed = ''
file_no_doc_changed = '1'
file_only_tdgpt_change_except = '1'
tdgpt_file = "forecastoperator.c\\|anomalywindowoperator.c\\|tanalytics.h\\|tanalytics.c\\|tdgpt_cases.task\\|analytics"
tdgpt_file = "forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics"


def abortPreviousBuilds() {
    def currentJobName = env.JOB_NAME

@@ -69,7 +70,7 @@ def check_docs(){
        returnStdout: true
    )

    file_no_doc_changed = sh (
    def file_no_doc_changed = sh (
        script: '''
            cd ${WKC}
            git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" || :

@@ -80,7 +81,7 @@ def check_docs(){
    file_only_tdgpt_change_except = sh (
        script: '''
            cd ${WKC}
            git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v ${tdgpt_file} || :
            git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -Ev "forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics" ||:
        ''',
        returnStdout: true
    ).trim()

@@ -367,8 +368,8 @@ def pre_test_build_win() {
    '''
    bat '''
        cd %WIN_COMMUNITY_ROOT%/tests/ci
        pip3 install taospy==2.7.16
        pip3 install taos-ws-py==0.3.5
        pip3 install taospy==2.7.21
        pip3 install taos-ws-py==0.3.8
        xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
    '''
    return 1

@@ -572,7 +573,7 @@ pipeline {
    cd ${WKC}/tests/parallel_test
    ./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + '''
    '''
    if ( file_no_doc_changed =~ /orecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics/ ) {
    if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics/ ) {
        sh '''
            cd ${WKC}/tests/parallel_test
            export DEFAULT_RETRY_TIME=2
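The `tdgpt_file` change above drops the escaped `\|` alternation (plain grep treats its pattern as a BRE, where `|` must be escaped to alternate) in favor of `grep -Ev`, where `|` is the ERE alternation operator. A minimal sketch of the difference:

```bash
# Both commands filter out the tdgpt-related names and print only README.md.
printf 'forecastoperator.c\nREADME.md\n' | grep -v  'forecastoperator.c\|tanalytics.c'   # BRE: escaped |
printf 'forecastoperator.c\nREADME.md\n' | grep -Ev 'forecastoperator.c|tanalytics.c'   # ERE: bare |
```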
README-CN.md (424 changed lines)
@@ -1,6 +1,5 @@
<p>
<p align="center">
  <a href="https://tdengine.com" target="_blank">
  <a href="https://www.taosdata.com" target="_blank">
    <img
      src="docs/assets/tdengine.svg"
      alt="TDengine"
@@ -8,16 +7,39 @@
    />
  </a>
</p>
<p>

[](https://travis-ci.org/taosdata/TDengine)
[](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
[](https://bestpractices.coreinfrastructure.org/projects/4201)
Simplified Chinese | [English](README.md) | [TDengine Cloud](https://cloud.taosdata.com/?utm_medium=cn&utm_source=github) | Many positions are open for hire; see [here](https://www.taosdata.com/careers/)

Simplified Chinese | [English](README.md) | [TDengine Cloud](https://cloud.taosdata.com/?utm_medium=cn&utm_source=github) | Many positions are open for hire; see [here](https://www.taosdata.com/cn/careers/)
# Table of Contents

# Introduction to TDengine
1. [Introduction to TDengine](#1-tdengine-简介)
1. [Documentation](#2-文档)
1. [Prerequisites](#3-必备工具)
   - [3.1 Prerequisites on Linux](#31-linux系统)
   - [3.2 Prerequisites on macOS](#32-macos系统)
   - [3.3 Prerequisites on Windows](#33-windows系统)
   - [3.4 Clone the repository](#34-克隆仓库)
1. [Building](#4-构建)
   - [4.1 Build on Linux](#41-linux系统上构建)
   - [4.2 Build on macOS](#42-macos系统上构建)
   - [4.3 Build on Windows](#43-windows系统上构建)
1. [Packaging](#5-打包)
1. [Installation](#6-安装)
   - [6.1 Install on Linux](#61-linux系统上安装)
   - [6.2 Install on macOS](#62-macos系统上安装)
   - [6.3 Install on Windows](#63-windows系统上安装)
1. [Running](#7-快速运行)
   - [7.1 Run on Linux](#71-linux系统上运行)
   - [7.2 Run on macOS](#72-macos系统上运行)
   - [7.3 Run on Windows](#73-windows系统上运行)
1. [Testing](#8-测试)
1. [Releasing](#9-版本发布)
1. [Workflow](#10-工作流)
1. [Coverage](#11-覆盖率)
1. [Contributing](#12-成为社区贡献者)


# 1. Introduction

TDengine is an open-source, high-performance, cloud-native time-series database (TSDB) that is widely used in IoT, industrial internet, connected vehicles, IT operations, finance, and other fields. Beyond the core time-series database features, TDengine also provides caching, data subscription, and stream processing, making it a minimalist time-series data platform that minimizes system design complexity and lowers R&D and operating costs. Compared with other time-series databases, TDengine's main advantages are as follows:

@@ -33,323 +55,335 @@ TDengine is an open-source, high-performance, cloud-native time-series database

- **Open source at the core**: TDengine's core code, including clustering, is fully open source. As of August 1, 2022, there were more than 135.9k running instances worldwide, with 18.7k GitHub stars and 4.4k forks, and an active community.

# Documentation
For a full list of TDengine's advanced features, see [here](https://tdengine.com/tdengine/). The easiest way to experience TDengine is through [TDengine Cloud](https://cloud.tdengine.com).

For the complete user manual, system architecture, and more details, see the [TDengine 文档](https://docs.taosdata.com) or the [TDengine Documentation](https://docs.tdengine.com).
# 2. Documentation

# Building
For the complete user manual, system architecture, and more details, see [TDengine](https://www.taosdata.com/) or the [official TDengine documentation](https://docs.taosdata.com).

You can choose to install TDengine via [container](https://docs.taosdata.com/get-started/docker/), [installation package](https://docs.taosdata.com/get-started/package/), or [Kubernetes](https://docs.taosdata.com/deployment/k8s/), or use the fully managed [cloud service](https://cloud.taosdata.com/) without any installation or deployment. This quick guide is for developers who want to build, package, and test TDengine by themselves.

To build or test TDengine connectors, please visit the following repositories: [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go), [Python Connector](https://github.com/taosdata/taos-connector-python), [Node.js Connector](https://github.com/taosdata/taos-connector-node), [C# Connector](https://github.com/taosdata/taos-connector-dotnet), [Rust Connector](https://github.com/taosdata/taos-connector-rust).

# 3. Prerequisites

TDengine can currently be installed and run on Linux, Windows, macOS, and other platforms. Applications on any OS can also connect to the taosd server through taosAdapter's RESTful interface. X64/ARM64 CPUs are supported, and MIPS64, Alpha64, ARM32, RISC-V, and other architectures will be supported in the future. Building with a cross-compiler is not supported at the moment.

You can choose to install from source, via [container](https://docs.taosdata.com/get-started/docker/), [installation package](https://docs.taosdata.com/get-started/package/), or [Kubernetes](https://docs.taosdata.com/deployment/k8s/). This quick guide only applies to installing from source.

TDengine also provides a set of auxiliary tools, taosTools, which currently contains taosBenchmark (formerly named taosdemo) and taosdump. By default taosTools is not built together with TDengine; you can use `cmake .. -DBUILD_TOOLS=true` to build taosTools along with TDengine.
## 3.1 On Linux

To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or a higher version.
<details>

## Install build tools
<summary>Install required tools on Linux</summary>

### Ubuntu 18.04 and above & Debian:
### Ubuntu 18.04, 20.04, 22.04

```bash
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
sudo apt-get update
sudo apt-get install -y gcc cmake build-essential git libjansson-dev \
  libsnappy-dev liblzma-dev zlib1g-dev pkg-config
```

#### Install build dependencies for taos-tools

To build [taos-tools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed:
### CentOS 8

```bash
sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config
```

### CentOS 7.9

```bash
sudo yum install epel-release
sudo yum update
sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
yum install -y epel-release gcc gcc-c++ make cmake git perl dnf-plugins-core
yum config-manager --set-enabled powertools
yum install -y zlib-static xz-devel snappy-devel jansson-devel pkgconfig libatomic-static libstdc++-static
```

### CentOS 8/Fedora/Rocky Linux
</details>

## 3.2 On macOS

<details>

<summary>Install required tools on macOS</summary>

Install the dependency tool [brew](https://brew.sh/) as prompted.

```bash
sudo dnf install -y gcc gcc-c++ gflags make cmake epel-release git openssl-devel
```

#### Install build dependencies for taosTools on CentOS

#### CentOS 7.9

```
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
```

#### CentOS 8/Fedora/Rocky Linux

```
sudo yum install -y epel-release
sudo yum install -y dnf-plugins-core
sudo yum config-manager --set-enabled powertools
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
```

Note: Because snappy lacks pkg-config support (see [this link](https://github.com/google/snappy/pull/86)), cmake reports that libsnappy cannot be found, but it actually works fine.

If installing powertools fails, you can try instead:
```
sudo yum config-manager --set-enabled powertools
```

#### CentOS + devtoolset

In addition to the build dependencies above, run the following commands:

```
sudo yum install centos-release-scl
sudo yum install devtoolset-9 devtoolset-9-libatomic-devel
scl enable devtoolset-9 -- bash
```

### macOS

```
brew install argp-standalone gflags pkgconfig
```

### Set up the Go development environment
</details>

TDengine includes several components developed in Go, such as taosAdapter. Please refer to the official golang.org documentation to set up the Go development environment.
## 3.3 On Windows

Please use Go 1.20 or above. For users in China, we recommend using a proxy to speed up package downloads.
<details>

```
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct
```
<summary>Install required tools on Windows</summary>

taosAdapter is not built by default, but you can use the following command to build taosAdapter as the RESTful interface service.
Work in progress.

```
cmake .. -DBUILD_HTTP=false
```
</details>

### Set up the Rust development environment
## 3.4 Clone the repository

TDengine includes several components developed in Rust. Please refer to the official rust-lang.org documentation to set up the Rust development environment.

## Get the source code

First, clone the source code from GitHub:
Clone the TDengine repository to your machine with the following commands:

```bash
git clone https://github.com/taosdata/TDengine.git
cd TDengine
```
If downloading over HTTPS is slow, you can switch to SSH by adding the following two lines to ~/.gitconfig. You need to upload your SSH key to GitHub first; see the official GitHub documentation for details.

```
[url "git@github.com:"]
    insteadOf = https://github.com/
```
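The same URL rewrite can also be configured without editing ~/.gitconfig by hand; an equivalent one-liner using git's own config command:

```bash
git config --global url."git@github.com:".insteadOf "https://github.com/"
```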
## Special note
# 4. Building

The [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go), [Python Connector](https://github.com/taosdata/taos-connector-python), [Node.js Connector](https://github.com/taosdata/taos-connector-node), [C# Connector](https://github.com/taosdata/taos-connector-dotnet), [Rust Connector](https://github.com/taosdata/taos-connector-rust) and the [Grafana plugin](https://github.com/taosdata/grafanaplugin) have been moved to independent repositories.
TDengine also provides a set of auxiliary tools, taosTools, which currently contains taosBenchmark (formerly named taosdemo) and taosdump. By default taosTools is not built together with TDengine; you can use `cmake .. -DBUILD_TOOLS=true` to build taosTools along with TDengine.

To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or a higher version.

## Build TDengine
## 4.1 Build on Linux

### On Linux
<details>

You can run the `build.sh` script in the repository to build TDengine and taosTools (including taosBenchmark and taosdump).
<summary>Build steps on Linux</summary>

You can build TDengine and taosTools, including taosBenchmark and taosdump, with the `build.sh` script:

```bash
./build.sh
```

This script is equivalent to running:
You can also build with the following commands:

```bash
mkdir debug
cd debug
mkdir debug && cd debug
cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true
make
```

You can also choose jemalloc as the memory allocator instead of the default glibc:
Jemalloc can be used as the memory allocator instead of glibc:

```bash
apt install autoconf
cmake .. -DJEMALLOC_ENABLED=true
```

On X86-64, X86, and arm64 platforms, the TDengine build script can detect the machine architecture automatically. You can also specify the CPU type manually with the CPUTYPE option, e.g. aarch64.

aarch64:
The TDengine build script can automatically detect the host architecture on x86, x86-64, and arm64 platforms.
You can also specify the architecture manually via the CPUTYPE option:

```bash
cmake .. -DCPUTYPE=aarch64 && cmake --build .
```

### On Windows
</details>

If you are using Visual Studio 2013:
## 4.2 Build on macOS

Open cmd.exe and, when running vcvarsall.bat, specify "x86_amd64" for a 64-bit OS and "x86" for a 32-bit OS.
<details>

```bash
<summary>Build steps on macOS</summary>

Please install Xcode command line tools and cmake. Verified with Xcode 11.4+ on Catalina and Big Sur.

```shell
mkdir debug && cd debug
"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < x86_amd64 | x86 >
cmake .. && cmake --build .
```

</details>

## 4.3 Build on Windows

<details>

<summary>Build steps on Windows</summary>

If you are using Visual Studio 2013, run "cmd.exe" to open a command window and execute the commands below.
When running vcvarsall.bat, specify "amd64" for 64-bit Windows and "x86" for 32-bit Windows.

```cmd
mkdir debug && cd debug
"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < amd64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```

If you are using Visual Studio 2019 or 2017:
If you use Visual Studio 2019 or 2017:

Open cmd.exe and, when running vcvarsall.bat, specify "x64" for a 64-bit OS and "x86" for a 32-bit OS.
Run "cmd.exe" to open a command window and execute the commands below.
When running vcvarsall.bat, specify "x64" for 64-bit Windows and "x86" for 32-bit Windows.

```bash
```cmd
mkdir debug && cd debug
"c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```

You can also find the "Visual Studio < 2019 | 2017 >" folder in the Start menu and, depending on your system, open "x64 Native Tools Command Prompt for VS < 2019 | 2017 >" or "x86 Native Tools Command Prompt for VS < 2019 | 2017 >", then run:
Alternatively, open a command window from the Windows Start menu -> "Visual Studio < 2019 | 2017 >" folder -> "x64 Native Tools Command Prompt for VS < 2019 | 2017 >" or "x86 Native Tools Command Prompt for VS < 2019 | 2017 >" depending on your Windows architecture, then run the following commands:

```bash
```cmd
mkdir debug && cd debug
cmake .. -G "NMake Makefiles"
nmake
```
</details>

### On macOS
# 5. Packaging

Install Xcode command line tools and cmake. Xcode 11.4+ is required on Catalina and Big Sur.
Due to some component dependencies, the TDengine community installer cannot be created from this repository alone. We are still working on improving this.

# 6. Installation


## 6.1 Install on Linux

<details>

<summary>Detailed installation steps on Linux</summary>

After a successful build, TDengine can be installed with:

```bash
mkdir debug && cd debug
cmake .. && cmake --build .
sudo make install
```
Installing from source also configures service management for TDengine. Users can also install from the [TDengine installation package](https://docs.taosdata.com/get-started/package/).

# Installation
</details>

## On Linux
## 6.2 Install on macOS

After the build is done, install TDengine:
<details>

<summary>Detailed installation steps on macOS</summary>

After a successful build, TDengine can be installed with:

```bash
sudo make install
```

See [Directory Structure](https://docs.taosdata.com/reference/directory/) to learn more about the directories and files created on your system.
</details>

Installing from source also configures service management for TDengine; users can also choose to [install from a package](https://docs.taosdata.com/get-started/package/).
## 6.3 Install on Windows

After installation, start the TDengine service from a terminal:
<details>

```bash
sudo systemctl start taosd
```
<summary>Detailed installation steps on Windows</summary>

Users can connect to the TDengine service with the TDengine CLI by typing in a terminal:

```bash
taos
```

If the TDengine CLI connects to the service successfully, it prints a welcome message and version information; otherwise, it prints an error message.

## On Windows

After the build is done, install TDengine:
After a successful build, TDengine can be installed with:

```cmd
nmake install
```

## On macOS
</details>

After the build is done, install TDengine:
# 7. Running

## 7.1 Run TDengine on Linux

<details>

<summary>Detailed steps to run on Linux</summary>

After installing TDengine on Linux, run the following commands in a terminal to start the service:

```bash
sudo make install
sudo systemctl start taosd
```

See [Directory Structure](https://docs.taosdata.com/reference/directory/) to learn more about the directories and files created on your system.

Installing from source also configures service management for TDengine; users can also choose to [install from a package](https://docs.taosdata.com/get-started/package/).

After installation, you can start the service by double-clicking the TDengine icon in Applications, or start it from a terminal:

```bash
sudo launchctl start com.tdengine.taosd
```

Users can connect to the TDengine service with the TDengine CLI by typing in a terminal:
Then users can connect to the TDengine service with the TDengine CLI:

```bash
taos
```

If the TDengine CLI connects to the service successfully, it prints a welcome message and version information; otherwise, it prints an error message.
If the TDengine CLI connects to the server successfully, it prints the welcome and version information; otherwise, a connection error message is shown.

## Quick run

If you do not want to run TDengine as a service, you can run it directly in a terminal: after the build completes, run the following command (on Windows, the generated executable has an .exe suffix, e.g. taosd.exe):
If you do not want to run TDengine as a service, you can run it in the current terminal. For example, to quickly start a TDengine server after building, run the following command in a terminal (we take Linux as an example; on Windows the command is `taosd.exe`):

```bash
./build/bin/taosd -c test/cfg
```

In another terminal, connect to the server with the TDengine CLI:
In another terminal, use the TDengine CLI to connect to the server:

```bash
./build/bin/taos -c test/cfg
```

The option "-c test/cfg" specifies the directory of the system configuration file.
The `-c test/cfg` option specifies the directory of the system configuration file.

# Try TDengine
</details>

In the TDengine CLI, you can create/drop databases and tables and run insert and query statements via SQL commands.
## 7.2 Run TDengine on macOS

```sql
CREATE DATABASE demo;
USE demo;
CREATE TABLE t (ts TIMESTAMP, speed INT);
INSERT INTO t VALUES('2019-07-15 00:00:00', 10);
INSERT INTO t VALUES('2019-07-15 01:00:00', 20);
SELECT * FROM t;
          ts          |   speed   |
===================================
 19-07-15 00:00:00.000|         10|
 19-07-15 01:00:00.000|         20|
Query OK, 2 row(s) in set (0.001700s)
```
<details>

<summary>Detailed steps to run on macOS</summary>

After installation on macOS, start the service by double-clicking /applications/TDengine, or run the following command in a terminal:

```bash
sudo launchctl start com.tdengine.taosd
```

# Application development
Then connect to the TDengine server with the TDengine CLI in a terminal:

## Official connectors
```bash
taos
```

TDengine provides a rich set of application development interfaces, including C/C++, Java, Python, Go, Node.js, C#, and RESTful, to help users develop applications quickly:
If the TDengine CLI connects to the server successfully, it prints the welcome and version information; otherwise, an error message is shown.

- [Java](https://docs.taosdata.com/reference/connector/java/)
- [C/C++](https://docs.taosdata.com/reference/connector/cpp/)
- [Python](https://docs.taosdata.com/reference/connector/python/)
- [Go](https://docs.taosdata.com/reference/connector/go/)
- [Node.js](https://docs.taosdata.com/reference/connector/node/)
- [Rust](https://docs.taosdata.com/reference/connector/rust/)
- [C#](https://docs.taosdata.com/reference/connector/csharp/)
- [RESTful API](https://docs.taosdata.com/reference/connector/rest-api/)
</details>

# Become a community contributor

## 7.3 Run TDengine on Windows

<details>

<summary>Detailed steps to run on Windows</summary>

You can start the TDengine server on Windows with the following command:

```cmd
.\build\bin\taosd.exe -c test\cfg
```

In another terminal, use the TDengine CLI to connect to the server:

```cmd
.\build\bin\taos.exe -c test\cfg
```

The `-c test/cfg` option specifies the directory of the system configuration file.

</details>

# 8. Testing

For how to run different kinds of tests on TDengine, see [TDengine Testing](./tests/README-CN.md).

# 9. Releasing

For the complete list of TDengine releases, see [Releases](https://github.com/taosdata/TDengine/releases).

# 10. Workflow

The TDengine build-check workflow can be found at [GitHub Actions](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml); more workflows are being created and will be available soon.

# 11. Coverage

The latest TDengine test coverage report is available at [coveralls.io](https://coveralls.io/github/taosdata/TDengine).

<details>

<summary>How to run the coverage report locally?</summary>

To generate the test coverage report locally (in HTML format), run:

```bash
cd tests
bash setup-lcov.sh -v 1.16 && ./run_local_coverage.sh -b main -c task
# on main branch and run cases in longtimeruning_cases.task
# for more information about options please refer to ./run_local_coverage.sh -h
```
> **Note:**
> The -b and -i options rebuild TDengine with the -DCOVER=true option, which may take some time.

</details>

# 12. Contributing

Click [here](https://www.taosdata.com/contributor) to learn how to become a TDengine contributor.

# Join the community

TDengine's official community group, the "IoT Big Data Group", is open to everyone; you are welcome to join the discussion. Search for the WeChat ID "tdengine" and add Little T as a friend to join the group.
395
README.md
395
README.md
|
@ -1,4 +1,3 @@
|
|||
<p>
|
||||
<p align="center">
|
||||
<a href="https://tdengine.com" target="_blank">
|
||||
<img
|
||||
|
@ -8,9 +7,13 @@
|
|||
/>
|
||||
</a>
|
||||
</p>
|
||||
<p>
|
||||
|
||||
[](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml)
|
||||
[](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
|
||||
[](https://github.com/feici02/TDengine/commits/main/)
|
||||
<br />
|
||||
[](https://github.com/taosdata/TDengine/releases)
|
||||
[](https://github.com/taosdata/TDengine/blob/main/LICENSE)
|
||||
[](https://bestpractices.coreinfrastructure.org/projects/4201)
|
||||
<br />
|
||||
[](https://twitter.com/tdenginedb)
|
||||
|
@ -23,24 +26,33 @@ English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine
|
|||
|
||||
# Table of Contents
|
||||
|
||||
1. [What is TDengine?](#1-what-is-tdengine)
|
||||
2. [Documentation](#2-documentation)
|
||||
3. [Building](#3-building)
|
||||
1. [Install build tools](#31-install-build-tools)
|
||||
1. [Get the source codes](#32-get-the-source-codes)
|
||||
1. [Special Note](#33-special-note)
|
||||
1. [Build TDengine](#34-build-tdengine)
|
||||
4. [Installing](#4-installing)
|
||||
1. [On Linux platform](#41-on-linux-platform)
|
||||
1. [On Windows platform](#42-on-windows-platform)
|
||||
1. [On macOS platform](#43-on-macos-platform)
|
||||
1. [Quick Run](#44-quick-run)
|
||||
5. [Try TDengine](#5-try-tdengine)
|
||||
6. [Developing with TDengine](#6-developing-with-tdengine)
|
||||
7. [Contribute to TDengine](#7-contribute-to-tdengine)
|
||||
8. [Join the TDengine Community](#8-join-the-tdengine-community)
|
||||
1. [Introduction](#1-introduction)
|
||||
1. [Documentation](#2-documentation)
|
||||
1. [Prerequisites](#3-prerequisites)
|
||||
- [3.1 Prerequisites On Linux](#31-on-linux)
|
||||
- [3.2 Prerequisites On macOS](#32-on-macos)
|
||||
- [3.3 Prerequisites On Windows](#33-on-windows)
|
||||
- [3.4 Clone the repo](#34-clone-the-repo)
|
||||
1. [Building](#4-building)
|
||||
- [4.1 Build on Linux](#41-build-on-linux)
|
||||
- [4.2 Build on macOS](#42-build-on-macos)
|
||||
- [4.3 Build On Windows](#43-build-on-windows)
|
||||
1. [Packaging](#5-packaging)
|
||||
1. [Installation](#6-installation)
|
||||
- [6.1 Install on Linux](#61-install-on-linux)
|
||||
- [6.2 Install on macOS](#62-install-on-macos)
|
||||
- [6.3 Install on Windows](#63-install-on-windows)
|
||||
1. [Running](#7-running)
|
||||
- [7.1 Run TDengine on Linux](#71-run-tdengine-on-linux)
|
||||
- [7.2 Run TDengine on macOS](#72-run-tdengine-on-macos)
|
||||
- [7.3 Run TDengine on Windows](#73-run-tdengine-on-windows)
|
||||
1. [Testing](#8-testing)
|
||||
1. [Releasing](#9-releasing)
|
||||
1. [Workflow](#10-workflow)
|
||||
1. [Coverage](#11-coverage)
|
||||
1. [Contributing](#12-contributing)
|
||||
|
||||
# 1. What is TDengine?
|
||||
# 1. Introduction
|
||||
|
||||
TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-series databases with the following advantages:
|
||||
|
||||
|
@ -62,132 +74,85 @@ For a full list of TDengine competitive advantages, please [check here](https://
|
|||
|
||||
For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) ([TDengine 文档](https://docs.taosdata.com))
|
||||
|
||||
# 3. Building
|
||||
You can choose to install TDengine via [container](https://docs.tdengine.com/get-started/deploy-in-docker/), [installation package](https://docs.tdengine.com/get-started/deploy-from-package/), [Kubernetes](https://docs.tdengine.com/operations-and-maintenance/deploy-your-cluster/#kubernetes-deployment) or try [fully managed service](https://cloud.tdengine.com/) without installation. This quick guide is for developers who want to contribute, build, release and test TDengine by themselves.
|
||||
|
||||
At the moment, TDengine server supports running on Linux/Windows/macOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support build with cross-compiling environment.
|
||||
For contributing/building/testing TDengine Connectors, please check the following repositories: [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go), [Python Connector](https://github.com/taosdata/taos-connector-python), [Node.js Connector](https://github.com/taosdata/taos-connector-node), [C# Connector](https://github.com/taosdata/taos-connector-dotnet), [Rust Connector](https://github.com/taosdata/taos-connector-rust).
|
||||
|
||||
You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to installing from source.
|
||||
# 3. Prerequisites
|
||||
|
||||
TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to make them be compiled with TDengine.
|
||||
At the moment, TDengine server supports running on Linux/Windows/MacOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service. TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support build with cross-compiling environment.
|
||||
|
||||
To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or higher versions in the project directory.
|
||||
## 3.1 On Linux
|
||||
|
||||
## 3.1 Install build tools
|
||||
<details>
|
||||
|
||||
### Ubuntu 18.04 and above or Debian
|
||||
<summary>Install required tools on Linux</summary>
|
||||
|
||||
### For Ubuntu 18.04, 20.04, 22.04
|
||||
|
||||
```bash
|
||||
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y gcc cmake build-essential git libjansson-dev \
|
||||
libsnappy-dev liblzma-dev zlib1g-dev pkg-config
|
||||
```
|
||||
|
||||
#### Install build dependencies for taosTools
|
||||
|
||||
To build the [taosTools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed.
|
||||
### For CentOS 8
|
||||
|
||||
```bash
|
||||
sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config
|
||||
```
|
||||
|
||||
### CentOS 7.9
|
||||
|
||||
```bash
|
||||
sudo yum install epel-release
|
||||
sudo yum update
|
||||
sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel
|
||||
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
|
||||
yum install -y epel-release gcc gcc-c++ make cmake git perl dnf-plugins-core
|
||||
yum config-manager --set-enabled powertools
|
||||
yum install -y zlib-static xz-devel snappy-devel jansson-devel pkgconfig libatomic-static libstdc++-static
|
||||
```
|
||||
|
||||
### CentOS 8/Fedora/Rocky Linux
|
||||
</details>
|
||||
|
||||
## 3.2 On macOS
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Install required tools on macOS</summary>
|
||||
|
||||
Please install the dependencies with [brew](https://brew.sh/).
|
||||
|
||||
```bash
|
||||
sudo dnf install -y gcc gcc-c++ make cmake epel-release gflags git openssl-devel
|
||||
```
|
||||
|
||||
#### Install build dependencies for taosTools on CentOS
|
||||
|
||||
#### CentOS 7.9
|
||||
|
||||
```
|
||||
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
|
||||
```
|
||||
|
||||
#### CentOS 8/Fedora/Rocky Linux
|
||||
|
||||
```
|
||||
sudo yum install -y epel-release
|
||||
sudo yum install -y dnf-plugins-core
|
||||
sudo yum config-manager --set-enabled powertools
|
||||
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
|
||||
```
|
||||
|
||||
Note: Since snappy lacks pkg-config support (refer to [this link](https://github.com/google/snappy/pull/86)), CMake prompts that libsnappy cannot be found. Nevertheless, snappy still works well.
|
||||
|
||||
If the PowerTools installation fails, you can try to use:
|
||||
|
||||
```
|
||||
sudo yum config-manager --set-enabled powertools
|
||||
```
|
||||
|
||||
#### For CentOS + devtoolset
|
||||
|
||||
Besides the above dependencies, please run the following commands:
|
||||
|
||||
```
|
||||
sudo yum install centos-release-scl
|
||||
sudo yum install devtoolset-9 devtoolset-9-libatomic-devel
|
||||
scl enable devtoolset-9 -- bash
|
||||
```
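To verify that the newer toolchain is active in the current shell, you can run, for example:

```bash
# Runs gcc inside the devtoolset-9 environment; should report GCC 9.x
scl enable devtoolset-9 -- gcc --version
```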
|
||||
|
||||
### macOS
|
||||
|
||||
```
|
||||
brew install argp-standalone gflags pkgconfig
|
||||
```
|
||||
|
||||
### Setup golang environment
|
||||
</details>
|
||||
|
||||
TDengine includes a few components, such as taosAdapter, that are developed in Go. Please refer to the official documentation at golang.org to set up your Go environment.
|
||||
## 3.3 On Windows
|
||||
|
||||
Please use Go 1.20 or later. For users in China, we recommend using a proxy to accelerate package downloading.
|
||||
<details>
|
||||
|
||||
```
|
||||
go env -w GO111MODULE=on
|
||||
go env -w GOPROXY=https://goproxy.cn,direct
|
||||
```
|
||||
<summary>Install required tools on Windows</summary>
|
||||
|
||||
By default, taosAdapter is not built, but you can use the following command to build it as the service providing the RESTful interface.
|
||||
Work in Progress.
|
||||
|
||||
```
|
||||
cmake .. -DBUILD_HTTP=false
|
||||
```
|
||||
</details>
|
||||
|
||||
### Setup rust environment
|
||||
## 3.4 Clone the repo
|
||||
|
||||
TDengine includes a few components developed in Rust. Please refer to the official documentation at rust-lang.org to set up your Rust environment.
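A common way to set this up is via rustup, for example:

```bash
# Install the Rust toolchain using the official rustup installer script
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
# Verify the installation
cargo --version
```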
|
||||
|
||||
## 3.2 Get the source codes
|
||||
|
||||
First of all, you may clone the source code from GitHub:
|
||||
Clone the repository to the target machine:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/taosdata/TDengine.git
|
||||
cd TDengine
|
||||
```
|
||||
|
||||
You can modify your ~/.gitconfig file to use the SSH protocol instead of HTTPS for better download speed. You will need to upload your SSH public key to GitHub first. Please refer to the official GitHub documentation for details.
|
||||
</details>
|
||||
|
||||
```
|
||||
[url "git@github.com:"]
|
||||
insteadOf = https://github.com/
|
||||
```
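You can check that SSH authentication to GitHub works before cloning, for example:

```bash
# Should print a greeting containing your GitHub username on success
ssh -T git@github.com
```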
|
||||
# 4. Building
|
||||
|
||||
## 3.3 Special Note
|
||||
TDengine provides a few useful tools, such as taosBenchmark (formerly named taosdemo) and taosdump, which were once part of TDengine. By default, TDengine compilation does not include taosTools. You can pass `cmake .. -DBUILD_TOOLS=true` to have them compiled together with TDengine.
|
||||
|
||||
[JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go), [Python Connector](https://github.com/taosdata/taos-connector-python), [Node.js Connector](https://github.com/taosdata/taos-connector-node), [C# Connector](https://github.com/taosdata/taos-connector-dotnet), [Rust Connector](https://github.com/taosdata/taos-connector-rust) and [Grafana plugin](https://github.com/taosdata/grafanaplugin) have been moved to standalone repositories.
|
||||
TDengine requires [GCC](https://gcc.gnu.org/) 9.3.1 or higher and [CMake](https://cmake.org/) 3.13.0 or higher for building.
|
||||
|
||||
## 3.4 Build TDengine
|
||||
## 4.1 Build on Linux
|
||||
|
||||
### On Linux platform
|
||||
<details>
|
||||
|
||||
<summary>Detailed steps to build on Linux</summary>
|
||||
|
||||
You can run the bash script `build.sh` to build both TDengine and taosTools, including taosBenchmark and taosdump, as shown below:
|
||||
|
||||
|
@ -198,29 +163,46 @@ You can run the bash script `build.sh` to build both TDengine and taosTools incl
|
|||
It is equivalent to executing the following commands:
|
||||
|
||||
```bash
|
||||
mkdir debug
|
||||
cd debug
|
||||
mkdir debug && cd debug
|
||||
cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true
|
||||
make
|
||||
```
|
||||
|
||||
You can use Jemalloc as the memory allocator instead of glibc:

```bash
apt install autoconf
cmake .. -DJEMALLOC_ENABLED=true
```
|
||||
|
||||
The TDengine build script can auto-detect the host machine's architecture on x86, x86-64, and arm64 platforms.
You can also specify the architecture manually via the CPUTYPE option if the detection result is not correct, for example aarch64:
|
||||
|
||||
```bash
|
||||
cmake .. -DCPUTYPE=aarch64 && cmake --build .
|
||||
```
|
||||
|
||||
### On Windows platform
|
||||
</details>
|
||||
|
||||
## 4.2 Build on macOS
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Detailed steps to build on macOS</summary>
|
||||
|
||||
Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur.
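If the command line tools are not installed yet, they can typically be installed with:

```bash
xcode-select --install
```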
|
||||
|
||||
```shell
|
||||
mkdir debug && cd debug
|
||||
cmake .. && cmake --build .
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
## 4.3 Build on Windows
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Detailed steps to build on Windows</summary>
|
||||
|
||||
If you use Visual Studio 2013, please open a command window by executing "cmd.exe".
Please specify "amd64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.
|
||||
|
@ -251,31 +233,67 @@ mkdir debug && cd debug
|
|||
cmake .. -G "NMake Makefiles"
|
||||
nmake
|
||||
```
|
||||
</details>
|
||||
|
||||
### On macOS platform
|
||||
# 5. Packaging
|
||||
|
||||
Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur.
|
||||
The TDengine community installer cannot be created from this repository alone, due to some component dependencies. We are still working on this improvement.
|
||||
|
||||
```shell
|
||||
mkdir debug && cd debug
|
||||
cmake .. && cmake --build .
|
||||
```
|
||||
# 6. Installation
|
||||
|
||||
# 4. Installing
|
||||
## 6.1 Install on Linux
|
||||
|
||||
## 4.1 On Linux platform
|
||||
<details>
|
||||
|
||||
After building successfully, TDengine can be installed by
|
||||
<summary>Detailed steps to install on Linux</summary>
|
||||
|
||||
After building successfully, TDengine can be installed by:
|
||||
|
||||
```bash
|
||||
sudo make install
|
||||
```
|
||||
|
||||
Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section.
|
||||
Installing from source code will also configure service management for TDengine. Users can also choose to [install from packages](https://docs.tdengine.com/get-started/deploy-from-package/) for it.
|
||||
|
||||
Installing from source code will also configure service management for TDengine. Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it.
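For example, on a systemd-based distribution (a sketch; the `taosd` unit name matches the service used later in this guide):

```bash
# Enable taosd to start on boot and start it immediately
sudo systemctl enable taosd
sudo systemctl start taosd
```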
|
||||
</details>
|
||||
|
||||
To start the service after installation, in a terminal, use:
|
||||
## 6.2 Install on macOS
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Detailed steps to install on macOS</summary>
|
||||
|
||||
After building successfully, TDengine can be installed by:
|
||||
|
||||
```bash
|
||||
sudo make install
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
## 6.3 Install on Windows
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Detailed steps to install on windows</summary>
|
||||
|
||||
After building successfully, TDengine can be installed by:
|
||||
|
||||
```cmd
|
||||
nmake install
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
# 7. Running
|
||||
|
||||
## 7.1 Run TDengine on Linux
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Detailed steps to run on Linux</summary>
|
||||
|
||||
To start the service after installation on Linux, in a terminal, use:
|
||||
|
||||
```bash
|
||||
sudo systemctl start taosd
|
||||
|
@ -289,27 +307,29 @@ taos
|
|||
|
||||
If the TDengine CLI connects to the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown.
|
||||
|
||||
## 4.2 On Windows platform
|
||||
|
||||
After building successfully, TDengine can be installed by:
|
||||
|
||||
```cmd
|
||||
nmake install
|
||||
```
|
||||
|
||||
## 4.3 On macOS platform
|
||||
|
||||
After building successfully, TDengine can be installed by:
|
||||
If you don't want to run TDengine as a service, you can run it in the current shell. For example, to quickly start a TDengine server after building, run the command below in a terminal: (We take Linux as an example; the command on Windows will be `taosd.exe`.)
|
||||
|
||||
```bash
./build/bin/taosd -c test/cfg
```
|
||||
|
||||
Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section.
|
||||
In another terminal, use the TDengine CLI to connect the server:
|
||||
|
||||
Installing from source code will also configure service management for TDengine. Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it.
|
||||
```bash
|
||||
./build/bin/taos -c test/cfg
|
||||
```
|
||||
|
||||
To start the service after installation, double-click /Applications/TDengine to start the program, or, in a terminal, use:
|
||||
Option `-c test/cfg` specifies the system configuration file directory.
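To quickly check that the server is responding, you can run a one-off statement from the CLI (a sketch; `-s` executes a statement and exits):

```bash
# Assumes the server was started with the same configuration directory
./build/bin/taos -c test/cfg -s "SHOW DATABASES;"
```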
|
||||
|
||||
</details>
|
||||
|
||||
## 7.2 Run TDengine on macOS
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Detailed steps to run on macOS</summary>
|
||||
|
||||
To start the service after installation on macOS, double-click /Applications/TDengine to start the program, or, in a terminal, use:
|
||||
|
||||
```bash
|
||||
sudo launchctl start com.tdengine.taosd
|
||||
|
@ -323,64 +343,63 @@ taos
|
|||
|
||||
If the TDengine CLI connects to the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown.
|
||||
|
||||
## 4.4 Quick Run
|
||||
</details>
|
||||
|
||||
If you don't want to run TDengine as a service, you can run it in the current shell. For example, to quickly start a TDengine server after building, run the command below in a terminal: (We take Linux as an example; the command on Windows will be `taosd.exe`.)
|
||||
|
||||
```bash
|
||||
./build/bin/taosd -c test/cfg
|
||||
## 7.3 Run TDengine on Windows
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Detailed steps to run on windows</summary>
|
||||
|
||||
You can start the TDengine server on Windows with the commands below:
|
||||
|
||||
```cmd
|
||||
.\build\bin\taosd.exe -c test\cfg
|
||||
```
|
||||
|
||||
In another terminal, use the TDengine CLI to connect the server:
|
||||
|
||||
```bash
|
||||
./build/bin/taos -c test/cfg
|
||||
```cmd
|
||||
.\build\bin\taos.exe -c test\cfg
|
||||
```
|
||||
|
||||
option "-c test/cfg" specifies the system configuration file directory.
|
||||
|
||||
# 5. Try TDengine
|
||||
</details>
|
||||
|
||||
It is easy to run SQL commands from TDengine CLI which is the same as other SQL databases.
|
||||
# 8. Testing
|
||||
|
||||
```sql
CREATE DATABASE demo;
USE demo;
CREATE TABLE t (ts TIMESTAMP, speed INT);
INSERT INTO t VALUES('2019-07-15 00:00:00', 10);
INSERT INTO t VALUES('2019-07-15 01:00:00', 20);
SELECT * FROM t;
          ts          |  speed  |
===================================
 19-07-15 00:00:00.000|       10|
 19-07-15 01:00:00.000|       20|
Query OK, 2 row(s) in set (0.001700s)
```

For how to run different types of tests on TDengine, please see [Testing TDengine](./tests/README.md).
|
||||
|
||||
# 9. Releasing
|
||||
|
||||
For the complete list of TDengine Releases, please see [Releases](https://github.com/taosdata/TDengine/releases).
|
||||
|
||||
# 10. Workflow
|
||||
|
||||
The TDengine build check workflow can be found in this [GitHub Action](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml). More workflows will be available soon.
|
||||
|
||||
# 11. Coverage
|
||||
|
||||
Latest TDengine test coverage report can be found on [coveralls.io](https://coveralls.io/github/taosdata/TDengine)
|
||||
|
||||
<details>
|
||||
|
||||
<summary>How to run the coverage report locally?</summary>
|
||||
To create the test coverage report (in HTML format) locally, please run the following commands:
|
||||
|
||||
```bash
cd tests
bash setup-lcov.sh -v 1.16 && ./run_local_coverage.sh -b main -c task
# on main branch and run cases in longtimeruning_cases.task
# for more information about options please refer to ./run_local_coverage.sh -h
```

> **NOTE:**
> Please note that the -b and -i options will recompile TDengine with the -DCOVER=true option, which may take a considerable amount of time.
|
||||
|
||||
# 6. Developing with TDengine
|
||||
</details>
|
||||
|
||||
## Official Connectors
|
||||
# 12. Contributing
|
||||
|
||||
TDengine provides abundant development tools for users to build applications on TDengine. Follow the links below to find your desired connectors and relevant documentation.
|
||||
|
||||
- [Java](https://docs.tdengine.com/reference/connectors/java/)
|
||||
- [C/C++](https://docs.tdengine.com/reference/connectors/cpp/)
|
||||
- [Python](https://docs.tdengine.com/reference/connectors/python/)
|
||||
- [Go](https://docs.tdengine.com/reference/connectors/go/)
|
||||
- [Node.js](https://docs.tdengine.com/reference/connectors/node/)
|
||||
- [Rust](https://docs.tdengine.com/reference/connectors/rust/)
|
||||
- [C#](https://docs.tdengine.com/reference/connectors/csharp/)
|
||||
- [RESTful API](https://docs.tdengine.com/reference/connectors/rest-api/)
|
||||
|
||||
# 7. Contribute to TDengine
|
||||
|
||||
Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to the project.
|
||||
|
||||
# 8. Join the TDengine Community
|
||||
|
||||
For more information about TDengine, you can follow us on social media and join our Discord server:
|
||||
|
||||
- [Discord](https://discord.com/invite/VZdSuUg4pS)
|
||||
- [Twitter](https://twitter.com/TDengineDB)
|
||||
- [LinkedIn](https://www.linkedin.com/company/tdengine/)
|
||||
- [YouTube](https://www.youtube.com/@tdengine)
|
||||
Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to TDengine.
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# addr2line
|
||||
ExternalProject_Add(addr2line
|
||||
GIT_REPOSITORY https://github.com/davea42/libdwarf-addr2line.git
|
||||
GIT_TAG master
|
||||
GIT_TAG main
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/addr2line"
|
||||
BINARY_DIR "${TD_CONTRIB_DIR}/addr2line"
|
||||
CONFIGURE_COMMAND ""
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
ExternalProject_Add(azure
|
||||
URL https://github.com/Azure/azure-sdk-for-cpp/archive/refs/tags/azure-storage-blobs_12.13.0-beta.1.tar.gz
|
||||
URL_HASH SHA256=3eca486fd60e3522d0a633025ecd652a71515b1e944799b2e8ee31fd590305a9
|
||||
DEPENDS xml2
|
||||
DOWNLOAD_NO_PROGRESS 1
|
||||
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1"
|
||||
|
|
|
@ -166,6 +166,10 @@ IF(${BUILD_WITH_ANALYSIS})
|
|||
set(BUILD_WITH_S3 ON)
|
||||
ENDIF()
|
||||
|
||||
IF(${TD_LINUX})
|
||||
set(BUILD_WITH_ANALYSIS ON)
|
||||
ENDIF()
|
||||
|
||||
IF(${BUILD_S3})
|
||||
|
||||
IF(${BUILD_WITH_S3})
|
||||
|
@ -205,13 +209,6 @@ option(
|
|||
off
|
||||
)
|
||||
|
||||
|
||||
option(
|
||||
BUILD_WITH_NURAFT
|
||||
"If build with NuRaft"
|
||||
OFF
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_WITH_UV
|
||||
"If build with libuv"
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
IF (DEFINED VERNUMBER)
|
||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||
ELSE ()
|
||||
SET(TD_VER_NUMBER "3.3.5.0.alpha")
|
||||
SET(TD_VER_NUMBER "3.3.5.2.alpha")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED VERCOMPATIBLE)
|
||||
|
|
|
@ -12,7 +12,7 @@ ExternalProject_Add(curl2
|
|||
BUILD_IN_SOURCE TRUE
|
||||
BUILD_ALWAYS 1
|
||||
UPDATE_COMMAND ""
|
||||
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl #--enable-debug
|
||||
CONFIGURE_COMMAND ${CONTRIB_CONFIG_ENV} ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-websockets --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl --without-librtmp #--enable-debug
|
||||
BUILD_COMMAND make -j
|
||||
INSTALL_COMMAND make install
|
||||
TEST_COMMAND ""
|
||||
|
|
|
@ -6,9 +6,9 @@ ExternalProject_Add(openssl
|
|||
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/openssl"
|
||||
BUILD_IN_SOURCE TRUE
|
||||
#BUILD_ALWAYS 1
|
||||
#UPDATE_COMMAND ""
|
||||
CONFIGURE_COMMAND ./Configure --prefix=$ENV{HOME}/.cos-local.2 no-shared
|
||||
BUILD_ALWAYS 1
|
||||
UPDATE_COMMAND ""
|
||||
CONFIGURE_COMMAND ${CONTRIB_CONFIG_ENV} ./Configure --prefix=$ENV{HOME}/.cos-local.2 no-shared
|
||||
BUILD_COMMAND make -j
|
||||
INSTALL_COMMAND make install_sw -j
|
||||
TEST_COMMAND ""
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taosadapter
|
||||
ExternalProject_Add(taosadapter
|
||||
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
||||
GIT_TAG main
|
||||
GIT_TAG 3.0
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -1,13 +0,0 @@
|
|||
|
||||
# taos-tools
|
||||
ExternalProject_Add(taos-tools
|
||||
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
|
||||
GIT_TAG main
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
|
@ -2,7 +2,7 @@
|
|||
# taosws-rs
|
||||
ExternalProject_Add(taosws-rs
|
||||
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
|
||||
GIT_TAG main
|
||||
GIT_TAG 3.0
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -1,19 +1,16 @@
|
|||
|
||||
# xml2
|
||||
ExternalProject_Add(xml2
|
||||
URL https://download.gnome.org/sources/libxml2/2.11/libxml2-2.11.5.tar.xz
|
||||
URL_HASH SHA256=3727b078c360ec69fa869de14bd6f75d7ee8d36987b071e6928d4720a28df3a6
|
||||
#https://github.com/GNOME/libxml2/archive/refs/tags/v2.11.5.tar.gz
|
||||
#GIT_REPOSITORY https://github.com/GNOME/libxml2
|
||||
#GIT_TAG v2.11.5
|
||||
URL https://github.com/GNOME/libxml2/archive/refs/tags/v2.10.4.tar.gz
|
||||
URL_HASH SHA256=6f6fb27f91bb65f9d7196e3c616901b3e18a7dea31ccc2ae857940b125faa780
|
||||
DOWNLOAD_NO_PROGRESS 1
|
||||
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/xml2"
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/libxml2"
|
||||
#BINARY_DIR ""
|
||||
BUILD_IN_SOURCE TRUE
|
||||
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --enable-shared=no --enable-static=yes --without-python --without-lzma
|
||||
BUILD_COMMAND make -j
|
||||
INSTALL_COMMAND make install && ln -sf $ENV{HOME}/.cos-local.2/include/libxml2/libxml $ENV{HOME}/.cos-local.2/include/libxml
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -17,7 +17,6 @@ elseif(${BUILD_WITH_COS})
|
|||
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
|
||||
cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||
cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||
endif(${BUILD_WITH_COS})
|
||||
|
||||
configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
|
||||
|
@ -43,11 +42,6 @@ endif()
|
|||
set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
|
||||
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
|
||||
# taos-tools
|
||||
if(${BUILD_TOOLS})
|
||||
cat("${TD_SUPPORT_DIR}/taostools_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif()
|
||||
|
||||
# taosws-rs
|
||||
if(${WEBSOCKET})
|
||||
cat("${TD_SUPPORT_DIR}/taosws_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
|
@ -146,11 +140,16 @@ if(${BUILD_WITH_SQLITE})
|
|||
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif(${BUILD_WITH_SQLITE})
|
||||
|
||||
# libcurl
|
||||
if(NOT ${TD_WINDOWS})
|
||||
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.2/)
|
||||
cat("${TD_SUPPORT_DIR}/ssl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif(NOT ${TD_WINDOWS})
|
||||
|
||||
# s3
|
||||
if(${BUILD_WITH_S3})
|
||||
cat("${TD_SUPPORT_DIR}/ssl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
cat("${TD_SUPPORT_DIR}/xml2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
cat("${TD_SUPPORT_DIR}/libs3_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
cat("${TD_SUPPORT_DIR}/azure_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
add_definitions(-DUSE_S3)
|
||||
|
@ -160,7 +159,6 @@ elseif(${BUILD_WITH_COS})
|
|||
# cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
# cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
# cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
# cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
add_definitions(-DUSE_COS)
|
||||
endif()
|
||||
|
@ -199,12 +197,26 @@ endif()
|
|||
# lemon
|
||||
cat("${TD_SUPPORT_DIR}/lemon_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
|
||||
# Force specify CC=cc on MacOS. Because the default CC setting in the generated Makefile has issues finding standard library headers
|
||||
IF(${TD_DARWIN})
|
||||
SET(CONTRIB_CONFIG_ENV "CC=cc")
|
||||
ENDIF()
|
||||
|
||||
# download dependencies
|
||||
configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
|
||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download"
|
||||
RESULT_VARIABLE result)
|
||||
IF(NOT result EQUAL "0")
|
||||
message(FATAL_ERROR "CMake step for dowloading dependencies failed: ${result}")
|
||||
ENDIF()
|
||||
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
|
||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download"
|
||||
RESULT_VARIABLE result)
|
||||
IF(NOT result EQUAL "0")
|
||||
message(FATAL_ERROR "CMake step for building dependencies failed: ${result}")
|
||||
ENDIF()
|
||||
|
||||
# ================================================================================================
|
||||
# Build
|
||||
|
@ -652,7 +664,12 @@ if(${BUILD_PCRE2})
|
|||
endif(${BUILD_PCRE2})
|
||||
|
||||
if(${TD_LINUX} AND ${BUILD_WITH_S3})
|
||||
add_subdirectory(azure-cmake EXCLUDE_FROM_ALL)
|
||||
set(ORIG_CMAKE_C_FLAGS ${CMAKE_C_FLAGS})
|
||||
string(REPLACE " -Werror " " " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
|
||||
add_subdirectory(xml2-cmake)
|
||||
set(CMAKE_C_FLAGS ${ORIG_CMAKE_C_FLAGS})
|
||||
|
||||
add_subdirectory(azure-cmake)
|
||||
endif()
|
||||
|
||||
IF(TD_LINUX)
|
||||
|
|
|
@ -36,10 +36,6 @@ target_include_directories(
|
|||
)
|
||||
|
||||
find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
|
||||
# find_library(CURL_LIBRARY curl)
|
||||
# find_library(XML2_LIBRARY xml2)
|
||||
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
|
||||
|
@ -50,9 +46,8 @@ target_link_libraries(
|
|||
PRIVATE ${CURL_LIBRARY}
|
||||
PRIVATE ${SSL_LIBRARY}
|
||||
PRIVATE ${CRYPTO_LIBRARY}
|
||||
PRIVATE ${XML2_LIBRARY}
|
||||
|
||||
# PRIVATE xml2
|
||||
PRIVATE _libxml2
|
||||
PRIVATE zlib
|
||||
|
||||
# PRIVATE ${CoreFoundation_Library}
|
||||
|
|
|
@ -20,14 +20,6 @@ if(${BUILD_WITH_SQLITE})
|
|||
add_subdirectory(sqlite)
|
||||
endif(${BUILD_WITH_SQLITE})
|
||||
|
||||
if(${BUILD_WITH_CRAFT})
|
||||
add_subdirectory(craft)
|
||||
endif(${BUILD_WITH_CRAFT})
|
||||
|
||||
if(${BUILD_WITH_TRAFT})
|
||||
# add_subdirectory(traft)
|
||||
endif(${BUILD_WITH_TRAFT})
|
||||
|
||||
if(${BUILD_S3})
|
||||
add_subdirectory(azure)
|
||||
endif()
|
||||
|
|
|
@ -0,0 +1,58 @@
|
|||
set(LIBXML2_SOURCE_DIR "${TD_CONTRIB_DIR}/libxml2")
|
||||
|
||||
set(SRCS
|
||||
"${LIBXML2_SOURCE_DIR}/SAX.c"
|
||||
"${LIBXML2_SOURCE_DIR}/entities.c"
|
||||
"${LIBXML2_SOURCE_DIR}/encoding.c"
|
||||
"${LIBXML2_SOURCE_DIR}/error.c"
|
||||
"${LIBXML2_SOURCE_DIR}/parserInternals.c"
|
||||
"${LIBXML2_SOURCE_DIR}/parser.c"
|
||||
"${LIBXML2_SOURCE_DIR}/tree.c"
|
||||
"${LIBXML2_SOURCE_DIR}/hash.c"
|
||||
"${LIBXML2_SOURCE_DIR}/list.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlIO.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlmemory.c"
|
||||
"${LIBXML2_SOURCE_DIR}/uri.c"
|
||||
"${LIBXML2_SOURCE_DIR}/valid.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xlink.c"
|
||||
"${LIBXML2_SOURCE_DIR}/HTMLparser.c"
|
||||
"${LIBXML2_SOURCE_DIR}/HTMLtree.c"
|
||||
"${LIBXML2_SOURCE_DIR}/debugXML.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xpath.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xpointer.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xinclude.c"
|
||||
"${LIBXML2_SOURCE_DIR}/nanohttp.c"
|
||||
"${LIBXML2_SOURCE_DIR}/nanoftp.c"
|
||||
"${LIBXML2_SOURCE_DIR}/catalog.c"
|
||||
"${LIBXML2_SOURCE_DIR}/globals.c"
|
||||
"${LIBXML2_SOURCE_DIR}/threads.c"
|
||||
"${LIBXML2_SOURCE_DIR}/c14n.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlstring.c"
|
||||
"${LIBXML2_SOURCE_DIR}/buf.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlregexp.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlschemas.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlschemastypes.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlunicode.c"
|
||||
"${LIBXML2_SOURCE_DIR}/triostr.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlreader.c"
|
||||
"${LIBXML2_SOURCE_DIR}/relaxng.c"
|
||||
"${LIBXML2_SOURCE_DIR}/dict.c"
|
||||
"${LIBXML2_SOURCE_DIR}/SAX2.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlwriter.c"
|
||||
"${LIBXML2_SOURCE_DIR}/legacy.c"
|
||||
"${LIBXML2_SOURCE_DIR}/chvalid.c"
|
||||
"${LIBXML2_SOURCE_DIR}/pattern.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlsave.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlmodule.c"
|
||||
"${LIBXML2_SOURCE_DIR}/schematron.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xzlib.c"
|
||||
)
|
||||
add_library(_libxml2 ${SRCS})
|
||||
|
||||
#target_link_libraries(_libxml2 PRIVATE td_contrib::zlib)
|
||||
target_link_libraries(_libxml2 PRIVATE zlib)
|
||||
|
||||
target_include_directories(_libxml2 BEFORE PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include")
|
||||
target_include_directories(_libxml2 BEFORE PUBLIC "${LIBXML2_SOURCE_DIR}/include")
|
||||
|
||||
add_library(td_contrib::libxml2 ALIAS _libxml2)
|
|
@ -0,0 +1,285 @@
|
|||
/* config.h. Generated from config.h.in by configure. */
|
||||
/* config.h.in. Generated from configure.ac by autoheader. */
|
||||
|
||||
/* Type cast for the gethostbyname() argument */
|
||||
#define GETHOSTBYNAME_ARG_CAST /**/
|
||||
|
||||
/* Define to 1 if you have the <arpa/inet.h> header file. */
|
||||
#define HAVE_ARPA_INET_H 1
|
||||
|
||||
/* Define to 1 if you have the <arpa/nameser.h> header file. */
|
||||
#define HAVE_ARPA_NAMESER_H 1
|
||||
|
||||
/* Whether struct sockaddr::__ss_family exists */
|
||||
/* #undef HAVE_BROKEN_SS_FAMILY */
|
||||
|
||||
/* Define to 1 if you have the <ctype.h> header file. */
|
||||
#define HAVE_CTYPE_H 1
|
||||
|
||||
/* Define to 1 if you have the <dirent.h> header file. */
|
||||
#define HAVE_DIRENT_H 1
|
||||
|
||||
/* Define to 1 if you have the <dlfcn.h> header file. */
|
||||
#define HAVE_DLFCN_H 1
|
||||
|
||||
/* Have dlopen based dso */
|
||||
#define HAVE_DLOPEN /**/
|
||||
|
||||
/* Define to 1 if you have the <dl.h> header file. */
|
||||
/* #undef HAVE_DL_H */
|
||||
|
||||
/* Define to 1 if you have the <errno.h> header file. */
|
||||
#define HAVE_ERRNO_H 1
|
||||
|
||||
/* Define to 1 if you have the <fcntl.h> header file. */
|
||||
#define HAVE_FCNTL_H 1
|
||||
|
||||
/* Define to 1 if you have the <float.h> header file. */
|
||||
#define HAVE_FLOAT_H 1
|
||||
|
||||
/* Define to 1 if you have the `fprintf' function. */
|
||||
#define HAVE_FPRINTF 1
|
||||
|
||||
/* Define to 1 if you have the `ftime' function. */
|
||||
#define HAVE_FTIME 1
|
||||
|
||||
/* Define if getaddrinfo is there */
|
||||
#define HAVE_GETADDRINFO /**/
|
||||
|
||||
/* Define to 1 if you have the `gettimeofday' function. */
|
||||
#define HAVE_GETTIMEOFDAY 1
|
||||
|
||||
/* Define to 1 if you have the <inttypes.h> header file. */
|
||||
#define HAVE_INTTYPES_H 1
|
||||
|
||||
/* Define to 1 if you have the `isascii' function. */
|
||||
#define HAVE_ISASCII 1
|
||||
|
||||
/* Define if isinf is there */
|
||||
#define HAVE_ISINF /**/
|
||||
|
||||
/* Define if isnan is there */
|
||||
#define HAVE_ISNAN /**/
|
||||
|
||||
/* Define if history library is there (-lhistory) */
|
||||
/* #undef HAVE_LIBHISTORY */
|
||||
|
||||
/* Define if pthread library is there (-lpthread) */
|
||||
#define HAVE_LIBPTHREAD /**/
|
||||
|
||||
/* Define if readline library is there (-lreadline) */
|
||||
/* #undef HAVE_LIBREADLINE */
|
||||
|
||||
/* Define to 1 if you have the <limits.h> header file. */
|
||||
#define HAVE_LIMITS_H 1
|
||||
|
||||
/* Define to 1 if you have the `localtime' function. */
|
||||
#define HAVE_LOCALTIME 1
|
||||
|
||||
/* Define to 1 if you have the <lzma.h> header file. */
|
||||
/* #undef HAVE_LZMA_H */
|
||||
|
||||
/* Define to 1 if you have the <malloc.h> header file. */
|
||||
#define HAVE_MALLOC_H 1
|
||||
|
||||
/* Define to 1 if you have the <math.h> header file. */
|
||||
#define HAVE_MATH_H 1
|
||||
|
||||
/* Define to 1 if you have the <memory.h> header file. */
|
||||
#define HAVE_MEMORY_H 1
|
||||
|
||||
/* Define to 1 if you have the `mmap' function. */
|
||||
#define HAVE_MMAP 1
|
||||
|
||||
/* Define to 1 if you have the `munmap' function. */
|
||||
#define HAVE_MUNMAP 1
|
||||
|
||||
/* mmap() is no good without munmap() */
|
||||
#if defined(HAVE_MMAP) && !defined(HAVE_MUNMAP)
|
||||
# undef /**/ HAVE_MMAP
|
||||
#endif
|
||||
|
||||
/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
|
||||
/* #undef HAVE_NDIR_H */
|
||||
|
||||
/* Define to 1 if you have the <netdb.h> header file. */
|
||||
#define HAVE_NETDB_H 1
|
||||
|
||||
/* Define to 1 if you have the <netinet/in.h> header file. */
|
||||
#define HAVE_NETINET_IN_H 1
|
||||
|
||||
/* Define to 1 if you have the <poll.h> header file. */
|
||||
#define HAVE_POLL_H 1
|
||||
|
||||
/* Define to 1 if you have the `printf' function. */
|
||||
#define HAVE_PRINTF 1
|
||||
|
||||
/* Define if <pthread.h> is there */
|
||||
#define HAVE_PTHREAD_H /**/
|
||||
|
||||
/* Define to 1 if you have the `putenv' function. */
|
||||
#define HAVE_PUTENV 1
|
||||
|
||||
/* Define to 1 if you have the `rand' function. */
|
||||
#define HAVE_RAND 1
|
||||
|
||||
/* Define to 1 if you have the `rand_r' function. */
|
||||
#define HAVE_RAND_R 1
|
||||
|
||||
/* Define to 1 if you have the <resolv.h> header file. */
|
||||
#define HAVE_RESOLV_H 1
|
||||
|
||||
/* Have shl_load based dso */
|
||||
/* #undef HAVE_SHLLOAD */
|
||||
|
||||
/* Define to 1 if you have the `signal' function. */
|
||||
#define HAVE_SIGNAL 1
|
||||
|
||||
/* Define to 1 if you have the <signal.h> header file. */
|
||||
#define HAVE_SIGNAL_H 1
|
||||
|
||||
/* Define to 1 if you have the `snprintf' function. */
|
||||
#define HAVE_SNPRINTF 1
|
||||
|
||||
/* Define to 1 if you have the `sprintf' function. */
|
||||
#define HAVE_SPRINTF 1
|
||||
|
||||
/* Define to 1 if you have the `srand' function. */
|
||||
#define HAVE_SRAND 1
|
||||
|
||||
/* Define to 1 if you have the `sscanf' function. */
|
||||
#define HAVE_SSCANF 1
|
||||
|
||||
/* Define to 1 if you have the `stat' function. */
|
||||
#define HAVE_STAT 1
|
||||
|
||||
/* Define to 1 if you have the <stdarg.h> header file. */
|
||||
#define HAVE_STDARG_H 1
|
||||
|
||||
/* Define to 1 if you have the <stdint.h> header file. */
|
||||
#define HAVE_STDINT_H 1
|
||||
|
||||
/* Define to 1 if you have the <stdlib.h> header file. */
|
||||
#define HAVE_STDLIB_H 1
|
||||
|
||||
/* Define to 1 if you have the `strftime' function. */
|
||||
#define HAVE_STRFTIME 1
|
||||
|
||||
/* Define to 1 if you have the <strings.h> header file. */
|
||||
#define HAVE_STRINGS_H 1
|
||||
|
||||
/* Define to 1 if you have the <string.h> header file. */
|
||||
#define HAVE_STRING_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
|
||||
*/
|
||||
/* #undef HAVE_SYS_DIR_H */
|
||||
|
||||
/* Define to 1 if you have the <sys/mman.h> header file. */
|
||||
#define HAVE_SYS_MMAN_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
|
||||
*/
|
||||
/* #undef HAVE_SYS_NDIR_H */
|
||||
|
||||
/* Define to 1 if you have the <sys/select.h> header file. */
|
||||
#define HAVE_SYS_SELECT_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/socket.h> header file. */
|
||||
#define HAVE_SYS_SOCKET_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/stat.h> header file. */
|
||||
#define HAVE_SYS_STAT_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/timeb.h> header file. */
|
||||
#define HAVE_SYS_TIMEB_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/time.h> header file. */
|
||||
#define HAVE_SYS_TIME_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/types.h> header file. */
|
||||
#define HAVE_SYS_TYPES_H 1
|
||||
|
||||
/* Define to 1 if you have the `time' function. */
|
||||
#define HAVE_TIME 1
|
||||
|
||||
/* Define to 1 if you have the <time.h> header file. */
|
||||
#define HAVE_TIME_H 1
|
||||
|
||||
/* Define to 1 if you have the <unistd.h> header file. */
|
||||
#define HAVE_UNISTD_H 1
|
||||
|
||||
/* Whether va_copy() is available */
|
||||
#define HAVE_VA_COPY 1
|
||||
|
||||
/* Define to 1 if you have the `vfprintf' function. */
|
||||
#define HAVE_VFPRINTF 1
|
||||
|
||||
/* Define to 1 if you have the `vsnprintf' function. */
|
||||
#define HAVE_VSNPRINTF 1
|
||||
|
||||
/* Define to 1 if you have the `vsprintf' function. */
|
||||
#define HAVE_VSPRINTF 1
|
||||
|
||||
/* Define to 1 if you have the <zlib.h> header file. */
|
||||
/* #undef HAVE_ZLIB_H */
|
||||
|
||||
/* Whether __va_copy() is available */
|
||||
/* #undef HAVE___VA_COPY */
|
||||
|
||||
/* Define as const if the declaration of iconv() needs const. */
|
||||
#define ICONV_CONST
|
||||
|
||||
/* Define to the sub-directory where libtool stores uninstalled libraries. */
|
||||
#define LT_OBJDIR ".libs/"
|
||||
|
||||
/* Name of package */
|
||||
#define PACKAGE "libxml2"
|
||||
|
||||
/* Define to the address where bug reports for this package should be sent. */
|
||||
#define PACKAGE_BUGREPORT ""
|
||||
|
||||
/* Define to the full name of this package. */
|
||||
#define PACKAGE_NAME ""
|
||||
|
||||
/* Define to the full name and version of this package. */
|
||||
#define PACKAGE_STRING ""
|
||||
|
||||
/* Define to the one symbol short name of this package. */
|
||||
#define PACKAGE_TARNAME ""
|
||||
|
||||
/* Define to the home page for this package. */
|
||||
#define PACKAGE_URL ""
|
||||
|
||||
/* Define to the version of this package. */
|
||||
#define PACKAGE_VERSION ""
|
||||
|
||||
/* Type cast for the send() function 2nd arg */
|
||||
#define SEND_ARG2_CAST /**/
|
||||
|
||||
/* Define to 1 if you have the ANSI C header files. */
|
||||
#define STDC_HEADERS 1
|
||||
|
||||
/* Support for IPv6 */
|
||||
#define SUPPORT_IP6 /**/
|
||||
|
||||
/* Define if va_list is an array type */
|
||||
#define VA_LIST_IS_ARRAY 1
|
||||
|
||||
/* Version number of package */
|
||||
#define VERSION "2.9.8"
|
||||
|
||||
/* Determine what socket length (socklen_t) data type is */
|
||||
#define XML_SOCKLEN_T socklen_t
|
||||
|
||||
/* Define for Solaris 2.5.1 so the uint32_t typedef from <sys/synch.h>,
|
||||
<pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
|
||||
#define below would cause a syntax error. */
|
||||
/* #undef _UINT32_T */
|
||||
|
||||
/* ss_family is not defined here, use __ss_family instead */
|
||||
/* #undef ss_family */
|
||||
|
||||
/* Define to the type of an unsigned integer type of width exactly 32 bits if
|
||||
such a type exists and the standard includes do not define it. */
|
||||
/* #undef uint32_t */
|
|
@ -0,0 +1,501 @@
|
|||
/*
|
||||
* Summary: compile-time version information
|
||||
* Description: compile-time version information for the XML library
|
||||
*
|
||||
* Copy: See Copyright for the status of this software.
|
||||
*
|
||||
* Author: Daniel Veillard
|
||||
*/
|
||||
|
||||
#ifndef __XML_VERSION_H__
|
||||
#define __XML_VERSION_H__
|
||||
|
||||
#include <libxml/xmlexports.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*
|
||||
* use those to be sure nothing nasty will happen if
|
||||
* your library and includes mismatch
|
||||
*/
|
||||
#ifndef LIBXML2_COMPILING_MSCCDEF
|
||||
XMLPUBFUN void XMLCALL xmlCheckVersion(int version);
|
||||
#endif /* LIBXML2_COMPILING_MSCCDEF */
|
||||
|
||||
/**
|
||||
* LIBXML_DOTTED_VERSION:
|
||||
*
|
||||
* the version string like "1.2.3"
|
||||
*/
|
||||
#define LIBXML_DOTTED_VERSION "2.10.3"
|
||||
|
||||
/**
|
||||
* LIBXML_VERSION:
|
||||
*
|
||||
* the version number: 1.2.3 value is 10203
|
||||
*/
|
||||
#define LIBXML_VERSION 21003
|
||||
|
||||
/**
|
||||
* LIBXML_VERSION_STRING:
|
||||
*
|
||||
* the version number string, 1.2.3 value is "10203"
|
||||
*/
|
||||
#define LIBXML_VERSION_STRING "21003"
|
||||
|
||||
/**
|
||||
* LIBXML_VERSION_EXTRA:
|
||||
*
|
||||
* extra version information, used to show a git commit description
|
||||
*/
|
||||
#define LIBXML_VERSION_EXTRA ""
|
||||
|
||||
/**
|
||||
* LIBXML_TEST_VERSION:
|
||||
*
|
||||
* Macro to check that the libxml version in use is compatible with
|
||||
* the version the software has been compiled against
|
||||
*/
|
||||
#define LIBXML_TEST_VERSION xmlCheckVersion(21003);
|
||||
|
||||
#ifndef VMS
|
||||
#if 0
|
||||
/**
|
||||
* WITH_TRIO:
|
||||
*
|
||||
* defined if the trio support need to be configured in
|
||||
*/
|
||||
#define WITH_TRIO
|
||||
#else
|
||||
/**
|
||||
* WITHOUT_TRIO:
|
||||
*
|
||||
* defined if the trio support should not be configured in
|
||||
*/
|
||||
#define WITHOUT_TRIO
|
||||
#endif
|
||||
#else /* VMS */
|
||||
/**
|
||||
* WITH_TRIO:
|
||||
*
|
||||
* defined if the trio support need to be configured in
|
||||
*/
|
||||
#define WITH_TRIO 1
|
||||
#endif /* VMS */
|
||||
|
||||
/**
|
||||
* LIBXML_THREAD_ENABLED:
|
||||
*
|
||||
* Whether the thread support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_THREAD_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_THREAD_ALLOC_ENABLED:
|
||||
*
|
||||
* Whether the allocation hooks are per-thread
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_THREAD_ALLOC_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_TREE_ENABLED:
|
||||
*
|
||||
* Whether the DOM like tree manipulation API support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_TREE_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_OUTPUT_ENABLED:
|
||||
*
|
||||
* Whether the serialization/saving support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_OUTPUT_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_PUSH_ENABLED:
|
||||
*
|
||||
* Whether the push parsing interfaces are configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_PUSH_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_READER_ENABLED:
|
||||
*
|
||||
* Whether the xmlReader parsing interface is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_READER_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_PATTERN_ENABLED:
|
||||
*
|
||||
* Whether the xmlPattern node selection interface is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_PATTERN_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_WRITER_ENABLED:
|
||||
*
|
||||
* Whether the xmlWriter saving interface is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_WRITER_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_SAX1_ENABLED:
|
||||
*
|
||||
* Whether the older SAX1 interface is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_SAX1_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_FTP_ENABLED:
|
||||
*
|
||||
* Whether the FTP support is configured in
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_FTP_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_HTTP_ENABLED:
|
||||
*
|
||||
* Whether the HTTP support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_HTTP_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_VALID_ENABLED:
|
||||
*
|
||||
* Whether the DTD validation support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_VALID_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_HTML_ENABLED:
|
||||
*
|
||||
* Whether the HTML support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_HTML_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_LEGACY_ENABLED:
|
||||
*
|
||||
* Whether the deprecated APIs are compiled in for compatibility
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_LEGACY_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_C14N_ENABLED:
|
||||
*
|
||||
* Whether the Canonicalization support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_C14N_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_CATALOG_ENABLED:
|
||||
*
|
||||
* Whether the Catalog support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_CATALOG_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_XPATH_ENABLED:
|
||||
*
|
||||
* Whether XPath is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_XPATH_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_XPTR_ENABLED:
|
||||
*
|
||||
* Whether XPointer is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_XPTR_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_XPTR_LOCS_ENABLED:
|
||||
*
|
||||
* Whether support for XPointer locations is configured in
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_XPTR_LOCS_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_XINCLUDE_ENABLED:
|
||||
*
|
||||
* Whether XInclude is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_XINCLUDE_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ICONV_ENABLED:
|
||||
*
|
||||
* Whether iconv support is available
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_ICONV_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ICU_ENABLED:
|
||||
*
|
||||
* Whether icu support is available
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_ICU_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ISO8859X_ENABLED:
|
||||
*
|
||||
* Whether ISO-8859-* support is made available in case iconv is not
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_ISO8859X_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_DEBUG_ENABLED:
|
||||
*
|
||||
* Whether Debugging module is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_DEBUG_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* DEBUG_MEMORY_LOCATION:
|
||||
*
|
||||
* Whether the memory debugging is configured in
|
||||
*/
|
||||
#if 0
|
||||
#define DEBUG_MEMORY_LOCATION
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_DEBUG_RUNTIME:
|
||||
*
|
||||
* Whether the runtime debugging is configured in
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_DEBUG_RUNTIME
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_UNICODE_ENABLED:
|
||||
*
|
||||
* Whether the Unicode related interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_UNICODE_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_REGEXP_ENABLED:
|
||||
*
|
||||
* Whether the regular expressions interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_REGEXP_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_AUTOMATA_ENABLED:
|
||||
*
|
||||
* Whether the automata interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_AUTOMATA_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_EXPR_ENABLED:
|
||||
*
|
||||
* Whether the formal expressions interfaces are compiled in
|
||||
*
|
||||
* This code is unused and disabled unconditionally for now.
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_EXPR_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_SCHEMAS_ENABLED:
|
||||
*
|
||||
* Whether the Schemas validation interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_SCHEMAS_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_SCHEMATRON_ENABLED:
|
||||
*
|
||||
* Whether the Schematron validation interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_SCHEMATRON_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_MODULES_ENABLED:
|
||||
*
|
||||
* Whether the module interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_MODULES_ENABLED
|
||||
/**
|
||||
* LIBXML_MODULE_EXTENSION:
|
||||
*
|
||||
* the string suffix used by dynamic modules (usually shared libraries)
|
||||
*/
|
||||
#define LIBXML_MODULE_EXTENSION ".so"
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ZLIB_ENABLED:
|
||||
*
|
||||
* Whether the Zlib support is compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_ZLIB_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_LZMA_ENABLED:
|
||||
*
|
||||
* Whether the Lzma support is compiled in
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_LZMA_ENABLED
|
||||
#endif
|
||||
|
||||
#ifdef __GNUC__
|
||||
|
||||
/**
|
||||
* ATTRIBUTE_UNUSED:
|
||||
*
|
||||
* Macro used to signal to GCC unused function parameters
|
||||
*/
|
||||
|
||||
#ifndef ATTRIBUTE_UNUSED
|
||||
# if ((__GNUC__ > 2) || ((__GNUC__ == 2) && (__GNUC_MINOR__ >= 7)))
|
||||
# define ATTRIBUTE_UNUSED __attribute__((unused))
|
||||
# else
|
||||
# define ATTRIBUTE_UNUSED
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ATTR_ALLOC_SIZE:
|
||||
*
|
||||
* Macro used to indicate to GCC this is an allocator function
|
||||
*/
|
||||
|
||||
#ifndef LIBXML_ATTR_ALLOC_SIZE
|
||||
# if (!defined(__clang__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3))))
|
||||
# define LIBXML_ATTR_ALLOC_SIZE(x) __attribute__((alloc_size(x)))
|
||||
# else
|
||||
# define LIBXML_ATTR_ALLOC_SIZE(x)
|
||||
# endif
|
||||
#else
|
||||
# define LIBXML_ATTR_ALLOC_SIZE(x)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ATTR_FORMAT:
|
||||
*
|
||||
* Macro used to indicate to GCC the parameter are printf like
|
||||
*/
|
||||
|
||||
#ifndef LIBXML_ATTR_FORMAT
|
||||
# if ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)))
|
||||
# define LIBXML_ATTR_FORMAT(fmt,args) __attribute__((__format__(__printf__,fmt,args)))
|
||||
# else
|
||||
# define LIBXML_ATTR_FORMAT(fmt,args)
|
||||
# endif
|
||||
#else
|
||||
# define LIBXML_ATTR_FORMAT(fmt,args)
|
||||
#endif
|
||||
|
||||
#ifndef XML_DEPRECATED
|
||||
# ifdef IN_LIBXML
|
||||
# define XML_DEPRECATED
|
||||
# else
|
||||
/* Available since at least GCC 3.1 */
|
||||
# define XML_DEPRECATED __attribute__((deprecated))
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#else /* ! __GNUC__ */
|
||||
/**
|
||||
* ATTRIBUTE_UNUSED:
|
||||
*
|
||||
* Macro used to signal to GCC unused function parameters
|
||||
*/
|
||||
#define ATTRIBUTE_UNUSED
|
||||
/**
|
||||
* LIBXML_ATTR_ALLOC_SIZE:
|
||||
*
|
||||
* Macro used to indicate to GCC this is an allocator function
|
||||
*/
|
||||
#define LIBXML_ATTR_ALLOC_SIZE(x)
|
||||
/**
|
||||
* LIBXML_ATTR_FORMAT:
|
||||
*
|
||||
* Macro used to indicate to GCC the parameter are printf like
|
||||
*/
|
||||
#define LIBXML_ATTR_FORMAT(fmt,args)
|
||||
/**
|
||||
* XML_DEPRECATED:
|
||||
*
|
||||
* Macro used to indicate that a function, variable, type or struct member
|
||||
* is deprecated.
|
||||
*/
|
||||
#ifndef XML_DEPRECATED
|
||||
#define XML_DEPRECATED
|
||||
#endif
|
||||
#endif /* __GNUC__ */
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif /* __cplusplus */
|
||||
#endif
|
|
@ -109,7 +109,7 @@ If you are using Maven to manage your project, simply add the following dependen
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.4.0</version>
|
||||
<version>3.5.2</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
|
|
@ -15,6 +15,19 @@ When inserting data using parameter binding, it can avoid the resource consumpti
|
|||
|
||||
**Tips: It is recommended to use parameter binding for data insertion**
|
||||
|
||||
:::note
|
||||
We only recommend using the following two forms of SQL for parameter binding data insertion:
|
||||
|
||||
```sql
|
||||
a. Subtables already exist:
|
||||
1. INSERT INTO meters (tbname, ts, current, voltage, phase) VALUES(?, ?, ?, ?, ?)
|
||||
b. Automatic table creation on insert:
|
||||
1. INSERT INTO meters (tbname, ts, current, voltage, phase, location, group_id) VALUES(?, ?, ?, ?, ?, ?, ?)
|
||||
2. INSERT INTO ? USING meters TAGS (?, ?) VALUES (?, ?, ?, ?)
|
||||
```
|
||||
|
||||
:::
|
||||
|
||||
Next, we continue to use smart meters as an example to demonstrate the efficient writing functionality of parameter binding with various language connectors:
|
||||
|
||||
1. Prepare a parameterized SQL insert statement for inserting data into the supertable `meters`. This statement allows dynamically specifying subtable names, tags, and column values.
|
||||
|
@ -28,8 +41,15 @@ Next, we continue to use smart meters as an example to demonstrate the efficient
|
|||
|
||||
<Tabs defaultValue="java" groupId="lang">
|
||||
<TabItem value="java" label="Java">
|
||||
|
||||
There are two kinds of interfaces for parameter binding: one is the standard JDBC interface, and the other is an extended interface. The extended interface offers better performance.
|
||||
|
||||
```java
|
||||
{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java:para_bind}}
|
||||
{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingStdInterfaceDemo.java:para_bind}}
|
||||
```
|
||||
|
||||
```java
|
||||
{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingExtendInterfaceDemo.java:para_bind}}
|
||||
```
|
||||
|
||||
This is a [more detailed parameter binding example](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java)
|
||||
|
@ -91,14 +111,24 @@ This is a [more detailed parameter binding example](https://github.com/taosdata/
|
|||
<TabItem label="Python" value="python">
|
||||
|
||||
```python
|
||||
{{#include docs/examples/python/stmt_native.py}}
|
||||
{{#include docs/examples/python/stmt2_native.py}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="Go" value="go">
|
||||
|
||||
The example code for binding parameters with stmt2 (Go connector v3.6.0 and above, TDengine v3.3.5.0 and above) is as follows:
|
||||
|
||||
```go
|
||||
{{#include docs/examples/go/stmt2/native/main.go}}
|
||||
```
|
||||
|
||||
The example code for binding parameters with stmt is as follows:
|
||||
|
||||
```go
|
||||
{{#include docs/examples/go/stmt/native/main.go}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="Rust" value="rust">
|
||||
|
||||
|
|
|
@ -31,12 +31,12 @@ There are many parameters for creating consumers, which flexibly support various
|
|||
|
||||
| Parameter Name | Type | Description | Remarks |
|
||||
| :-----------------------: | :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `td.connect.ip` | string | Server IP address | |
|
||||
| `td.connect.ip` | string | FQDN of server | IP address or host name |
|
||||
| `td.connect.user` | string | Username | |
|
||||
| `td.connect.pass` | string | Password | |
|
||||
| `td.connect.port` | integer | Server port number | |
|
||||
| `group.id` | string | Consumer group ID, the same consumer group shares consumption progress | <br />**Required**. Maximum length: 192.<br />Each topic can have up to 100 consumer groups |
|
||||
| `client.id` | string | Client ID | Maximum length: 192 |
|
||||
| `group.id` | string | Consumer group ID; the same consumer group shares consumption progress | <br />**Required**. Maximum length: 192; excess length will be cut off.<br />Each topic can have up to 100 consumer groups |
| `client.id` | string | Client ID | Maximum length: 255; excess length will be cut off. |
|
||||
| `auto.offset.reset` | enum | Initial position of the consumer group subscription | <br />`earliest`: default(version < 3.2.0.0); subscribe from the beginning; <br/>`latest`: default(version >= 3.2.0.0); only subscribe from the latest data; <br/>`none`: cannot subscribe without a committed offset |
|
||||
| `enable.auto.commit` | boolean | Whether to enable automatic consumption point submission, true: automatic submission, client application does not need to commit; false: client application needs to commit manually | Default is true |
|
||||
| `auto.commit.interval.ms` | integer | Time interval for automatically submitting consumption records, in milliseconds | Default is 5000 |
|
||||
|
|
|
@ -17,7 +17,9 @@ TDengine is designed for various writing scenarios, and many of these scenarios
|
|||
|
||||
```sql
|
||||
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
|
||||
SHOW COMPACTS [compact_id];
|
||||
COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY'];
|
||||
SHOW COMPACTS;
|
||||
SHOW COMPACT compact_id;
|
||||
KILL COMPACT compact_id;
|
||||
```
|
||||
|
||||
|
|
|
@ -1,14 +1,19 @@

---
title: Data Backup and Restoration
slug: /operations-and-maintenance/data-backup-and-restoration
---

import Image from '@theme/IdealImage';
import imgBackup from '../assets/data-backup-01.png';

You can back up the data in your TDengine cluster and restore it in the event that data is lost or damaged.

## Data Backup and Restoration Using taosdump

taosdump is an open-source tool that supports backing up data from a running TDengine cluster and restoring the backed-up data to the same or another running TDengine cluster. taosdump can back up a database as a logical data unit or back up data records within a specified time period in the database. When using taosdump, you can specify the directory path for the backup; if no path is specified, taosdump defaults to backing up data in the current directory.

### Back Up Data with taosdump

Below is an example of using taosdump to perform data backup.

@ -19,6 +24,8 @@ After executing the above command, taosdump will connect to the TDengine cluster

When using taosdump, if the specified storage path already contains data files, taosdump prompts the user and exits immediately to avoid overwriting data. This means the same storage path can be used for only one backup. If you see such a prompt, proceed carefully to avoid accidental data loss.

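For reference, a minimal sketch of a backup invocation follows; the database name `power` and the output path are illustrative, not part of the original example:

```shell
taosdump -h localhost -P 6030 -D power -o /file/path
```
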
### Restore Data with taosdump

To restore data files from a specified local path to a running TDengine cluster, execute the taosdump command with the appropriate command-line parameters and the data file path. Below is an example of using taosdump to perform data restoration.

@ -27,25 +34,62 @@ taosdump -i /file/path -h localhost -P 6030

After executing the above command, taosdump connects to the TDengine cluster at localhost:6030 and restores the data files from /file/path to the TDengine cluster.

## Data Backup and Restoration in TDengine Enterprise

TDengine Enterprise implements incremental backup and recovery of data by using data subscription. The backup and recovery functions of TDengine Enterprise involve the following concepts:

1. Incremental data backup: Based on TDengine's data subscription function, all data changes of the **backup object** (including insertions, modifications, deletions, and metadata changes) are recorded to generate backup files.
2. Data recovery: Use the backup files generated by incremental data backup to restore the **backup object** to a specified point in time.
3. Backup object: The object that the user backs up, which can be a **database** or a **supertable**.
4. Backup plan: A periodic backup task that the user creates for a backup object. The backup plan starts at a specified time point and executes the backup task periodically at intervals of the **backup cycle**. Each backup task generates a **backup point**.
5. Backup point: Each execution of a backup task generates a set of backup files corresponding to a point in time, called a **backup point**. The first backup point is called the **initial backup point**.
6. Restore task: The user selects a backup point in the backup plan and creates a restore task. The restore task starts from the **initial backup point** and replays the data changes in the backup files one by one, ending at the specified backup point.

### Incremental Backup Example

<figure>
<Image img={imgBackup} alt="Incremental backup process"/>
<figcaption>Figure 1. Incremental backup process</figcaption>
</figure>

1. The user creates a backup plan that executes a backup task every 1 day starting from 2024-08-27 00:00:00.
2. The first backup task is executed at 2024-08-27 00:00:00, generating the initial backup point.
3. After that, the backup task is executed every 1 day, generating additional backup points.
4. The user can select a backup point and create a restore task.
5. The restore task starts from the initial backup point, applies the backup points one by one, and restores the data to the specified backup point.

### Back Up Data in TDengine Enterprise

1. In a web browser, open the taosExplorer interface for TDengine. This interface is located on port 6060 of the hostname or IP address running TDengine, such as `http://localhost:6060`.
2. In the main menu on the left, click **Management** and open the **Backup** tab.
3. Under **Backup Plan**, click **Create New Backup** to define your backup plan.
   1. **Database:** Select the database that you want to back up. The database's `wal_retention_period` parameter must be greater than 0.
   2. **Super Table:** (Optional) Select the supertable that you want to back up. If you do not select a supertable, all data in the database is backed up.
   3. **Next execution time:** Enter the date and time when you want to perform the initial backup for this backup plan. If you specify a date and time in the past, the initial backup is performed immediately.
   4. **Backup Cycle:** Specify how often you want to perform incremental backups. The value of this field must be less than the value of `WAL_RETENTION_PERIOD` for the specified database.
   5. **Retry times:** Enter how many times you want to retry a backup task that has failed, provided that the specific failure might be resolved by retrying.
   6. **Retry interval:** Enter the delay in seconds between retry attempts.
   7. **Directory:** Enter the full path of the directory in which you want to store backup files, such as `/root/data_backup`.
   8. **Backup file max size:** Enter the maximum size of a single backup file. If the total size of your backup exceeds this number, the backup is split into multiple files.
   9. **Compression level:** Select **fastest** for the fastest performance but lowest compression ratio, **best** for the highest compression ratio but slowest performance, or **balanced** for a combination of performance and compression.
4. Click **Confirm** to create the backup plan.

You can view your backup plans and modify, clone, or delete them using the buttons in the **Operation** columns. Click **Refresh** to update the status of your plans. Note that you must stop a backup plan before you can delete it. You can also click **View** in the **Backup File** column to view the backup record points and files created by each plan.

### Restore Data in TDengine Enterprise

1. Locate the backup plan containing data that you want to restore and click **View** in the **Backup File** column.
2. Determine the backup record point to which you want to restore and click the Restore icon in the **Operation** column.
3. Select the backup file timestamp and target database and click **Confirm**.

## Troubleshooting

### Port Access Exception

A port access exception is indicated by the following error:

```text
Error: tmq to td task exec error
```

@ -54,9 +98,11 @@ Caused by:

```text
[0x000B] Unable to establish connection
```

If you encounter this error, check whether the data source FQDN is reachable and whether port 6030 is listening and accessible.

### Connection Issues

A connection issue is indicated by the task failing to start and reporting the following error:

```text
Error: tmq to td task exec error
```

@ -67,15 +113,16 @@ Caused by:

```text
2: failed to lookup address information: Temporary failure in name resolution
```

The following are some possible errors for WebSocket connections:

- "Temporary failure in name resolution": DNS resolution error. Check whether the specified IP address or FQDN can be accessed normally.
- "IO error: Connection refused (os error 111)": Port access failed. Check whether the port is configured correctly and is enabled and accessible.
- "IO error: received corrupt message": Message parsing failed. This may be because SSL was enabled using the WSS method, but the source port does not support it.
- "HTTP error: *": Confirm that you are connecting to the correct taosAdapter port and that your LSB/Nginx/Proxy has been configured correctly.
- "WebSocket protocol error: Handshake not finished": WebSocket connection error. This is typically caused by an incorrectly configured port.

### WAL Configuration

A WAL configuration issue is indicated by the task failing to start and reporting the following error:

```text
Error: tmq to td task exec error
```

@ -84,11 +131,8 @@ Caused by:

```text
[0x038C] WAL retention period is zero
```

To resolve this error, modify the WAL retention period for the affected database:

```sql
ALTER DATABASE test WAL_RETENTION_PERIOD 3600;
```

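To confirm the change took effect, you can query the database options. A quick check, assuming the `test` database and the `information_schema` layout of TDengine 3.x:

```sql
SELECT name, wal_retention_period FROM information_schema.ins_databases WHERE name = 'test';
```
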
@ -0,0 +1,376 @@

---
sidebar_label: Flink
title: TDengine Flink Connector
---

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

Apache Flink is an open-source, distributed stream and batch processing framework supported by the Apache Software Foundation. It can be used for many big data scenarios, such as stream processing, batch processing, complex event processing, real-time data warehouse construction, and providing real-time data support for machine learning. Flink also offers a wealth of connectors and tools that can interface with many different types of data sources for reading and writing. During data processing, Flink provides a series of reliable fault-tolerance mechanisms that effectively ensure tasks keep running stably and continuously even when unexpected situations occur.

With the TDengine Flink connector, Apache Flink integrates seamlessly with the TDengine database. On the one hand, it can accurately store the results of complex calculations and deep analysis in TDengine, achieving efficient storage and management of data. On the other hand, it can quickly and stably read massive amounts of data from TDengine and perform comprehensive, in-depth analysis on that basis, fully tapping the potential value of the data, providing strong data support and a scientific basis for enterprise decision-making, significantly improving the efficiency and quality of data processing, and enhancing an enterprise's competitiveness and capacity for innovation in the digital age.

## Prerequisites

Prepare the following environment:

- A TDengine cluster has been deployed and is running normally (both Enterprise and Community editions work).
- taosAdapter is running normally.
- Apache Flink v1.19.0 or above is installed. For installation instructions, refer to the official [Apache Flink documentation](https://flink.apache.org/).

## Supported platforms

The Flink connector supports all platforms that can run Flink 1.19 and above.

## Version History

| Flink Connector Version | Major Changes | TDengine Version |
|-------------------------| ------------------------------------ | ---------------- |
| 2.0.1 | Sink supports writing types from RowData implementations. | - |
| 2.0.0 | 1. Support SQL queries on data in TDengine databases. <br/> 2. Support CDC subscription to data in TDengine databases. <br/> 3. Support reading and writing to TDengine databases using Table SQL. | 3.3.5.1 and higher |
| 1.0.0 | Support Sink function to write data from other sources to TDengine. | 3.3.2.0 and higher |

## Exception and error codes

After a task fails, check the Flink task execution log to confirm the reason for the failure, and refer to the following table:

| Error Code | Description | Suggested Actions |
| ---------- | ----------- | ----------------- |
| 0xa000 | connection param error | Connector parameter error. |
| 0xa001 | the groupid parameter of CDC is incorrect | The groupid parameter of CDC is incorrect. |
| 0xa002 | wrong topic parameter for CDC | The topic parameter for CDC is incorrect. |
| 0xa010 | database name configuration error | Database name configuration error. |
| 0xa011 | table name configuration error | Table name configuration error. |
| 0xa012 | no data was obtained from the data source | Failed to retrieve data from the data source. |
| 0xa013 | value.deserializer parameter not set | No deserialization method set. |
| 0xa014 | list of column names set incorrectly | List of column names for the target table not set. |
| 0x2301 | connection already closed | The connection has been closed. Check the connection status or create a new connection to execute the relevant instructions. |
| 0x2302 | this operation is NOT supported currently | The current interface is not supported; switch to another connection method. |
| 0x2303 | invalid variables | The parameter is invalid. Check the corresponding interface specification and adjust the parameter type and size. |
| 0x2304 | statement is closed | The statement has already been closed. Check whether the statement was closed and reused, or whether the connection is working properly. |
| 0x2305 | resultSet is closed | The result set has been released. Check whether the result set was released and used again. |
| 0x230d | parameter index out of range | Parameter out of range; check the valid range of the parameter. |
| 0x230e | connection already closed | The connection has been closed. Check whether the connection was closed and used again, or whether the connection is working properly. |
| 0x230f | unknown SQL type in TDengine | Check the data types supported by TDengine. |
| 0x2315 | unknown taos type in TDengine | Check whether the correct TDengine data type was specified when converting a TDengine data type to a JDBC data type. |
| 0x2319 | user is required | Username information is missing when creating a connection. |
| 0x231a | password is required | Password information is missing when creating a connection. |
| 0x231d | can't create connection with server within | Increase the connection time by adding the parameter httpConnectTimeout, or check the connection to taosAdapter. |
| 0x231e | failed to complete the task within the specified time | Increase the execution time by adding the parameter messageWaitTimeout, or check the connection to taosAdapter. |
| 0x2352 | unsupported encoding | An unsupported character encoding set was specified under the native connection. |
| 0x2353 | internal error of database, please see taoslog for more details | An error occurred while executing prepareStatement on the native connection; check the taos log to locate the problem. |
| 0x2354 | connection is NULL | The connection was already closed while executing a command on the native connection. Check the connection to TDengine. |
| 0x2355 | result set is NULL | The native connection obtained an abnormal result set; check the connection status and retry. |
| 0x2356 | invalid num of fields | The metadata obtained from the native connection result set does not match. |
| 0x2357 | empty SQL string | Fill in the correct SQL statement for execution. |
| 0x2371 | consumer properties must not be null | The parameters are empty when creating a subscription; fill in the correct parameters. |
| 0x2375 | topic reference has been destroyed | The topic reference was released while creating the data subscription. Check the connection to TDengine. |
| 0x2376 | failed to set consumer topic, topic name is empty | The subscription topic name is empty while creating the data subscription. Check whether the specified topic name is filled in correctly. |
| 0x2377 | consumer reference has been destroyed | The subscription data transmission channel has been closed; check the connection to TDengine. |
| 0x2378 | consumer create error | Failed to create the data subscription. Check the taos log based on the error message to locate the problem. |
| 0x237a | vGroup not found in result set | The vGroup is not assigned to the current consumer; due to the rebalance mechanism, the relationship between the consumer and vGroup is not bound. |

## Data type mapping

TDengine currently supports timestamp, numeric, character, and boolean types, and the corresponding type conversions to Flink RowData types are as follows:

| TDengine DataType | Flink RowDataType |
| ----------------- | ------------------ |
| TIMESTAMP | TimestampData |
| INT | Integer |
| BIGINT | Long |
| FLOAT | Float |
| DOUBLE | Double |
| SMALLINT | Short |
| TINYINT | Byte |
| BOOL | Boolean |
| BINARY | byte[] |
| NCHAR | StringData |
| JSON | StringData |
| VARBINARY | byte[] |
| GEOMETRY | byte[] |

## Instructions for use

### Flink Semantic Selection Instructions

The connector uses at-least-once semantics for the following reasons:

- TDengine currently does not support transactions and cannot perform the frequent checkpoint operations and complex transaction coordination that stronger guarantees would require.
- Because TDengine uses timestamps as primary keys, downstream operators can filter out duplicate data to avoid duplicate calculations.
- At-least-once semantics ensure high data processing performance and low data latency. They can be configured as follows:

```java
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.enableCheckpointing(5000);
env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.AT_LEAST_ONCE);
```

If you use Maven to manage your project, simply add the following dependency to pom.xml.

```xml
<dependency>
    <groupId>com.taosdata.flink</groupId>
    <artifactId>flink-connector-tdengine</artifactId>
    <version>2.0.1</version>
</dependency>
```

The parameters for establishing a connection include the URL and Properties.

The URL format is:

`jdbc:TAOS-WS://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&timezone={timezone}]`

Parameter description:

- user: TDengine username, default value 'root'.
- password: User login password, default value 'taosdata'.
- database_name: Database name.
- timezone: Time zone.
- httpConnectTimeout: Connection timeout, in milliseconds, default value 60000.
- messageWaitTimeout: Message timeout, in milliseconds, default value 60000.
- useSSL: Whether SSL is used in the connection.

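For example, a connection through a local taosAdapter might use `jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata`, where the database name `power` is a placeholder.
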
### Source

The Source connector retrieves data from the TDengine database, converts it into a format and type that Flink can handle internally, and reads and distributes it in parallel, providing efficient input for subsequent data processing.
By setting the parallelism of the data source, multiple threads can read data from the data source in parallel, improving the efficiency and throughput of data reading and making full use of cluster resources for large-scale data processing.

#### Source Properties

The configuration parameters in Properties are as follows:

- TDengineConfigParams.PROPERTY_KEY_USER: TDengine username, default value 'root'.
- TDengineConfigParams.PROPERTY_KEY_PASSWORD: User login password, default value 'taosdata'.
- TDengineConfigParams.VALUE_DESERIALIZER: The deserialization method for the result set received by downstream operators. If the received result set type is Flink's `RowData`, simply set this to `RowData`. You can also inherit `TDengineRecordDeserialization` and implement the `convert` and `getProducedType` methods to customize deserialization based on the SQL `ResultSet`.
- TDengineConfigParams.TD_BATCH_MODE: This parameter is used to push data to downstream operators in batches. If set to True, when creating the `TDengineSource` object, specify the data type as a template form of the `SourceRecords` type.
- TDengineConfigParams.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: Message timeout, in milliseconds, default value 60000.
- TDengineConfigParams.PROPERTY_KEY_ENABLE_COMPRESSION: Whether compression is enabled during transmission. true: enabled, false: disabled. Default is false.
- TDengineConfigParams.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: Whether automatic reconnection is enabled. true: enabled, false: disabled. Default is false.
- TDengineConfigParams.PROPERTY_KEY_RECONNECT_INTERVAL_MS: Automatic reconnection retry interval, in milliseconds, default value 2000. Takes effect only when `PROPERTY_KEY_ENABLE_AUTO_RECONNECT` is true.
- TDengineConfigParams.PROPERTY_KEY_RECONNECT_RETRY_COUNT: Number of automatic reconnection retries, default value 3. Takes effect only when `PROPERTY_KEY_ENABLE_AUTO_RECONNECT` is true.
- TDengineConfigParams.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: Whether SSL certificate validation is disabled. true: disabled, false: enabled. Default is false.

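As a minimal sketch of assembling these properties before handing them to the source, note that the import path of `TDengineConfigParams` is an assumption here; check the bundled examples for the exact package in your connector version:

```java
import java.util.Properties;

// Assumed import path; adjust to match your connector version.
import com.taosdata.flink.common.TDengineConfigParams;

public class SourceProps {
    public static Properties build() {
        Properties props = new Properties();
        props.setProperty(TDengineConfigParams.PROPERTY_KEY_USER, "root");
        props.setProperty(TDengineConfigParams.PROPERTY_KEY_PASSWORD, "taosdata");
        // Deliver results to downstream operators as Flink RowData
        props.setProperty(TDengineConfigParams.VALUE_DESERIALIZER, "RowData");
        props.setProperty(TDengineConfigParams.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
        props.setProperty(TDengineConfigParams.PROPERTY_KEY_RECONNECT_INTERVAL_MS, "2000");
        return props;
    }
}
```
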
#### Split by time

Users can split the SQL query into multiple subtasks based on time by entering a start time, an end time, a split interval, and a time field name. The system splits the query by the configured interval (each interval is left-closed, right-open) and fetches the data in parallel.

```java
{{#include docs/examples/flink/Main.java:time_interval}}
```

#### Split by Super Table TAG

Users can split the query SQL into multiple query conditions based on the TAG fields of a supertable, and the system splits them into subtasks corresponding to each query condition, thereby fetching data in parallel.

```java
{{#include docs/examples/flink/Main.java:tag_split}}
```

#### Classify by Table

Sharding is supported by entering multiple supertables or regular tables with the same table structure. The system splits them one table per task and then fetches data in parallel.

```java
{{#include docs/examples/flink/Main.java:table_split}}
```

#### Use Source connector

Example of a query whose results are of the RowData type:

<details>
<summary>RowData Source</summary>
```java
{{#include docs/examples/flink/Main.java:source_test}}
```
</details>

Example of batch query results:

<details>
<summary>Batch Source</summary>
```java
{{#include docs/examples/flink/Main.java:source_batch_test}}
```
</details>

Example of a custom data type query result:

<details>
<summary>Custom Type Source</summary>
```java
{{#include docs/examples/flink/Main.java:source_custom_type_test}}
```
</details>

- ResultBean is a custom inner class used to define the data type of the Source query results.
- ResultSoureDeserialization is a custom inner class that inherits `TDengineRecordDeserialization` and implements the `convert` and `getProducedType` methods.

### CDC Data Subscription

Flink CDC is mainly used to provide data subscription functionality. It monitors real-time changes to TDengine database data and transmits these changes as data streams to Flink for processing, while ensuring data consistency and integrity.

Parameter description:

- TDengineCdcParams.BOOTSTRAP_SERVERS: `ip:port` of the TDengine server; if you use a WebSocket connection, it is the `ip:port` where taosAdapter is located.
- TDengineCdcParams.CONNECT_USER: TDengine username, default value 'root'.
- TDengineCdcParams.CONNECT_PASS: User login password, default value 'taosdata'.
- TDengineCdcParams.POLL_INTERVAL_MS: Polling interval for fetching data, default 500 ms.
- TDengineCdcParams.VALUE_DESERIALIZER: Result set deserialization method. If the received result set type is Flink's `RowData`, simply set it to `RowData`. You can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer`, specify the result set bean, and implement deserialization; you can also inherit `com.taosdata.jdbc.tmq.Deserializer` and customize the deserialization method based on the SQL ResultSet.
- TDengineCdcParams.TMQ_BATCH_MODE: This parameter is used to push data to downstream operators in batches. If set to True, when creating the `TDengineCdcSource` object, specify the data type as a template form of the `ConsumerRecords` type.
- TDengineCdcParams.GROUP_ID: Consumer group ID; consumers in the same group share consumption progress. Maximum length: 192.
- TDengineCdcParams.AUTO_OFFSET_RESET: Initial position of the consumer group subscription (`earliest`: subscribe from the beginning; `latest`: subscribe from the latest data; default `latest`).
- TDengineCdcParams.ENABLE_AUTO_COMMIT: Whether to enable automatic offset commits. true: commit automatically; false: commit at `checkpoint` time. Default is false.

> **Note**: In automatic commit mode, the reader commits offsets as soon as data is fetched, regardless of whether downstream operators have processed the data correctly. This carries a risk of data loss, so it is mainly used for efficient stateless operators or scenarios with low data consistency requirements.

- TDengineCdcParams.AUTO_COMMIT_INTERVAL_MS: Interval for automatically committing consumed offsets, in milliseconds, default 5000. Takes effect when `ENABLE_AUTO_COMMIT` is true.
- TDengineConfigParams.PROPERTY_KEY_ENABLE_COMPRESSION: Whether compression is enabled during transmission. true: enabled, false: disabled. Default is false.
- TDengineConfigParams.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: Whether automatic reconnection is enabled. true: enabled, false: disabled. Default is false.
- TDengineConfigParams.PROPERTY_KEY_RECONNECT_INTERVAL_MS: Automatic reconnection retry interval, in milliseconds, default value 2000. Takes effect only when `PROPERTY_KEY_ENABLE_AUTO_RECONNECT` is true.
- TDengineConfigParams.PROPERTY_KEY_RECONNECT_RETRY_COUNT: Number of automatic reconnection retries, default value 3. Takes effect only when `PROPERTY_KEY_ENABLE_AUTO_RECONNECT` is true.
- TDengineCdcParams.TMQ_SESSION_TIMEOUT_MS: Timeout after the consumer heartbeat is lost, after which rebalance logic is triggered; upon success, that consumer is removed (supported from TDengine version 3.3.3.0). Default is 12000, range [6000, 1800000].
- TDengineCdcParams.TMQ_MAX_POLL_INTERVAL_MS: The longest interval between consumer polls; exceeding it marks the consumer as offline and triggers rebalance logic; upon success, that consumer is removed (supported from TDengine version 3.3.3.0). Default is 300000, range [1000, INT32_MAX].

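A minimal sketch of the CDC configuration, using only the parameters documented above; the import path, group ID, and server address are assumptions:

```java
import java.util.Properties;

// Assumed import path; adjust to match your connector version.
import com.taosdata.flink.cdc.TDengineCdcParams;

public class CdcProps {
    public static Properties build() {
        Properties props = new Properties();
        props.setProperty(TDengineCdcParams.BOOTSTRAP_SERVERS, "localhost:6041"); // taosAdapter for WebSocket
        props.setProperty(TDengineCdcParams.CONNECT_USER, "root");
        props.setProperty(TDengineCdcParams.CONNECT_PASS, "taosdata");
        props.setProperty(TDengineCdcParams.GROUP_ID, "flink_group_1");
        props.setProperty(TDengineCdcParams.AUTO_OFFSET_RESET, "earliest");
        // Deliver subscription results as Flink RowData
        props.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "RowData");
        return props;
    }
}
```
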
#### Use CDC connector

The CDC connector creates consumers according to the parallelism set by the user, so set the parallelism appropriately based on available resources.

Example where the subscription results are of the RowData type:

<details>
<summary>CDC Source</summary>
```java
{{#include docs/examples/flink/Main.java:cdc_source}}
```
</details>

Example of batch query results:

<details>
<summary>CDC Batch Source</summary>
```java
{{#include docs/examples/flink/Main.java:cdc_batch_source}}
```
</details>

Example of a custom data type query result:

<details>
<summary>CDC Custom Type</summary>
```java
{{#include docs/examples/flink/Main.java:cdc_custom_type_test}}
```
</details>

- ResultBean is a custom inner class whose field names and data types correspond one-to-one with the column names and data types, so that the deserialization class set in the value.deserializer property can deserialize objects of the ResultBean type.

### Sink

The core function of the Sink is to efficiently and accurately write Flink-processed data from different data sources or operators into TDengine. In this process, TDengine's efficient write mechanism plays a crucial role, effectively ensuring fast and stable data storage.

#### Sink Properties

- TDengineConfigParams.PROPERTY_KEY_USER: TDengine username, default value 'root'.
- TDengineConfigParams.PROPERTY_KEY_PASSWORD: User login password, default value 'taosdata'.
- TDengineConfigParams.PROPERTY_KEY_DBNAME: The database name.
- TDengineConfigParams.TD_SUPERTABLE_NAME: The name of the supertable. The received data must contain a tbname field that determines which subtable to write to.
- TDengineConfigParams.TD_TABLE_NAME: The name of a subtable or regular table. This parameter does not need to be set together with `TD_SUPERTABLE_NAME`.
- TDengineConfigParams.VALUE_DESERIALIZER: The deserialization method for the received result sets. If the received result set type is Flink's `RowData`, simply set this to `RowData`. You can also inherit `TDengineSinkRecordSequencer` and implement the `serialize` method to customize deserialization based on the received data type.
- TDengineConfigParams.TD_BATCH_SIZE: The batch size for a single write to the TDengine database; a write is triggered when the batch count is reached or when a checkpoint occurs.
- TDengineConfigParams.TD_BATCH_MODE: When set to True for receiving batch data, if the data source is `TDengineSource`, use the `SourceRecords` template type to create the `TDengineSink` object; if the source is `TDengineCdcSource`, use the `ConsumerRecords` template type to create the `TDengineSink` object.
- TDengineConfigParams.TD_SOURCE_TYPE: Sets the data source type. When the data source is `TDengineSource`, set it to 'tdengine_source'; when the source is `TDengineCdcSource`, set it to 'tdengine_cdc'. Takes effect when `TD_BATCH_MODE` is set to True.
- TDengineConfigParams.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: Message timeout, in milliseconds, default value 60000.
- TDengineConfigParams.PROPERTY_KEY_ENABLE_COMPRESSION: Whether compression is enabled during transmission. true: enabled, false: disabled. Default is false.
- TDengineConfigParams.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: Whether automatic reconnection is enabled. true: enabled, false: disabled. Default is false.
- TDengineConfigParams.PROPERTY_KEY_RECONNECT_INTERVAL_MS: Automatic reconnection retry interval, in milliseconds, default value 2000. Takes effect only when `PROPERTY_KEY_ENABLE_AUTO_RECONNECT` is true.
- TDengineConfigParams.PROPERTY_KEY_RECONNECT_RETRY_COUNT: Number of automatic reconnection retries, default value 3. Takes effect only when `PROPERTY_KEY_ENABLE_AUTO_RECONNECT` is true.
- TDengineConfigParams.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: Whether SSL certificate validation is disabled. true: disabled, false: enabled. Default is false.

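A minimal sketch of the Sink configuration; the import path is assumed, and `power_sink`/`sink_meters` echo the example that follows:

```java
import java.util.Properties;

// Assumed import path; adjust to match your connector version.
import com.taosdata.flink.common.TDengineConfigParams;

public class SinkProps {
    public static Properties build() {
        Properties props = new Properties();
        props.setProperty(TDengineConfigParams.PROPERTY_KEY_USER, "root");
        props.setProperty(TDengineConfigParams.PROPERTY_KEY_PASSWORD, "taosdata");
        props.setProperty(TDengineConfigParams.PROPERTY_KEY_DBNAME, "power_sink");
        // Each record must carry a tbname field naming its target subtable
        props.setProperty(TDengineConfigParams.TD_SUPERTABLE_NAME, "sink_meters");
        props.setProperty(TDengineConfigParams.TD_BATCH_SIZE, "2000");
        return props;
    }
}
```
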
Usage example:

Write the subtable data of the meters table in the power database into the corresponding subtables of the sink_meters supertable in the power_sink database.

<details>
<summary>Sink RowData</summary>
```java
{{#include docs/examples/flink/Main.java:RowDataToSink}}
```
</details>

Usage example:

Subscribe to the subtable data of the meters supertable in the power database and write it to the corresponding subtables of the sink_meters supertable in the power_sink database.

<details>
<summary>Cdc Sink</summary>
```java
{{#include docs/examples/flink/Main.java:CdcRowDataToSink}}
```
</details>

### Table SQL

Using Table SQL, you can extract data from multiple different data source databases (such as TDengine, MySQL, Oracle, etc.), perform custom operator operations (such as data cleaning, format conversion, or joining data from different tables), and then load the processed results into target data sources (such as TDengine, MySQL, etc.).

#### Source connector

Parameter configuration instructions:

| Parameter Name | Type | Parameter Description |
|-----------------------| :-----: | ------------ |
| connector | string | Connector identifier; set to `tdengine-connector` |
| td.jdbc.url | string | URL of the connection |
| td.jdbc.mode | string | Connector type: `source`, `sink` |
| table.name | string | Name of the source or target table |
| scan.query | string | SQL statement to retrieve data |
| sink.db.name | string | Target database name |
| sink.supertable.name | string | Name of the supertable |
| sink.batch.size | integer | Batch size written |
| sink.table.name | string | Name of a subtable or regular table |

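As a sketch of what such a table definition might look like in Flink SQL, using only the options documented above; the URL, query, and table names are placeholders:

```sql
CREATE TABLE `meters_source` (
  `ts`      TIMESTAMP(3),
  `current` FLOAT,
  `voltage` INT,
  `phase`   FLOAT
) WITH (
  'connector'    = 'tdengine-connector',
  'td.jdbc.url'  = 'jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata',
  'td.jdbc.mode' = 'source',
  'table.name'   = 'meters',
  'scan.query'   = 'SELECT ts, `current`, voltage, phase FROM `meters`'
);
```
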
Usage example:

Write the subtable data of the meters table in the power database into the corresponding subtables of the sink_meters supertable in the power_sink database.

<details>
<summary>Table Source</summary>
```java
{{#include docs/examples/flink/Main.java:source_table}}
```
</details>

#### Table CDC connector

Parameter configuration instructions:

| Parameter Name | Type | Parameter Description |
|-------------------| :-----: |--------------------------------------------------------------------------------------|
| connector | string | Connector identifier; set to `tdengine-connector` |
| user | string | Username, default root |
| password | string | Password, default taosdata |
| bootstrap.servers | string | Server address |
| topic | string | Topic to subscribe to |
| td.jdbc.mode | string | Connector type: `cdc`, `sink` |
| group.id | string | Consumer group ID; consumers in the same group share consumption progress |
| auto.offset.reset | string | Initial position of the consumer group subscription. <br/> `earliest`: subscribe from the beginning <br/> `latest`: subscribe from the latest data <br/> Default: `latest` |
| poll.interval_ms | integer | Polling interval for fetching data, default 500 ms |
| sink.db.name | string | Target database name |
| sink.supertable.name | string | Name of the supertable |
| sink.batch.size | integer | Batch size written |
| sink.table.name | string | Name of a subtable or regular table |

Usage example:

Subscribe to the subtable data of the meters supertable in the power database and write it to the corresponding subtables of the sink_meters supertable in the power_sink database.

<details>
<summary>Table CDC</summary>
```java
{{#include docs/examples/flink/Main.java:cdc_table}}
```
</details>

@ -13,9 +13,9 @@ Through the Python connector of TDengine, Superset can support TDengine data sou

Prepare the following environment:

- TDengine is installed and running normally (both Enterprise and Community editions work)
- taosAdapter is running normally; refer to [taosAdapter](../../../tdengine-reference/components/taosadapter/)
- Apache Superset version 2.1.0 or above is installed; refer to [Apache Superset](https://superset.apache.org/)

## Install TDengine Python Connector

The TDengine Python connector ships with a connection driver that supports Superset in versions 2.1.18 and later; it is automatically installed in the Superset directory and provides data source services.

@ -43,7 +43,7 @@ After modifying configuration file parameters, you need to restart the *taosd* s

|resolveFQDNRetryTime | Cancelled after 3.x |Not supported |Number of retries when FQDN resolution fails|
|timeToGetAvailableConn | Cancelled after 3.3.4.x |Not supported |Maximum waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|maxShellConns | Cancelled after 3.x |Supported, effective after restart|Maximum number of connections allowed|
|maxRetryWaitTime | |Supported, effective after restart|Maximum timeout for reconnection, calculated from the time of retry, range 0-86400000, in milliseconds, default value 10000|
|shareConnLimit |Added in 3.3.4.0 |Supported, effective after restart|Number of requests a connection can share, range 1-512, default value 10|
|readTimeout |Added in 3.3.4.0 |Supported, effective after restart|Minimum timeout for a single request, range 64-604800, in seconds, default value 900|

@ -77,12 +77,7 @@ After modifying configuration file parameters, you need to restart the *taosd* s

|minReservedMemorySize | |Not supported |The minimum reserved system available memory size; all memory except the reserved amount can be used for queries; unit: MB; default reserved size is 20% of system physical memory; value range 1024-1000000000|
|singleQueryMaxMemorySize| |Not supported |The memory limit that a single query can use on a single node (dnode); exceeding this limit returns an error; unit: MB; default value: 0 (no limit); value range 0-1000000000|
|filterScalarMode | |Not supported |Force scalar filter mode, 0: off; 1: on, default value 0|
|queryPlannerTrace | |Supported, effective immediately |Internal parameter, whether the query plan outputs detailed logs|
|queryNodeChunkSize | |Supported, effective immediately |Internal parameter, chunk size of the query plan|
|queryUseNodeAllocator | |Supported, effective immediately |Internal parameter, allocation method of the query plan|
|queryMaxConcurrentTables| |Not supported |Internal parameter, concurrency number of the query plan|
|queryRsmaTolerance | |Not supported |Internal parameter, tolerance time for determining which level of rsma data to query, in milliseconds|
|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages|
|pqSortMemThreshold | |Not supported |Internal parameter, memory threshold for sorting|

### Region Related

@ -190,7 +185,8 @@ The effective value of charset is UTF-8.

|Parameter Name |Supported Version |Dynamic Modification|Description|
|-----------------------|-------------------------|--------------------|------------|
|supportVnodes | |Supported, effective immediately |Maximum number of vnodes supported by a dnode, range 0-4096, default value is twice the number of CPU cores + 5|
|numOfCommitThreads | |Supported, effective after restart|Maximum number of commit threads, range 1-1024, default value 4|
|numOfCompactThreads | |Supported, effective after restart|Maximum number of compact threads, range 1-16, default value 2|
|numOfMnodeReadThreads | |Supported, effective after restart|Number of read threads for mnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|numOfVnodeQueryThreads | |Supported, effective after restart|Number of query threads for vnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|numOfVnodeFetchThreads | |Supported, effective after restart|Number of fetch threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|

@ -13,45 +13,108 @@ import Icinga2 from "../../assets/resources/_icinga2.mdx"
import TCollector from "../../assets/resources/_tcollector.mdx"

taosAdapter is a companion tool for TDengine, serving as a bridge and adapter between the TDengine cluster and applications. It provides an easy and efficient way to ingest data directly from data collection agents (such as Telegraf, StatsD, collectd, etc.). It also offers InfluxDB/OpenTSDB-compatible data ingestion interfaces, allowing InfluxDB/OpenTSDB applications to be seamlessly ported to TDengine.
The TDengine connectors in various languages communicate with TDengine through the WebSocket interface, hence taosAdapter must be installed.

## taosAdapter Architecture Diagram

<figure>
<Image img={imgAdapter} alt="taosAdapter architecture"/>
<figcaption>Figure 1. taosAdapter architecture</figcaption>
</figure>

## Feature List

The taosAdapter provides the following features:

- WebSocket Interface:
  Supports executing SQL, schemaless writing, parameter binding, and data subscription through the WebSocket protocol.
- Compatible with InfluxDB v1 write interface:
  [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/)
- Compatible with OpenTSDB JSON and telnet format writing:
  - [http://opentsdb.net/docs/build/html/api_http/put.html](http://opentsdb.net/docs/build/html/api_http/put.html)
  - [http://opentsdb.net/docs/build/html/api_telnet/put.html](http://opentsdb.net/docs/build/html/api_telnet/put.html)
- collectd data writing:
  collectd is a system statistics collection daemon; visit [https://collectd.org/](https://collectd.org/) for more information.
- StatsD data writing:
  StatsD is a simple yet powerful daemon for gathering statistics; visit [https://github.com/statsd/statsd](https://github.com/statsd/statsd) for more information.
- icinga2 OpenTSDB writer data writing:
  icinga2 is software for collecting check result metrics and performance data; visit [https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer](https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer) for more information.
- TCollector data writing:
  TCollector is a client process that collects data from local collectors and pushes it to OpenTSDB; visit [http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html](http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html) for more information.
- node_exporter data collection and writing:
  node_exporter is an exporter of machine metrics; visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
- Supports Prometheus remote_read and remote_write:
  remote_read and remote_write are Prometheus's data read/write separation cluster solutions; visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
- RESTful API:
  [RESTful API](../../client-libraries/rest-api/)

### WebSocket Interface

Through the WebSocket interface of taosAdapter, connectors in various languages can perform SQL execution, schemaless writing, parameter binding, and data subscription. Refer to the [Development Guide](../../../developer-guide/connecting-to-tdengine/#websocket-connection) for details.

### Compatible with InfluxDB v1 write interface

You can use any client that supports the HTTP protocol to write data in InfluxDB-compatible format to TDengine by accessing the RESTful interface URL `http://<fqdn>:6041/influxdb/v1/write`.

Supported InfluxDB parameters are as follows:

- `db` specifies the database name used by TDengine
- `precision` the time precision used by TDengine
- `u` TDengine username
- `p` TDengine password
- `ttl` the lifespan of automatically created subtables, determined by the TTL parameter of the first data entry in the subtable; it cannot be updated. For more information, refer to the TTL parameter in the [table creation document](../../sql-manual/manage-tables/).

Note: InfluxDB's token authentication method is currently not supported; only Basic authentication and query parameter verification are supported.
Example: `curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"`

### Compatible with OpenTSDB JSON and telnet format writing

You can use any client that supports the HTTP protocol to write data in OpenTSDB-compatible format to TDengine by accessing the RESTful interface URL `http://<fqdn>:6041/<APIEndPoint>`. The endpoints are as follows:

```text
/opentsdb/v1/put/json/<db>
/opentsdb/v1/put/telnet/<db>
```

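For illustration, writing one data point in each format might look like the following; the database `test` and the metric payloads are placeholders, and the line and JSON payloads follow the standard OpenTSDB formats:

```shell
# Telnet format: <metric> <timestamp> <value> <tagk=tagv>
curl --request POST http://127.0.0.1:6041/opentsdb/v1/put/telnet/test \
  --user "root:taosdata" \
  --data "sys.cpu.usage 1577836800 18.0 host=web01"

# JSON format
curl --request POST http://127.0.0.1:6041/opentsdb/v1/put/json/test \
  --user "root:taosdata" \
  --data '{"metric":"sys.cpu.usage","timestamp":1577836800,"value":18.0,"tags":{"host":"web01"}}'
```
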
### collectd data writing

<CollectD />

### StatsD data writing

<StatsD />

### icinga2 OpenTSDB writer data writing

<Icinga2 />

### TCollector data writing

<TCollector />

### node_exporter data collection and writing

An exporter used by Prometheus that exposes hardware and operating system metrics from \*NIX kernels.

- Enable the taosAdapter configuration item node_exporter.enable
- Set the relevant configuration for node_exporter
- Restart taosAdapter

### Supports Prometheus remote_read and remote_write

<Prometheus />

### RESTful API

You can use any client that supports the HTTP protocol to write data to TDengine or query data from TDengine by accessing the RESTful interface URL `http://<fqdn>:6041/rest/sql`. For details, refer to the [REST API documentation](../../client-libraries/rest-api/).

## Installation

taosAdapter is part of the TDengine server software. If you are using the TDengine server, no additional steps are needed to install taosAdapter. If you need to deploy taosAdapter separately from the TDengine server, install the complete TDengine package on that server to install taosAdapter. To compile taosAdapter from source code, refer to the [Build taosAdapter](https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) document.

### Starting/Stopping taosAdapter

On Linux systems, the taosAdapter service is managed by systemd by default. Use the command `systemctl start taosadapter` to start the taosAdapter service. Use the command `systemctl stop taosadapter` to stop the taosAdapter service.

### Removing taosAdapter

Use the command `rmtaos` to remove the TDengine server software, including taosAdapter.

### Upgrading taosAdapter

taosAdapter and the TDengine server must be the same version. Upgrade taosAdapter by upgrading the TDengine server.
A taosAdapter deployed separately from taosd must be upgraded by upgrading the TDengine server on its own server.

## Configuration

taosAdapter supports configuration through command-line parameters, environment variables, and configuration files. The default configuration file is `/etc/taos/taosadapter.toml`.

@ -80,6 +143,7 @@ Usage of taosAdapter:
  --instanceId int                         instance ID. Env "TAOS_ADAPTER_INSTANCE_ID" (default 32)
  --log.compress                           whether to compress old log. Env "TAOS_ADAPTER_LOG_COMPRESS"
  --log.enableRecordHttpSql                whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL"
  --log.keepDays uint                      log retention days, must be a positive integer. Env "TAOS_ADAPTER_LOG_KEEP_DAYS" (default 30)
  --log.level string                       log level (trace debug info warning error). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info")
  --log.path string                        log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
  --log.reservedDiskSize string            reserved disk size for log dir (KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_RESERVED_DISK_SIZE" (default "1GB")
@ -90,6 +154,8 @@ Usage of taosAdapter:
  --log.sqlRotationSize string             record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB")
  --log.sqlRotationTime duration           record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s)
  --logLevel string                        log level (trace debug info warning error). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info")
  --maxAsyncConcurrentLimit int            The maximum number of concurrent calls allowed for the C asynchronous method. 0 means use CPU core count. Env "TAOS_ADAPTER_MAX_ASYNC_CONCURRENT_LIMIT"
  --maxSyncConcurrentLimit int             The maximum number of concurrent calls allowed for the C synchronized method. 0 means use CPU core count. Env "TAOS_ADAPTER_MAX_SYNC_CONCURRENT_LIMIT"
  --monitor.collectDuration duration       Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s)
  --monitor.disable                        Whether to disable monitoring. Env "TAOS_ADAPTER_MONITOR_DISABLE" (default true)
  --monitor.identity string                The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY"
@ -118,7 +184,7 @@ Usage of taosAdapter:
  --opentsdb_telnet.flushInterval duration opentsdb_telnet flush interval (0s means not valid) . Env "TAOS_ADAPTER_OPENTSDB_TELNET_FLUSH_INTERVAL"
  --opentsdb_telnet.maxTCPConnections int  max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250)
  --opentsdb_telnet.password string        opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata")
  --opentsdb_telnet.ports ints             opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
  --opentsdb_telnet.tcpKeepAlive           enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
  --opentsdb_telnet.ttl int                opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"
  --opentsdb_telnet.user string            opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
@ -131,6 +197,9 @@ Usage of taosAdapter:
  --prometheus.enable                      enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true)
  --restfulRowLimit int                    restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1)
  --smlAutoCreateDB                        Whether to automatically create db when writing with schemaless. Env "TAOS_ADAPTER_SML_AUTO_CREATE_DB"
  --ssl.certFile string                    ssl cert file path. Env "TAOS_ADAPTER_SSL_CERT_FILE"
  --ssl.enable                             enable ssl. Env "TAOS_ADAPTER_SSL_ENABLE"
  --ssl.keyFile string                     ssl key file path. Env "TAOS_ADAPTER_SSL_KEY_FILE"
  --statsd.allowPendingMessages int        statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000)
  --statsd.db string                       statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd")
  --statsd.deleteCounters                  statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true)
@ -157,27 +226,44 @@ Usage of taosAdapter:
-V, --version Print the version and exit
```
Note:

When using a browser to make API calls, please set the following Cross-Origin Resource Sharing (CORS) parameters according to the actual situation:

See the example configuration file at [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/blob/3.0/example/config/taosadapter.toml).

```text
AllowAllOrigins
AllowOrigins
AllowHeaders
ExposeHeaders
AllowCredentials
AllowWebSockets
```
### Cross-Origin Configuration

When making API calls from the browser, please configure the following Cross-Origin Resource Sharing (CORS) parameters based on your actual situation:

- **`cors.allowAllOrigins`**: Whether to allow all origins to access, default is true.
- **`cors.allowOrigins`**: A comma-separated list of origins allowed to access. Multiple origins can be specified.
- **`cors.allowHeaders`**: A comma-separated list of request headers allowed for cross-origin access. Multiple headers can be specified.
- **`cors.exposeHeaders`**: A comma-separated list of response headers exposed for cross-origin access. Multiple headers can be specified.
- **`cors.allowCredentials`**: Whether to allow cross-origin requests to include user credentials, such as cookies, HTTP authentication information, or client SSL certificates.
- **`cors.allowWebSockets`**: Whether to allow WebSockets connections.

If you are not making API calls through a browser, you do not need to worry about these configurations.

The above configurations take effect for the following interfaces:

* RESTful API requests
* WebSocket API requests
* InfluxDB v1 write interface
* OpenTSDB HTTP write interface

For details about the CORS protocol, please refer to: [https://www.w3.org/wiki/CORS_Enabled](https://www.w3.org/wiki/CORS_Enabled) or [https://developer.mozilla.org/docs/Web/HTTP/CORS](https://developer.mozilla.org/docs/Web/HTTP/CORS).

See the example configuration file at [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/blob/3.0/example/config/taosadapter.toml).
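A minimal sketch of a restrictive cross-origin setup; the `--cors.*` flag spellings are assumed to mirror the configuration keys above and do not appear in the usage list quoted in this document:

```shell
# Allow only two trusted origins and let requests carry cookies;
# flag names are assumed to mirror the cors.* configuration keys
taosadapter \
  --cors.allowAllOrigins=false \
  --cors.allowOrigins="https://app.example.com,https://ops.example.com" \
  --cors.allowCredentials=true
```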
### Connection Pool Configuration

### Connection Pool Parameters Description

taosAdapter uses a connection pool to manage connections to TDengine, improving concurrency performance and resource utilization. The connection pool configuration applies to the following interfaces, and these interfaces share a single connection pool:

When using the RESTful API, the system will manage TDengine connections through a connection pool. The connection pool can be configured with the following parameters:
* RESTful API requests
* InfluxDB v1 write interface
* OpenTSDB JSON and telnet format writing
* Telegraf data writing
* collectd data writing
* StatsD data writing
* node_exporter data collection writing
* Prometheus remote_read and remote_write

The configuration parameters for the connection pool are as follows:

- **`pool.maxConnect`**: The maximum number of connections allowed in the pool, default is twice the number of CPU cores. It is recommended to keep the default setting.
- **`pool.maxIdle`**: The maximum number of idle connections in the pool, default is the same as `pool.maxConnect`. It is recommended to keep the default setting.
@ -185,138 +271,136 @@ When using the RESTful API, the system will manage TDengine connections through
- **`pool.waitTimeout`**: Timeout for obtaining a connection from the pool, default is set to 60 seconds. If a connection is not obtained within the timeout period, HTTP status code 503 will be returned. This parameter is available starting from version 3.3.3.0.
- **`pool.maxWait`**: The maximum number of requests waiting to get a connection in the pool, default is 0, which means no limit. When the number of queued requests exceeds this value, new requests will return HTTP status code 503. This parameter is available starting from version 3.3.3.0.

A brief tuning sketch follows this list.
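The sketch assumes the CLI flags mirror the `pool.*` configuration keys (the flag spellings themselves are an assumption):

```shell
# Cap the pool at 16 connections, fail fast after 10 seconds of waiting,
# and queue at most 100 requests before returning HTTP 503
taosadapter \
  --pool.maxConnect=16 \
  --pool.maxIdle=16 \
  --pool.waitTimeout=10 \
  --pool.maxWait=100
```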
## Feature List

### HTTP Response Code Configuration

- RESTful API
  [RESTful API](../../client-libraries/rest-api/)
- Compatible with InfluxDB v1 write interface
  [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/)
- Compatible with OpenTSDB JSON and telnet format writing
  - [http://opentsdb.net/docs/build/html/api_http/put.html](http://opentsdb.net/docs/build/html/api_http/put.html)
  - [http://opentsdb.net/docs/build/html/api_telnet/put.html](http://opentsdb.net/docs/build/html/api_telnet/put.html)
- Seamless connection with collectd.
  collectd is a system statistics collection daemon, visit [https://collectd.org/](https://collectd.org/) for more information.
- Seamless connection with StatsD.
  StatsD is a simple yet powerful daemon for gathering statistics. Visit [https://github.com/statsd/statsd](https://github.com/statsd/statsd) for more information.
- Seamless connection with icinga2.
  icinga2 is a software for collecting check results metrics and performance data. Visit [https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer](https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer) for more information.
- Seamless connection with tcollector.
  TCollector is a client process that collects data from local collectors and pushes it to OpenTSDB. Visit [http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html](http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html) for more information.
- Seamless connection with node_exporter.
  node_exporter is an exporter of machine metrics. Visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
- Supports Prometheus remote_read and remote_write.
  remote_read and remote_write are Prometheus's data read-write separation cluster solutions. Visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
- Get the VGroup ID of the virtual node group (VGroup) where the table is located.

taosAdapter uses the parameter `httpCodeServerError` to set whether to return a non-200 HTTP status code when the C interface returns an error. When set to true, it will return different HTTP status codes based on the error code returned by C. See [HTTP Response Codes](../../client-libraries/rest-api/) for details.

## Interface

This configuration only affects the **RESTful interface**.

### TDengine RESTful Interface

**Parameter Description**

You can use any client that supports the HTTP protocol to write data to TDengine or query data from TDengine by accessing the RESTful interface URL `http://<fqdn>:6041/rest/sql`. For details, please refer to the [REST API documentation](../../client-libraries/rest-api/).

- **`httpCodeServerError`**:
  - **When set to `true`**: Map the error code returned by the C interface to the corresponding HTTP status code.
  - **When set to `false`**: Regardless of the error returned by the C interface, always return the HTTP status code `200` (default value). A quick curl check follows this list.
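The check below uses the RESTful endpoint shown above; the deliberately invalid database name is hypothetical:

```shell
# With httpCodeServerError=true the failing query maps to a non-200 status;
# with the default false the same request returns 200 with the error in the body
curl -s -o /dev/null -w "%{http_code}\n" \
  -u root:taosdata \
  -d "select * from no_such_db.no_such_table" \
  http://127.0.0.1:6041/rest/sql
```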
### InfluxDB

### Memory limit configuration

You can use any client that supports the HTTP protocol to write data in InfluxDB compatible format to TDengine by accessing the RESTful interface URL `http://<fqdn>:6041/influxdb/v1/write`.

taosAdapter will monitor the memory usage during its operation and adjust it through two thresholds. The valid value range is an integer from 1 to 100, and the unit is the percentage of system physical memory.

Supported InfluxDB parameters are as follows:

This configuration only affects the following interfaces:

- `db` specifies the database name used by TDengine
- `precision` the time precision used by TDengine
- `u` TDengine username
- `p` TDengine password
- `ttl` the lifespan of automatically created subtables, determined by the TTL parameter of the first data entry in the subtable, which cannot be updated. For more information, please refer to the TTL parameter in the [table creation document](../../sql-manual/manage-tables/).

* RESTful interface request
* InfluxDB v1 write interface
* OpenTSDB HTTP write interface
* Prometheus remote_read and remote_write interfaces

Note: Currently, InfluxDB's token authentication method is not supported, only Basic authentication and query parameter verification are supported.

Example: curl --request POST `http://127.0.0.1:6041/influxdb/v1/write?db=test` --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
**Parameter Description**

### OpenTSDB

- **`pauseQueryMemoryThreshold`**:
  - When memory usage exceeds this threshold, taosAdapter will stop processing query requests.
  - Default value: `70` (i.e. 70% of system physical memory).
- **`pauseAllMemoryThreshold`**:
  - When memory usage exceeds this threshold, taosAdapter will stop processing all requests (including writes and queries).
  - Default value: `80` (i.e. 80% of system physical memory).

You can use any client that supports the HTTP protocol to write data in OpenTSDB compatible format to TDengine by accessing the RESTful interface URL `http://<fqdn>:6041/<APIEndPoint>`. EndPoint as follows:

When memory usage falls below the threshold, taosAdapter will automatically resume the corresponding function.
```text
/opentsdb/v1/put/json/<db>
/opentsdb/v1/put/telnet/<db>
```

**HTTP return content:**

- **When `pauseQueryMemoryThreshold` is exceeded**:
  - HTTP status code: `503`
  - Return content: `"query memory exceeds threshold"`
- **When `pauseAllMemoryThreshold` is exceeded**:
  - HTTP status code: `503`
  - Return content: `"memory exceeds threshold"`

**Status check interface:**

The memory status of taosAdapter can be checked through the following interface:

- **Normal status**: `http://<fqdn>:6041/-/ping` returns `code 200`.
- **Memory exceeds threshold**:
  - If the memory exceeds `pauseAllMemoryThreshold`, `code 503` is returned.
  - If the memory exceeds `pauseQueryMemoryThreshold` and the request parameter contains `action=query`, `code 503` is returned.

**Related configuration parameters:**

- **`monitor.collectDuration`**: memory monitoring interval, default value is `3s`, environment variable is `TAOS_MONITOR_COLLECT_DURATION`.
- **`monitor.incgroup`**: whether to run in a container (set to `true` for running in a container), default value is `false`, environment variable is `TAOS_MONITOR_INCGROUP`.
- **`monitor.pauseQueryMemoryThreshold`**: memory threshold (percentage) for query request pause, default value is `70`, environment variable is `TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD`.
- **`monitor.pauseAllMemoryThreshold`**: memory threshold (percentage) for query and write request pause, default value is `80`, environment variable is `TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD`.

You can make corresponding adjustments based on the specific project application scenario and operation strategy, and it is recommended to use operation monitoring software to monitor the system memory status in a timely manner. The load balancer can also check the operation status of taosAdapter through this interface.
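For example, a health probe can distinguish the two pause states through the documented endpoint:

```shell
# Overall status: 200 when healthy, 503 above pauseAllMemoryThreshold
curl -s -o /dev/null -w "%{http_code}\n" http://127.0.0.1:6041/-/ping

# Query status: also 503 once pauseQueryMemoryThreshold is exceeded
curl -s -o /dev/null -w "%{http_code}\n" "http://127.0.0.1:6041/-/ping?action=query"
```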
### Schemaless write create DB configuration

Starting from **version 3.0.4.0**, taosAdapter provides the parameter `smlAutoCreateDB` to control whether to automatically create a database (DB) when writing via the schemaless protocol.

The `smlAutoCreateDB` parameter only affects the following interfaces:

- InfluxDB v1 write interface
- OpenTSDB JSON and telnet format writing
- Telegraf data writing
- collectd data writing
- StatsD data writing
- node_exporter data writing

**Parameter Description**

- **`smlAutoCreateDB`**:
  - **When set to `true`**: When writing via the schemaless protocol, if the target database does not exist, taosAdapter will automatically create it.
  - **When set to `false`**: The user needs to manually create the database, otherwise the write will fail (default value). A write sketch follows this list.
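The sketch reuses the InfluxDB v1 endpoint from earlier; the database name is hypothetical and assumed not to exist yet:

```shell
# Started as: taosadapter --smlAutoCreateDB=true
# The write below then creates the database "autodb" on first use;
# with the default false, the same request fails until the DB is created manually
curl --request POST "http://127.0.0.1:6041/influxdb/v1/write?db=autodb" \
  --user "root:taosdata" \
  --data-binary "measurement,host=host1 field1=2i 1577836800000000000"
```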
### Number of results returned configuration

taosAdapter provides the parameter `restfulRowLimit` to control the number of results returned by the HTTP interface.

The `restfulRowLimit` parameter only affects the return results of the following interfaces:

- RESTful interface
- Prometheus remote_read interface

**Parameter Description**

- **`restfulRowLimit`**:
  - **When set to a positive integer**: The number of results returned by the interface will not exceed this value.
  - **When set to `-1`**: The number of results returned by the interface is unlimited (default value). A usage sketch follows this list.
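The sketch assumes a hypothetical table and the `--restfulRowLimit` flag from the parameter list:

```shell
# Started as: taosadapter --restfulRowLimit=10
# A query matching more than 10 rows now returns at most 10 of them
curl -s -u root:taosdata \
  -d "select * from test.meters" \
  http://127.0.0.1:6041/rest/sql
```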
### Log configuration

1. You can set the taosAdapter log output detail level by setting the `--log.level` parameter or the environment variable `TAOS_ADAPTER_LOG_LEVEL`. Valid values include: panic, fatal, error, warn, warning, info, debug, and trace.
2. Starting from version **3.3.5.0**, taosAdapter supports dynamic modification of the log level through an HTTP interface. Users can dynamically adjust the log level by sending an HTTP PUT request to the `/config` interface. The authentication method of this interface is the same as the `/rest/sql` interface, and the configuration item key-value pairs in JSON format must be passed in the request body.

The following is an example of setting the log level to debug through the curl command:

```shell
curl --location --request PUT 'http://127.0.0.1:6041/config' \
  -u root:taosdata \
  --data '{"log.level": "debug"}'
```
### collectd

## Service Management

<CollectD />

### Starting/Stopping taosAdapter

### StatsD

On Linux systems, the taosAdapter service is managed by default by systemd. Use the command `systemctl start taosadapter` to start the taosAdapter service. Use the command `systemctl stop taosadapter` to stop the taosAdapter service.

<StatsD />

### Upgrading taosAdapter

### icinga2 OpenTSDB writer

taosAdapter and TDengine server need to use the same version. Please upgrade taosAdapter by upgrading the TDengine server.

taosAdapter deployed separately from taosd must be upgraded by upgrading the TDengine server on the host where it runs.
<Icinga2 />

### Removing taosAdapter

### TCollector

Use the command `rmtaos` to remove the TDengine server software, including taosAdapter.

<TCollector />

## Monitoring Metrics

### node_exporter

Currently, taosAdapter only collects monitoring indicators for RESTful/WebSocket related requests. There are no monitoring indicators for other interfaces.

An exporter used by Prometheus that exposes hardware and operating system metrics from \*NIX kernels

taosAdapter reports monitoring indicators to taosKeeper, which writes them to the monitoring database. The default is the `log` database, which can be modified in the taoskeeper configuration file. The following is a detailed introduction to these monitoring indicators.

- Enable configuration of taosAdapter node_exporter.enable
- Set the relevant configuration for node_exporter
- Restart taosAdapter
### prometheus

<Prometheus />

### Getting the VGroup ID of a table

You can access the HTTP interface `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` to get the VGroup ID of a table.
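For example (the database and table names are hypothetical):

```shell
# Returns the VGroup ID of table "d0" in database "power"
curl -s -u root:taosdata "http://127.0.0.1:6041/rest/vgid?db=power&table=d0"
```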
## Memory Usage Optimization Methods

taosAdapter will monitor its memory usage during operation and adjust it through two thresholds. Valid values range from -1 to 100 as a percentage of system physical memory.

- pauseQueryMemoryThreshold
- pauseAllMemoryThreshold

When the pauseQueryMemoryThreshold threshold is exceeded, it stops processing query requests.

HTTP return content:

- code 503
- body "query memory exceeds threshold"

When the pauseAllMemoryThreshold threshold is exceeded, it stops processing all write and query requests.

HTTP return content:

- code 503
- body "memory exceeds threshold"

When memory falls below the threshold, the corresponding functions are resumed.

Status check interface `http://<fqdn>:6041/-/ping`

- Normally returns `code 200`
- No parameters: if memory exceeds pauseAllMemoryThreshold, it will return `code 503`
- Request parameter `action=query`: if memory exceeds either pauseQueryMemoryThreshold or pauseAllMemoryThreshold, it will return `code 503`

Corresponding configuration parameters

```text
monitor.collectDuration Monitoring interval Environment variable "TAOS_MONITOR_COLLECT_DURATION" (default value 3s)
monitor.incgroup Whether it is running in cgroup (set to true in containers) Environment variable "TAOS_MONITOR_INCGROUP"
monitor.pauseAllMemoryThreshold Memory threshold for stopping inserts and queries Environment variable "TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default value 80)
monitor.pauseQueryMemoryThreshold Memory threshold for stopping queries Environment variable "TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default value 70)
```

You can adjust according to the specific project application scenario and operational strategy, and it is recommended to use operational monitoring software to monitor the system memory status in real time. Load balancers can also check the running status of taosAdapter through this interface.

## taosAdapter Monitoring Metrics

taosAdapter collects monitoring metrics related to REST/WebSocket requests. These monitoring metrics are reported to taosKeeper, which writes them into the monitoring database, by default the `log` database, which can be modified in the taoskeeper configuration file. Below is a detailed introduction to these monitoring metrics.

### adapter_requests table

`adapter_requests` records taosadapter monitoring data.

The `adapter_requests` table records taosAdapter monitoring data, and the fields are as follows:

| field | type | is_tag | comment |
| :--------------- | :----------- | :----- | :---------------------------------------- |
@ -339,32 +423,10 @@ taosAdapter collects monitoring metrics related to REST/WebSocket requests. Thes
| endpoint | VARCHAR | | request endpoint |
| req_type | NCHAR | tag | request type: 0 for REST, 1 for WebSocket |
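Since taosKeeper stores these metrics in ordinary tables (the `log` database by default, as noted above), they can be read back through the RESTful interface; the query below assumes the default database name:

```shell
# Inspect the collected request metrics via /rest/sql
curl -s -u root:taosdata \
  -d "select * from log.adapter_requests" \
  http://127.0.0.1:6041/rest/sql
```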
## Result Return Limit

taosAdapter controls the number of results returned through the parameter `restfulRowLimit`; -1 represents no limit, and the default is no limit.

## Changes after upgrading httpd to taosAdapter

This parameter controls the return of the following interfaces:

- `http://<fqdn>:6041/rest/sql`
- `http://<fqdn>:6041/prometheus/v1/remote_read/:db`

## Configure HTTP Return Codes

taosAdapter uses the parameter `httpCodeServerError` to set whether to return a non-200 HTTP status code when the C interface returns an error. When set to true, it will return different HTTP status codes based on the error code returned by C. See [HTTP Response Codes](../../client-libraries/rest-api/) for details.

## Configure Automatic DB Creation for Schemaless Writes

Starting from version 3.0.4.0, taosAdapter provides the parameter `smlAutoCreateDB` to control whether to automatically create a DB when writing via the schemaless protocol. The default value is false, which does not automatically create a DB, and requires the user to manually create a DB before performing schemaless writes.

## Troubleshooting

You can check the running status of taosAdapter with the command `systemctl status taosadapter`.

You can also adjust the detail level of taosAdapter log output by setting the `--logLevel` parameter or the environment variable `TAOS_ADAPTER_LOG_LEVEL`. Valid values include: panic, fatal, error, warn, warning, info, debug, and trace.

## How to Migrate from Older Versions of TDengine to taosAdapter

In TDengine server version 2.2.x.x or earlier, the taosd process included an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone software managed by systemd, having its own process. Moreover, there are some differences in configuration parameters and behaviors between the two, as shown in the table below:

In TDengine server version 2.2.x.x or earlier, the taosd process included an embedded HTTP service (httpd). As mentioned earlier, taosAdapter is a standalone software managed by systemd, having its own process. Moreover, there are some differences in configuration parameters and behaviors between the two, as shown in the table below:

| **#** | **embedded httpd** | **taosAdapter** | **comment** |
| ----- | ------------------- | ---------------------------------------------------------- | ------------------------------------------------------------ |
@ -246,13 +246,14 @@ The query performance test mainly outputs the QPS indicator of query request spe
```bash
complete query with 3 threads and 10000 query delay avg: 0.002686s min: 0.001182s max: 0.012189s p90: 0.002977s p95: 0.003493s p99: 0.004645s SQL command: select ...
INFO: Total specified queries: 30000
INFO: Spend 26.9530 second completed total queries: 30000, the QPS of all threads: 1113.049
```

- The first line represents the percentile distribution of query execution and query request delay for each of the three threads executing 10000 queries. The SQL command is the test query statement
- The second line indicates that a total of 10000 * 3 = 30000 queries have been completed
- The third line indicates that the total query time is 26.9530 seconds, and the query rate per second (QPS) is 1113.049 times/second
- The second line indicates that the total query time is 26.9530 seconds, the total queries is 10000 * 3 = 30000, and the query rate per second (QPS) is 1113.049 times/second
- If the `continue_if_fail` option is set to `yes` in the query, the last line will output the number of failed requests and error rate, in the format "error + number of failed requests (error rate)"
- QPS = number of successful requests / time spent (in seconds)
- Error rate = number of failed requests / (number of successful requests + number of failed requests). A numeric check of the sample run follows this list.
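The check below plugs the sample numbers into the QPS formula:

```shell
# QPS = number of successful requests / time spent (in seconds)
# 30000 / 26.9530 ≈ 1113.049, matching the reported value
echo "scale=3; 30000 / 26.9530" | bc
```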
#### Subscription metrics
@ -334,9 +335,9 @@ Parameters related to supertable creation are configured in the `super_tables` s
- **child_table_exists**: Whether the child table already exists, default is "no", options are "yes" or "no".

- **child_table_count**: Number of child tables, default is 10.
- **childtable_count**: Number of child tables, default is 10.

- **child_table_prefix**: Prefix for child table names, mandatory, no default value.
- **childtable_prefix**: Prefix for child table names, mandatory, no default value.

- **escape_character**: Whether the supertable and child table names contain escape characters, default is "no", options are "yes" or "no".
@ -403,7 +404,7 @@ Specify the configuration parameters for tag and data columns in `super_tables`
- **min**: The minimum value for the data type of the column/tag. Generated values will be greater than or equal to the minimum value.

- **max**: The maximum value for the data type of the column/tag. Generated values will be less than the minimum value.
- **max**: The maximum value for the data type of the column/tag. Generated values will be less than the maximum value.

- **scalingFactor**: Floating-point precision enhancement factor, only effective when the data type is float/double, valid values range from 1 to 1000000 positive integers. Used to enhance the precision of generated floating points, especially when min or max values are small. This attribute enhances the precision after the decimal point by powers of 10: a scalingFactor of 10 means enhancing the precision by 1 decimal place, 100 means 2 places, and so on.
@ -431,11 +432,9 @@ Specify the configuration parameters for tag and data columns in `super_tables`
- **create_table_thread_count** : The number of threads for creating tables, default is 8.

- **connection_pool_size** : The number of pre-established connections with the TDengine server. If not configured, it defaults to the specified number of threads.

- **result_file** : The path to the result output file, default is ./output.txt.

- **confirm_parameter_prompt** : A toggle parameter that requires user confirmation after a prompt to continue. The default value is false.
- **confirm_parameter_prompt** : A toggle parameter that requires user confirmation after a prompt to continue. The value can be "yes" or "no", by default "no".

- **interlace_rows** : Enables interleaved insertion mode and specifies the number of rows to insert into each subtable at a time. Interleaved insertion mode refers to inserting the specified number of rows into each subtable in sequence and repeating this process until all subtable data has been inserted. The default value is 0, meaning data is inserted into one subtable completely before moving to the next.
  This parameter can also be configured in `super_tables`; if configured, the settings in `super_tables` take higher priority and override the global settings.
@ -464,12 +463,12 @@ For other common parameters, see Common Configuration Parameters.
Configuration parameters for querying specified tables (can specify supertables, subtables, or regular tables) are set in `specified_table_query`.

- **mixed_query**: "yes" for `Mixed Query`, "no" for `Normal Query`, default is "no"
  `Mixed Query`: All SQL statements in `sqls` are grouped by the number of threads, with each thread executing one group. Each SQL statement in a thread needs to perform `query_times` queries.
  `Normal Query`: Each SQL in `sqls` starts `threads` and exits after executing `query_times` times. The next SQL can only be executed after all previous SQL threads have finished executing and exited.
  Regardless of whether it is a `Normal Query` or `Mixed Query`, the total number of query executions is the same. The total number of queries = `sqls` * `threads` * `query_times`. The difference is that `Normal Query` starts `threads` for each SQL query, while `Mixed Query` only starts `threads` once to complete all SQL queries. The number of thread startups for the two is different.
  - `General Query`: Each SQL in `sqls` starts `threads` threads to query this SQL; each thread exits after executing `query_times` queries, and only after all threads executing this SQL have completed can the next SQL be executed.
    The total number of queries (`General Query`) = the number of `sqls` * `query_times` * `threads`
  - `Mixed Query`: All SQL statements in `sqls` are divided into `threads` groups, with each thread executing one group. Each SQL statement needs to execute `query_times` queries.
    The total number of queries (`Mixed Query`) = the number of `sqls` * `query_times`. A worked example follows this list.
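The example assumes 4 SQL statements in `sqls`, `threads` = 5, and `query_times` = 10 (all three values are hypothetical):

```shell
# General Query: every SQL gets its own 5 threads, each running it 10 times
echo "General: $((4 * 10 * 5)) queries"   # 200

# Mixed Query: the 4 SQLs are split across the 5 threads, 10 runs each
echo "Mixed:   $((4 * 10)) queries"       # 40
```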
- **query_interval** : Query interval, in seconds, default is 0.
- **query_interval** : Query interval, in milliseconds, default is 0.

- **threads** : Number of threads executing the SQL query, default is 1.
@ -491,6 +490,7 @@ The thread mode of the super table query is the same as the `Normal Query` mode
- **sqls** :
  - **sql** : The SQL command to execute, required; for supertable queries, keep "xxxx" in the SQL command, the program will automatically replace it with all subtable names of the supertable.
  - **result** : File to save the query results, if not specified, results are not saved.
  - **Note**: The maximum number of SQL arrays configured under SQL is 100.

### Configuration Parameters for Subscription Scenarios
@ -65,7 +65,7 @@ database_option: {
- MINROWS: The minimum number of records in a file block, default is 100.
- KEEP: Indicates the number of days data files are kept, default value is 3650, range [1, 365000], and must be greater than or equal to 3 times the DURATION parameter value. The database will automatically delete data that has been saved for longer than the KEEP value to free up storage space. KEEP can use unit-specified formats, such as KEEP 100h, KEEP 10d, etc., supports m (minutes), h (hours), and d (days) three units. It can also be written without a unit, like KEEP 50, where the default unit is days. The enterprise version supports multi-tier storage feature, thus, multiple retention times can be set (multiple separated by commas, up to 3, satisfying keep 0 \<= keep 1 \<= keep 2, such as KEEP 100h,100d,3650d); the community version does not support multi-tier storage feature (even if multiple retention times are configured, it will not take effect, KEEP will take the longest retention time).
- KEEP_TIME_OFFSET: Effective from version 3.2.0.0. The delay execution time for deleting or migrating data that has been saved for longer than the KEEP value, default value is 0 (hours). After the data file's save time exceeds KEEP, the deletion or migration operation will not be executed immediately, but will wait an additional interval specified by this parameter, to avoid peak business periods.
- STT_TRIGGER: Indicates the number of file merges triggered by disk files. The open-source version is fixed at 1, the enterprise version can be set from 1 to 16. For scenarios with few tables and high-frequency writing, this parameter is recommended to use the default configuration; for scenarios with many tables and low-frequency writing, this parameter is recommended to be set to a larger value.
- STT_TRIGGER: Indicates the number of file merges triggered by disk files. For scenarios with few tables and high-frequency writing, this parameter is recommended to use the default configuration; for scenarios with many tables and low-frequency writing, this parameter is recommended to be set to a larger value.
- SINGLE_STABLE: Indicates whether only one supertable can be created in this database, used in cases where the supertable has a very large number of columns.
  - 0: Indicates that multiple supertables can be created.
  - 1: Indicates that only one supertable can be created.
@ -144,10 +144,6 @@ You can view cacheload through show \<db_name>.vgroups;
If cacheload is very close to cachesize, then cachesize may be too small. If cacheload is significantly less than cachesize, then cachesize is sufficient. You can decide whether to modify cachesize based on this principle. The specific modification value can be determined based on the available system memory, whether to double it or increase it several times.

4. stt_trigger

Please stop database writing before modifying the stt_trigger parameter.

:::note
Other parameters are not supported for modification in version 3.0.0.0
@ -491,15 +491,15 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
:::

## UNION ALL Clause

## UNION Clause

```text title=Syntax
SELECT ...
UNION ALL SELECT ...
[UNION ALL SELECT ...]
UNION [ALL] SELECT ...
[UNION [ALL] SELECT ...]
```

TDengine supports the UNION ALL operator. This means that if multiple SELECT clauses return result sets with the exact same structure (column names, column types, number of columns, order), these result sets can be combined together using UNION ALL. Currently, only the UNION ALL mode is supported, which means that duplicates are not removed during the merging process. In the same SQL statement, a maximum of 100 UNION ALLs are supported.

TDengine supports the UNION [ALL] operator. This means that if multiple SELECT clauses return result sets with the exact same structure (column names, column types, number of columns, order), these result sets can be combined together using UNION [ALL].
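A minimal sketch of the operator through the TDengine CLI (the tables and column are hypothetical):

```shell
# UNION ALL keeps duplicate rows; plain UNION removes them
taos -s "SELECT voltage FROM d1001 UNION ALL SELECT voltage FROM d1002;"
taos -s "SELECT voltage FROM d1001 UNION SELECT voltage FROM d1002;"
```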
## SQL Examples
@ -2171,7 +2171,7 @@ ignore_negative: {
**Usage Instructions**:

- Can be used with the columns associated with the selection. For example: select _rowts, DERIVATIVE() from.
- Can be used with the columns associated with the selection. For example: select _rowts, DERIVATIVE(col1, 1s, 1) from tb1.

### DIFF
@ -148,6 +148,7 @@ When using time windows, note:
- The window width of the aggregation period is specified by the keyword INTERVAL, with the shortest interval being 10 milliseconds (10a); it also supports an offset (the offset must be less than the interval), which is the offset of the time window division compared to "UTC moment 0". The SLIDING statement is used to specify the forward increment of the aggregation period, i.e., the duration of each window slide forward.
- When using the INTERVAL statement, unless in very special cases, it is required to configure the timezone parameter in the taos.cfg configuration files of both the client and server to the same value to avoid frequent cross-time zone conversions by time processing functions, which can cause severe performance impacts.
- The returned results have a strictly monotonically increasing time-series.
- When using AUTO as the window offset, if the WHERE time condition is complex, such as multiple AND/OR/IN combinations, AUTO may not take effect. In such cases, you can manually specify the window offset to resolve the issue.
- When using AUTO as the window offset, if the window width unit is d (day), n (month), w (week), y (year), such as: INTERVAL(1d, AUTO), INTERVAL(3w, AUTO), the TSMA optimization cannot take effect. If TSMA is manually created on the target table, the statement will report an error and exit; in this case, you can explicitly specify the Hint SKIP_TSMA or not use AUTO as the window offset.

### State Window
@ -43,7 +43,8 @@ TDengine supports `UNION ALL` and `UNION` operators. UNION ALL combines the resu
| 9 | LIKE | BINARY, NCHAR, and VARCHAR | Matches the specified pattern string with wildcard |
| 10 | NOT LIKE | BINARY, NCHAR, and VARCHAR | Does not match the specified pattern string with wildcard |
| 11 | MATCH, NMATCH | BINARY, NCHAR, and VARCHAR | Regular expression match |
| 12 | CONTAINS | JSON | Whether a key exists in JSON |
| 12 | REGEXP, NOT REGEXP | BINARY, NCHAR, and VARCHAR | Regular expression match |
| 13 | CONTAINS | JSON | Whether a key exists in JSON |

LIKE conditions use wildcard strings for matching checks, with the following rules:
@ -51,7 +52,7 @@ LIKE conditions use wildcard strings for matching checks, with the following rul
- If you want to match an underscore character that is originally in the string, you can write it as \_ in the wildcard string, i.e., add a backslash to escape it.
- The wildcard string cannot exceed 100 bytes in length. It is not recommended to use too long wildcard strings, as it may severely affect the performance of the LIKE operation.

MATCH and NMATCH conditions use regular expressions for matching, with the following rules:
MATCH/REGEXP and NMATCH/NOT REGEXP conditions use regular expressions for matching, with the following rules:

- Supports regular expressions that comply with the POSIX standard, see Regular Expressions for specific standards.
- When MATCH matches a regular expression, it returns TRUE. When NMATCH does not match a regular expression, it returns TRUE.
@ -45,7 +45,7 @@ ALTER ALL DNODES dnode_option
For configuration parameters that support dynamic modification, you can use the ALTER DNODE or ALTER ALL DNODES syntax to modify the values of configuration parameters in a dnode. Starting from version 3.3.4.0, the modified configuration parameters will be automatically persisted and will remain effective even after the database service is restarted.

To check whether a configuration parameter supports dynamic modification, please refer to the following page: [taosd Reference](../01-components/01-taosd.md)
To check whether a configuration parameter supports dynamic modification, please refer to the following page: [taosd Reference](/tdengine-reference/components/taosd/)

The value is the parameter's value and needs to be in character format. For example, to change the log output level of dnode 1 to debug:
@ -130,7 +130,7 @@ ALTER LOCAL local_option
You can use the above syntax to modify the client's configuration parameters, and there is no need to restart the client. The changes take effect immediately.

To check whether a configuration parameter supports dynamic modification, please refer to the following page: [taosc Reference](../01-components/02-taosc.md)
To check whether a configuration parameter supports dynamic modification, please refer to the following page: [taosc Reference](/tdengine-reference/components/taosc/)

## View Client Configuration
@ -304,9 +304,10 @@ Displays information about all topics in the current database.
```sql
SHOW TRANSACTIONS;
SHOW TRANSACTION [transaction_id];
```

Displays information about transactions currently being executed in the system (these transactions are only for metadata level, not for regular tables).
Displays information about one specified transaction or all transactions currently being executed in the system (these transactions are only for metadata level, not for regular tables).

## SHOW USERS
@ -60,7 +60,7 @@ CREATE [OR REPLACE] AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 64;
```

For details on how to develop custom functions, please refer to [UDF Usage Instructions](../../../developer-guide/user-defined-functions/).
For details on how to develop custom functions, please refer to [UDF Usage Instructions](/developer-guide/user-defined-functions/).

## Manage UDF
@ -4,7 +4,11 @@ title: Time-Range Small Materialized Aggregates (TSMAs)
slug: /tdengine-reference/sql-manual/manage-tsmas
---

To improve the performance of aggregate function queries with large data volumes, window pre-aggregation (TSMA, Time-Range Small Materialized Aggregates) objects are created. By using fixed time windows to pre-calculate specified aggregate functions and storing the results, query performance is enhanced by querying these pre-calculated results.
In scenarios with large amounts of data, it is often necessary to query summary results for a certain period. As historical data increases or the time range expands, query time will also increase accordingly. By using materialized aggregation, the calculation results can be stored in advance, allowing subsequent queries to directly read the aggregated results without scanning the original data, such as the SMA (Small Materialized Aggregates) information within the current block.

The SMA information within a block has a small granularity. If the query time range is in days, months, or even years, the number of blocks will be large. Therefore, TSMA (Time-Range Small Materialized Aggregates) supports users to specify a time window for materialized aggregation. By pre-calculating the data within a fixed time window and storing the calculation results, queries can be performed on the pre-calculated results to improve query performance.
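As a flavor of what such an object looks like, here is a hedged sketch run through the TDengine CLI; the table, column, and function list are hypothetical, and the full CREATE TSMA syntax is covered in the section below:

```shell
# Pre-aggregate daily avg/max of voltage; queries over day-or-coarser
# windows on these aggregates can then read the materialized results
taos -s "CREATE TSMA meters_daily ON power.meters FUNCTION(avg(voltage), max(voltage)) INTERVAL(1d);"
```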
## Creating TSMA
Binary file not shown.
@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: C/C++
title: C/C++ Client Library
slug: /tdengine-reference/client-libraries/cpp
@ -508,8 +509,7 @@ For the OpenTSDB text protocol, the parsing of timestamps follows its official p
- **Interface Description**: Used for polling to consume data. Each consumer can only call this interface in a single thread.
  - tmq: [Input] Points to a valid ws_tmq_t structure pointer, which represents a TMQ consumer object.
  - timeout: [Input] Polling timeout in milliseconds, a negative number indicates a default timeout of 1 second.
  - **Return Value**: Non-`NULL`: Success, returns a pointer to a WS_RES structure, which contains the received message. `NULL`: Failure, indicates no data. WS_RES results are consistent with taos_query results, and information in WS_RES can be obtained through various query interfaces, such as schema, etc.
  - **Return Value**: Non-`NULL`: Success, returns a pointer to a WS_RES structure, which contains the received message. `NULL`: indicates no data; the error code can be obtained through ws_errno(NULL), please refer to the reference manual for the specific error message. WS_RES results are consistent with taos_query results, and information in WS_RES can be obtained through various query interfaces, such as schema, etc.
- `int32_t ws_tmq_consumer_close(ws_tmq_t *tmq)`
  - **Interface Description**: Used to close the ws_tmq_t structure. Must be used in conjunction with ws_tmq_consumer_new.
  - tmq: [Input] Points to a valid ws_tmq_t structure pointer, which represents a TMQ consumer object.
@ -1194,7 +1194,7 @@ In addition to using SQL or parameter binding APIs to insert data, you can also
- **Interface Description**: Used to poll for consuming data, each consumer can only call this interface in a single thread.
  - tmq: [Input] Points to a valid tmq_t structure pointer, representing a TMQ consumer object.
  - timeout: [Input] Polling timeout in milliseconds, a negative number indicates a default timeout of 1 second.
  - **Return Value**: Non-`NULL`: Success, returns a pointer to a TAOS_RES structure containing the received messages. `NULL`: Failure, indicates no data. TAOS_RES results are consistent with taos_query results, and information in TAOS_RES can be obtained through various query interfaces, such as schema, etc.
  - **Return Value**: Non-`NULL`: Success, returns a pointer to a TAOS_RES structure containing the received messages. `NULL`: indicates no data; the error code can be obtained through taos_errno(NULL), please refer to the reference manual for the specific error message. TAOS_RES results are consistent with taos_query results, and information in TAOS_RES can be obtained through various query interfaces, such as schema, etc.

- `int32_t tmq_consumer_close(tmq_t *tmq)`
  - **Interface Description**: Used to close a tmq_t structure. Must be used in conjunction with tmq_consumer_new.
@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: Java
title: Java Client Library
slug: /tdengine-reference/client-libraries/java
@ -30,33 +31,37 @@ The JDBC driver implementation for TDengine strives to be consistent with relati
## Version History

| taos-jdbcdriver Version | Major Changes | TDengine Version |
| ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- |
| 3.4.0 | 1. Replaced fastjson library with jackson. <br/> 2. WebSocket uses a separate protocol identifier. <br/> 3. Optimized background thread usage to avoid user misuse leading to timeouts. | - |
| 3.3.4 | Fixed getInt error when data type is float. | - |
| 3.3.3 | Fixed memory leak caused by closing WebSocket statement. | - |
| 3.3.2 | 1. Optimized parameter binding performance under WebSocket connection. <br/> 2. Improved support for mybatis. | - |
| 3.3.0 | 1. Optimized data transmission performance under WebSocket connection. <br/> 2. Supports skipping SSL verification, off by default. | 3.3.2.0 and higher |
| 3.2.11 | Fixed a bug in closing result set in Native connection. | - |
| 3.2.10 | 1. REST/WebSocket connections support data compression during transmission. <br/> 2. WebSocket automatic reconnection mechanism, off by default. <br/> 3. Connection class provides methods for schemaless writing. <br/> 4. Optimized data fetching performance for native connections. <br/> 5. Fixed some known issues. <br/> 6. Metadata retrieval functions can return a list of supported functions. | - |
| 3.2.9 | Fixed bug in closing WebSocket prepareStatement. | - |
| 3.2.8 | 1. Optimized auto-commit. <br/> 2. Fixed manual commit bug in WebSocket. <br/> 3. Optimized WebSocket prepareStatement using a single connection. <br/> 4. Metadata supports views. | - |
| 3.2.7 | 1. Supports VARBINARY and GEOMETRY types. <br/> 2. Added timezone setting support for native connections. <br/> 3. Added WebSocket automatic reconnection feature. | 3.2.0.0 and higher |
| 3.2.5 | Data subscription adds committed() and assignment() methods. | 3.1.0.3 and higher |
| 3.2.4 | Data subscription adds enable.auto.commit parameter under WebSocket connection, as well as unsubscribe() method. | - |
| 3.2.3 | Fixed ResultSet data parsing failure in some cases. | - |
| 3.2.2 | New feature: Data subscription supports seek function. | 3.0.5.0 and higher |
| 3.2.1 | 1. WebSocket connection supports schemaless and prepareStatement writing. <br/> 2. Consumer poll returns result set as ConsumerRecord, which can be accessed through value() method. | 3.0.3.0 and higher |
| 3.2.0 | Connection issues, not recommended for use. | - |
| 3.1.0 | WebSocket connection supports subscription function. | - |
| 3.0.1 - 3.0.4 | Fixed data parsing errors in result sets under some conditions. 3.0.1 compiled in JDK 11 environment, other versions recommended for JDK 8. | - |
| 3.0.0 | Supports TDengine 3.0 | 3.0.0.0 and higher |
| 2.0.42 | Fixed wasNull interface return value in WebSocket connection. | - |
| 2.0.41 | Fixed username and password encoding method in REST connection. | - |
| 2.0.39 - 2.0.40 | Added REST connection/request timeout settings. | - |
| 2.0.38 | JDBC REST connection adds batch fetching function. | - |
| 2.0.37 | Added support for json tag. | - |
| 2.0.36 | Added support for schemaless writing. | - |

| taos-jdbcdriver Version | Major Changes | TDengine Version |
| ----------------------- | ------------- | ------------------ |
| 3.5.3 | Support unsigned data types in WebSocket connections. | - |
| 3.5.2 | Fixed WebSocket result set free bug. | - |
| 3.5.1 | Fixed the getObject issue in data subscription. | - |
| 3.5.0 | 1. Optimized the performance of WebSocket connection parameter binding, supporting parameter binding queries using binary data. <br/> 2. Optimized the performance of small queries in WebSocket connection. <br/> 3. Added support for setting time zone and app info on WebSocket connection. | 3.3.5.0 and higher |
| 3.4.0 | 1. Replaced fastjson library with jackson. <br/> 2. WebSocket uses a separate protocol identifier. <br/> 3. Optimized background thread usage to avoid user misuse leading to timeouts. | - |
| 3.3.4 | Fixed getInt error when data type is float. | - |
| 3.3.3 | Fixed memory leak caused by closing WebSocket statement. | - |
| 3.3.2 | 1. Optimized parameter binding performance under WebSocket connection. <br/> 2. Improved support for mybatis. | - |
| 3.3.0 | 1. Optimized data transmission performance under WebSocket connection. <br/> 2. Supports skipping SSL verification, off by default. | 3.3.2.0 and higher |
| 3.2.11 | Fixed a bug in closing result set in Native connection. | - |
| 3.2.10 | 1. REST/WebSocket connections support data compression during transmission. <br/> 2. WebSocket automatic reconnection mechanism, off by default. <br/> 3. Connection class provides methods for schemaless writing. <br/> 4. Optimized data fetching performance for native connections. <br/> 5. Fixed some known issues. <br/> 6. Metadata retrieval functions can return a list of supported functions. | - |
| 3.2.9 | Fixed bug in closing WebSocket prepareStatement. | - |
| 3.2.8 | 1. Optimized auto-commit. <br/> 2. Fixed manual commit bug in WebSocket. <br/> 3. Optimized WebSocket prepareStatement using a single connection. <br/> 4. Metadata supports views. | - |
| 3.2.7 | 1. Supports VARBINARY and GEOMETRY types. <br/> 2. Added timezone setting support for native connections. <br/> 3. Added WebSocket automatic reconnection feature. | 3.2.0.0 and higher |
| 3.2.5 | Data subscription adds committed() and assignment() methods. | 3.1.0.3 and higher |
| 3.2.4 | Data subscription adds enable.auto.commit parameter under WebSocket connection, as well as unsubscribe() method. | - |
| 3.2.3 | Fixed ResultSet data parsing failure in some cases. | - |
| 3.2.2 | New feature: Data subscription supports seek function. | 3.0.5.0 and higher |
| 3.2.1 | 1. WebSocket connection supports schemaless and prepareStatement writing. <br/> 2. Consumer poll returns result set as ConsumerRecord, which can be accessed through value() method. | 3.0.3.0 and higher |
| 3.2.0 | Connection issues, not recommended for use. | - |
| 3.1.0 | WebSocket connection supports subscription function. | - |
| 3.0.1 - 3.0.4 | Fixed data parsing errors in result sets under some conditions. 3.0.1 compiled in JDK 11 environment, other versions recommended for JDK 8. | - |
| 3.0.0 | Supports TDengine 3.0 | 3.0.0.0 and higher |
| 2.0.42 | Fixed wasNull interface return value in WebSocket connection. | - |
| 2.0.41 | Fixed username and password encoding method in REST connection. | - |
| 2.0.39 - 2.0.40 | Added REST connection/request timeout settings. | - |
| 2.0.38 | JDBC REST connection adds batch fetching function. | - |
| 2.0.37 | Added support for json tag. | - |
| 2.0.36 | Added support for schemaless writing. | - |
## Exceptions and Error Codes
@ -75,47 +80,47 @@ The error codes that the JDBC connector may report include 4 types:
|
|||
|
||||
Please refer to the specific error codes:
|
||||
|
||||
| Error Code | Description | Suggested Actions |
|
||||
| ---------- | --------------------------------------------------------------- | ----------------------------------------------------------------------------------------- |
|
||||
| 0x2301 | connection already closed | The connection is already closed, check the connection status, or recreate the connection to execute related commands. |
|
||||
| 0x2302 | this operation is NOT supported currently! | The current interface is not supported, consider switching to another connection method. |
|
||||
| 0x2303 | invalid variables | Invalid parameters, please check the interface specifications and adjust the parameter types and sizes. |
|
||||
| 0x2304 | statement is closed | The statement is already closed, check if the statement was used after being closed, or if the connection is normal. |
|
||||
| 0x2305 | resultSet is closed | The resultSet has been released, check if the resultSet was used after being released. |
|
||||
| 0x2306 | Batch is empty! | Add parameters to prepareStatement before executing executeBatch. |
|
||||
| 0x2307 | Can not issue data manipulation statements with executeQuery() | Use executeUpdate() for update operations, not executeQuery(). |
|
||||
| 0x2308 | Can not issue SELECT via executeUpdate() | Use executeQuery() for query operations, not executeUpdate(). |
|
||||
| 0x230d | parameter index out of range | Parameter out of bounds, check the reasonable range of parameters. |
|
||||
| 0x230e | connection already closed | The connection is already closed, check if the Connection was used after being closed, or if the connection is normal. |
|
||||
| 0x230f | unknown sql type in tdengine | Check the Data Type types supported by TDengine. |
|
||||
| 0x2310 | can't register JDBC-JNI driver | Cannot register JNI driver, check if the url is correctly filled. |
|
||||
| 0x2312 | url is not set | Check if the REST connection url is correctly filled. |
|
||||
| 0x2314 | numeric value out of range | Check if the correct interface was used for numeric types in the result set. |
|
||||
| 0x2315 | unknown taos type in tdengine | When converting TDengine data types to JDBC data types, check if the correct TDengine data type was specified. |
|
||||
| 0x2317 | | Incorrect request type used in REST connection. |
|
||||
| 0x2318 | | Data transmission error occurred in REST connection, check the network situation and retry. |
|
||||
| 0x2319 | user is required | Username information is missing when creating a connection. |
|
||||
| 0x231a | password is required | Password information is missing when creating a connection. |
|
||||
| 0x231c | httpEntity is null, sql: | An exception occurred in REST connection execution. |
|
||||
| 0x231d | can't create connection with server within | Increase the httpConnectTimeout parameter to extend the connection time, or check the connection with taosAdapter. |
|
||||
| 0x231e | failed to complete the task within the specified time | Increase the messageWaitTimeout parameter to extend the execution time, or check the connection with taosAdapter. |
|
||||
| 0x2350 | unknown error | Unknown exception, please provide feedback to the developers on GitHub. |
|
||||
| 0x2352 | Unsupported encoding | An unsupported character encoding set was specified in the local connection. |
|
||||
| 0x2353 | internal error of database, please see taoslog for more details | An error occurred while executing prepareStatement in local connection, check taos log for troubleshooting. |
|
||||
| 0x2354 | JNI connection is NULL | The Connection was already closed when executing commands in local connection. Check the connection with TDengine. |
|
||||
| 0x2355 | JNI result set is NULL | The result set is abnormal in local connection, check the connection and retry. |
|
||||
| 0x2356 | invalid num of fields | The meta information of the result set obtained in local connection does not match. |
|
||||
| 0x2357 | empty sql string | Fill in the correct SQL for execution. |
|
||||
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation error in local connection, check taos log for troubleshooting. |
|
||||
| 0x2371 | consumer properties must not be null! | Parameters are null when creating a subscription, fill in the correct parameters. |
|
||||
| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains empty values, fill in the correct parameters. |
|
||||
| 0x2373 | failed to set consumer property, | The parameter value contains empty values, fill in the correct parameters. |
|
||||
| 0x2375 | topic reference has been destroyed | During the data subscription process, the topic reference was released. Check the connection with TDengine. |
|
||||
| Error Code | Description | Suggested Actions |
|
||||
| ---------- | --------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 0x2301 | connection already closed | The connection is already closed, check the connection status, or recreate the connection to execute related commands. |
|
||||
| 0x2302 | this operation is NOT supported currently! | The current interface is not supported, consider switching to another connection method. |
|
||||
| 0x2303 | invalid variables | Invalid parameters, please check the interface specifications and adjust the parameter types and sizes. |
|
||||
| 0x2304 | statement is closed | The statement is already closed, check if the statement was used after being closed, or if the connection is normal. |
|
||||
| 0x2305 | resultSet is closed | The resultSet has been released, check if the resultSet was used after being released. |
|
||||
| 0x2306 | Batch is empty! | Add parameters to prepareStatement before executing executeBatch. |
|
||||
| 0x2307 | Can not issue data manipulation statements with executeQuery() | Use executeUpdate() for update operations, not executeQuery(). |
|
||||
| 0x2308 | Can not issue SELECT via executeUpdate() | Use executeQuery() for query operations, not executeUpdate(). |
|
||||
| 0x230d | parameter index out of range | Parameter out of bounds, check the reasonable range of parameters. |
|
||||
| 0x230e | connection already closed | The connection is already closed, check if the Connection was used after being closed, or if the connection is normal. |
|
||||
| 0x230f | unknown sql type in tdengine | Check the data types supported by TDengine. |
|
||||
| 0x2310 | can't register JDBC-JNI driver | Cannot register JNI driver, check if the url is correctly filled. |
|
||||
| 0x2312 | url is not set | Check if the REST connection url is correctly filled. |
|
||||
| 0x2314 | numeric value out of range | Check if the correct interface was used for numeric types in the result set. |
|
||||
| 0x2315 | unknown taos type in tdengine | When converting TDengine data types to JDBC data types, check if the correct TDengine data type was specified. |
|
||||
| 0x2317 | | Incorrect request type used in REST connection. |
|
||||
| 0x2318 | | Data transmission error occurred in REST connection, check the network situation and retry. |
|
||||
| 0x2319 | user is required | Username information is missing when creating a connection. |
|
||||
| 0x231a | password is required | Password information is missing when creating a connection. |
|
||||
| 0x231c | httpEntity is null, sql: | An exception occurred in REST connection execution. |
|
||||
| 0x231d | can't create connection with server within | Increase the httpConnectTimeout parameter to extend the connection time, or check the connection with taosAdapter. |
|
||||
| 0x231e | failed to complete the task within the specified time | Increase the messageWaitTimeout parameter to extend the execution time, or check the connection with taosAdapter. |
|
||||
| 0x2350 | unknown error | Unknown exception, please provide feedback to the developers on GitHub. |
|
||||
| 0x2352 | Unsupported encoding | An unsupported character encoding set was specified in the local connection. |
|
||||
| 0x2353 | internal error of database, please see taoslog for more details | An error occurred while executing prepareStatement in local connection, check taos log for troubleshooting. |
|
||||
| 0x2354 | JNI connection is NULL | The Connection was already closed when executing commands in local connection. Check the connection with TDengine. |
|
||||
| 0x2355 | JNI result set is NULL | The result set is abnormal in local connection, check the connection and retry. |
|
||||
| 0x2356 | invalid num of fields | The meta information of the result set obtained in local connection does not match. |
|
||||
| 0x2357 | empty sql string | Fill in the correct SQL for execution. |
|
||||
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation error in local connection, check taos log for troubleshooting. |
|
||||
| 0x2371 | consumer properties must not be null! | Parameters are null when creating a subscription, fill in the correct parameters. |
|
||||
| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains empty values, fill in the correct parameters. |
|
||||
| 0x2373 | failed to set consumer property, | The parameter value contains empty values, fill in the correct parameters. |
|
||||
| 0x2375 | topic reference has been destroyed | During the data subscription process, the topic reference was released. Check the connection with TDengine. |
|
||||
| 0x2376 | failed to set consumer topic, topic name is empty | During the data subscription process, the subscription topic name is empty. Check if the specified topic name is correctly filled. |
|
||||
| 0x2377 | consumer reference has been destroyed | The data transmission channel for the subscription has been closed, check the connection with TDengine. |
|
||||
| 0x2378 | consumer create error | Data subscription creation failed, check the error information and taos log for troubleshooting. |
|
||||
| 0x2379 | seek offset must not be a negative number | The seek interface parameter must not be negative, use the correct parameters. |
|
||||
| 0x237a | vGroup not found in result set | VGroup not assigned to the current consumer; the rebalance mechanism has unbound the Consumer from the VGroup. |
|
||||
|
||||
- [TDengine Java Connector Error Code](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||
|
@ -124,24 +129,27 @@ Please refer to the specific error codes:
|
|||
|
||||
TDengine currently supports timestamp, numeric, character, and boolean types; the corresponding Java type conversions are as follows:
|
||||
|
||||
| TDengine DataType | JDBCType |
|
||||
| ----------------- | ------------------ |
|
||||
| TIMESTAMP | java.sql.Timestamp |
|
||||
| INT | java.lang.Integer |
|
||||
| BIGINT | java.lang.Long |
|
||||
| FLOAT | java.lang.Float |
|
||||
| DOUBLE | java.lang.Double |
|
||||
| SMALLINT | java.lang.Short |
|
||||
| TINYINT | java.lang.Byte |
|
||||
| BOOL | java.lang.Boolean |
|
||||
| BINARY | byte array |
|
||||
| NCHAR | java.lang.String |
|
||||
| JSON | java.lang.String |
|
||||
| VARBINARY | byte[] |
|
||||
| GEOMETRY | byte[] |
|
||||
| TDengine DataType | JDBCType | Remark |
|
||||
| ----------------- | -------------------- | --------------------------------------- |
|
||||
| TIMESTAMP | java.sql.Timestamp | |
|
||||
| BOOL | java.lang.Boolean | |
|
||||
| TINYINT | java.lang.Byte | |
|
||||
| TINYINT UNSIGNED | java.lang.Short | only supported in WebSocket connections |
|
||||
| SMALLINT | java.lang.Short | |
|
||||
| SMALLINT UNSIGNED | java.lang.Integer | only supported in WebSocket connections |
|
||||
| INT | java.lang.Integer | |
|
||||
| INT UNSIGNED | java.lang.Long | only supported in WebSocket connections |
|
||||
| BIGINT | java.lang.Long | |
|
||||
| BIGINT UNSIGNED | java.math.BigInteger | only supported in WebSocket connections |
|
||||
| FLOAT | java.lang.Float | |
|
||||
| DOUBLE | java.lang.Double | |
|
||||
| BINARY | byte array | |
|
||||
| NCHAR | java.lang.String | |
|
||||
| JSON | java.lang.String | only supported in tags |
|
||||
| VARBINARY | byte[] | |
|
||||
| GEOMETRY | byte[] | |
|
||||
|
||||
**Note**: JSON type is only supported in tags.
|
||||
Due to historical reasons, the BINARY type in TDengine is not truly binary data and is no longer recommended. Please use VARBINARY type instead.
|
||||
**Note**: Due to historical reasons, the BINARY type in TDengine is not truly binary data and is no longer recommended. Please use VARBINARY type instead.
|
||||
The GEOMETRY type is binary data in little-endian byte order that complies with the WKB standard. For more details, please refer to [Data Types](../../sql-manual/data-types/)
|
||||
For the WKB standard, please refer to [Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/)
|
||||
For the Java connector, you can use the jts library to conveniently create GEOMETRY type objects, serialize them, and write them to TDengine, as sketched below. A complete example is available here: [Geometry Example](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java)
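A minimal sketch of this flow follows. The connection URL and the table `geo_table(ts TIMESTAMP, location GEOMETRY(512))` are placeholders assumed for illustration, not part of the linked example:

```java
import org.locationtech.jts.geom.Coordinate;
import org.locationtech.jts.geom.GeometryFactory;
import org.locationtech.jts.geom.Point;
import org.locationtech.jts.io.ByteOrderValues;
import org.locationtech.jts.io.WKBWriter;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.Timestamp;

public class GeometrySketch {
    public static void main(String[] args) throws Exception {
        // Build a point and serialize it as little-endian WKB, as TDengine expects.
        Point point = new GeometryFactory().createPoint(new Coordinate(3.0, 6.0));
        byte[] wkb = new WKBWriter(2, ByteOrderValues.LITTLE_ENDIAN).write(point);

        // Hypothetical URL and table name, assumed for this sketch.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS://localhost:6030/test?user=root&password=taosdata");
             PreparedStatement ps = conn.prepareStatement(
                "insert into geo_table values (?, ?)")) {
            ps.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
            ps.setBytes(2, wkb);
            ps.executeUpdate();
        }
    }
}
```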
|
||||
|
@ -244,13 +252,13 @@ For WebSocket connections, the configuration parameters in the URL are as follow
|
|||
|
||||
- user: Login username for TDengine, default value 'root'.
|
||||
- password: User login password, default value 'taosdata'.
|
||||
- charset: Specifies the character set for parsing string data when batch fetching is enabled.
|
||||
- batchErrorIgnore: true: when one SQL statement fails during Statement's executeBatch, the remaining statements are still executed. false: no statements are executed after a failed SQL statement. Default value: false.
|
||||
- httpConnectTimeout: Connection timeout in ms, default value 60000.
|
||||
- messageWaitTimeout: Message timeout in ms, default value 60000.
|
||||
- useSSL: Whether SSL is used in the connection.
|
||||
- timezone: Client time zone, default is the current system time zone. It is recommended not to set this parameter, as using the system time zone provides better performance.
|
||||
|
||||
**Note**: Some configuration items (such as: locale, timezone) do not take effect in WebSocket connections.
|
||||
**Note**: Some configuration items (such as: locale, charset) do not take effect in WebSocket connections.
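For illustration, a minimal sketch combining several of the parameters above into a WebSocket URL (host, port, database, and credentials are placeholders; the `jdbc:TAOS-WS` scheme is assumed to select a WebSocket connection):

```java
import java.sql.Connection;
import java.sql.DriverManager;

public class WsUrlSketch {
    public static void main(String[] args) throws Exception {
        // Host, port, database, and credentials are placeholders.
        String url = "jdbc:TAOS-WS://localhost:6041/test"
                + "?user=root&password=taosdata"
                + "&httpConnectTimeout=60000&messageWaitTimeout=60000";
        try (Connection conn = DriverManager.getConnection(url)) {
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}
```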
|
||||
|
||||
**REST Connection**
|
||||
Using a JDBC REST connection does not depend on the client driver. Compared to native JDBC connections, you only need to:
|
||||
|
@ -263,14 +271,13 @@ For REST connections, the configuration parameters in the URL are as follows:
|
|||
|
||||
- user: Login username for TDengine, default value 'root'.
|
||||
- password: User login password, default value 'taosdata'.
|
||||
- charset: Specifies the character set for parsing string data when batch fetching is enabled.
|
||||
- batchErrorIgnore: true: when one SQL statement fails during Statement's executeBatch, the remaining statements are still executed. false: no statements are executed after a failed SQL statement. Default value: false.
|
||||
- httpConnectTimeout: Connection timeout in ms, default value 60000.
|
||||
- httpSocketTimeout: Socket timeout in ms, default value 60000.
|
||||
- useSSL: Whether SSL is used in the connection.
|
||||
- httpPoolSize: REST concurrent request size, default 20.
|
||||
|
||||
**Note**: Some configuration items (such as: locale, timezone) do not take effect in REST connections.
|
||||
**Note**: Some configuration items (such as: locale, charset and timezone) do not take effect in REST connections.
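A corresponding REST URL sketch, using the parameters above (host, database, and credentials are placeholders; the `jdbc:TAOS-RS` scheme selects a REST connection):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class RestUrlSketch {
    public static void main(String[] args) throws Exception {
        // Host, database, and credentials are placeholders.
        String url = "jdbc:TAOS-RS://localhost:6041/test"
                + "?user=root&password=taosdata"
                + "&httpConnectTimeout=60000&httpSocketTimeout=60000&httpPoolSize=20";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            stmt.execute("select server_version()");
        }
    }
}
```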
|
||||
|
||||
:::note
|
||||
|
||||
|
@ -294,7 +301,9 @@ The configuration parameters in properties are as follows:
|
|||
- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: Effective only when using native JDBC connections. Client configuration file directory path, default value on Linux OS is `/etc/taos`, on Windows OS is `C:/TDengine/cfg`.
|
||||
- TSDBDriver.PROPERTY_KEY_CHARSET: Character set used by the client, default value is the system character set.
|
||||
- TSDBDriver.PROPERTY_KEY_LOCALE: Effective only when using native JDBC connections. Client locale, default value is the current system locale.
|
||||
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: Effective only when using native JDBC connections. Client time zone, default value is the current system time zone. Due to historical reasons, we only support part of the POSIX standard, such as UTC-8 (representing Shanghai, China), GMT-8, Asia/Shanghai.
|
||||
- TSDBDriver.PROPERTY_KEY_TIME_ZONE:
|
||||
- Native connections: Client time zone, default value is the current system time zone. Effective globally. Due to historical reasons, we only support part of the POSIX standard, such as UTC-8 (representing Shanghai, China), GMT-8, Asia/Shanghai.
|
||||
- WebSocket connections: Client time zone, default value is the current system time zone. Effective on the connection. Only IANA time zones are supported, such as Asia/Shanghai. It is recommended not to set this parameter, as using the system time zone provides better performance.
|
||||
- TSDBDriver.HTTP_CONNECT_TIMEOUT: Connection timeout, in ms, default value is 60000. Effective only in REST connections.
|
||||
- TSDBDriver.HTTP_SOCKET_TIMEOUT: Socket timeout, in ms, default value is 60000. Effective only in REST connections and when batchfetch is set to false.
|
||||
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: Message timeout, in ms, default value is 60000. Effective only under WebSocket connections.
|
||||
|
@ -303,12 +312,14 @@ The configuration parameters in properties are as follows:
|
|||
- TSDBDriver.PROPERTY_KEY_ENABLE_COMPRESSION: Whether to enable compression during transmission. Effective only when using REST/WebSocket connections. true: enabled, false: not enabled. Default is false.
|
||||
- TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT: Whether to enable auto-reconnect. Effective only when using WebSocket connections. true: enabled, false: not enabled. Default is false.
|
||||
|
||||
> **Note**: Enabling auto-reconnect is only effective for simple SQL execution, schema-less writing, and data subscription. It is ineffective for parameter binding. Auto-reconnect is only effective for connections established through parameters specifying the database, and ineffective for later `use db` statements to switch databases.
|
||||
> **Note**: Enabling auto-reconnect is only effective for simple SQL execution, schema-less writing, and data subscription. It is ineffective for parameter binding. Auto-reconnect is only effective for connections established through parameters specifying the database, and ineffective for later `use db` statements to switch databases.
|
||||
|
||||
- TSDBDriver.PROPERTY_KEY_RECONNECT_INTERVAL_MS: Auto-reconnect retry interval, in milliseconds, default value 2000. Effective only when PROPERTY_KEY_ENABLE_AUTO_RECONNECT is true.
|
||||
- TSDBDriver.PROPERTY_KEY_RECONNECT_RETRY_COUNT: Auto-reconnect retry count, default value 3, effective only when PROPERTY_KEY_ENABLE_AUTO_RECONNECT is true.
|
||||
- TSDBDriver.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: Disable SSL certificate validation. Effective only when using WebSocket connections. true: validation disabled, false: validation enabled. Default is false.
|
||||
|
||||
- TSDBDriver.PROPERTY_KEY_APP_NAME: App name, can be used for display in the `show connections` query result. Effective only when using WebSocket connections. Default value is java.
|
||||
- TSDBDriver.PROPERTY_KEY_APP_IP: App IP, can be used for display in the `show connections` query result. Effective only when using WebSocket connections. Default value is empty.
|
||||
|
||||
Additionally, for native JDBC connections, other parameters such as log level and SQL length can be configured via the URL and Properties.
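A minimal sketch of connecting with a Properties object, using only the keys documented above (host and database are placeholders):

```java
import com.taosdata.jdbc.TSDBDriver;

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class PropertiesSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
        props.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
        // Enable auto-reconnect with the retry knobs described above.
        props.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
        props.setProperty(TSDBDriver.PROPERTY_KEY_RECONNECT_INTERVAL_MS, "2000");
        props.setProperty(TSDBDriver.PROPERTY_KEY_RECONNECT_RETRY_COUNT, "3");

        // Host and database are placeholders.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS-WS://localhost:6041/test", props)) {
            System.out.println("connected");
        }
    }
}
```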
|
||||
|
||||
**Priority of Configuration Parameters**
|
||||
|
@ -489,16 +500,16 @@ For example: if the password is specified as taosdata in the URL and as taosdemo
|
|||
|
||||
The following interface methods return `true` for supported features; methods not explicitly listed return `false`.
|
||||
|
||||
| Interface Method | Description |
|
||||
|--------------------------------------------------------|-----------------------------------------------------|
|
||||
| `boolean nullsAreSortedAtStart()` | Determines if `NULL` values are sorted at the start |
|
||||
| `boolean storesLowerCaseIdentifiers()` | Determines if the database stores identifiers in lowercase |
|
||||
| `boolean supportsAlterTableWithAddColumn()` | Determines if the database supports adding columns with `ALTER TABLE` |
|
||||
| `boolean supportsAlterTableWithDropColumn()` | Determines if the database supports dropping columns with `ALTER TABLE` |
|
||||
| `boolean supportsColumnAliasing()` | Determines if the database supports column aliasing |
|
||||
| `boolean supportsGroupBy()` | Determines if the database supports `GROUP BY` statements |
|
||||
| `boolean isCatalogAtStart()` | Determines if the catalog name appears at the start of the fully qualified name in the database |
|
||||
| `boolean supportsCatalogsInDataManipulation()` | Determines if the database supports catalog names in data manipulation statements |
|
||||
| Interface Method | Description |
|
||||
| ---------------------------------------------- | ----------------------------------------------------------------------------------------------- |
|
||||
| `boolean nullsAreSortedAtStart()` | Determines if `NULL` values are sorted at the start |
|
||||
| `boolean storesLowerCaseIdentifiers()` | Determines if the database stores identifiers in lowercase |
|
||||
| `boolean supportsAlterTableWithAddColumn()` | Determines if the database supports adding columns with `ALTER TABLE` |
|
||||
| `boolean supportsAlterTableWithDropColumn()` | Determines if the database supports dropping columns with `ALTER TABLE` |
|
||||
| `boolean supportsColumnAliasing()` | Determines if the database supports column aliasing |
|
||||
| `boolean supportsGroupBy()` | Determines if the database supports `GROUP BY` statements |
|
||||
| `boolean isCatalogAtStart()` | Determines if the catalog name appears at the start of the fully qualified name in the database |
|
||||
| `boolean supportsCatalogsInDataManipulation()` | Determines if the database supports catalog names in data manipulation statements |
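These capabilities are queried through the standard `java.sql.DatabaseMetaData` interface; a brief sketch (URL and credentials are placeholders):

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;

public class MetaDataSketch {
    public static void main(String[] args) throws Exception {
        // Host, database, and credentials are placeholders.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS-WS://localhost:6041/test?user=root&password=taosdata")) {
            DatabaseMetaData meta = conn.getMetaData();
            System.out.println("GROUP BY supported: " + meta.supportsGroupBy());
            System.out.println("ALTER TABLE ADD COLUMN supported: "
                    + meta.supportsAlterTableWithAddColumn());
        }
    }
}
```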
|
||||
|
||||
### Connection Features
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
---
|
||||
toc_max_heading_level: 4
|
||||
sidebar_label: Go
|
||||
title: Go Client Library
|
||||
slug: /tdengine-reference/client-libraries/go
|
||||
|
@ -21,24 +22,25 @@ Supports Go 1.14 and above.
|
|||
|
||||
## Version History
|
||||
|
||||
| driver-go Version | Major Changes | TDengine Version |
|
||||
|------------------|------------------------------------------------------------------|-------------------|
|
||||
| v3.5.8 | Fixed null pointer exception. | - |
|
||||
| v3.5.7 | taosWS and taosRestful support passing request id. | - |
|
||||
| v3.5.6 | Improved websocket query and insert performance. | 3.3.2.0 and higher |
|
||||
| v3.5.5 | Restful supports skipping SSL certificate check. | - |
|
||||
| v3.5.4 | Compatible with TDengine 3.3.0.0 tmq raw data. | - |
|
||||
| v3.5.3 | Refactored taosWS. | - |
|
||||
| v3.5.2 | Websocket compression and optimized tmq subscription performance. | 3.2.3.0 and higher |
|
||||
| v3.5.1 | Native stmt query and geometry type support. | 3.2.1.0 and higher |
|
||||
| v3.5.0 | Support tmq get assignment and seek offset. | 3.0.5.0 and higher |
|
||||
| v3.3.1 | Schemaless protocol insert based on websocket. | 3.0.4.1 and higher |
|
||||
| v3.1.0 | Provided Kafka-like subscription API. | - |
|
||||
| v3.0.4 | Added request id related interfaces. | 3.0.2.2 and higher |
|
||||
| v3.0.3 | Websocket-based statement insert. | - |
|
||||
| v3.0.2 | Websocket-based data query and insert. | 3.0.1.5 and higher |
|
||||
| v3.0.1 | Websocket-based message subscription. | - |
|
||||
| v3.0.0 | Adapted to TDengine 3.0 query and insert. | 3.0.0.0 and higher |
|
||||
| driver-go Version | Major Changes | TDengine Version |
|
||||
|-------------------|-------------------------------------------------------------------------------------------------|--------------------|
|
||||
| v3.6.0 | stmt2 native interface, DSN supports passwords containing special characters (url.QueryEscape). | 3.3.5.0 and higher |
|
||||
| v3.5.8 | Fixed null pointer exception. | - |
|
||||
| v3.5.7 | taosWS and taosRestful support passing request id. | - |
|
||||
| v3.5.6 | Improved websocket query and insert performance. | 3.3.2.0 and higher |
|
||||
| v3.5.5 | Restful supports skipping SSL certificate check. | - |
|
||||
| v3.5.4 | Compatible with TDengine 3.3.0.0 tmq raw data. | - |
|
||||
| v3.5.3 | Refactored taosWS. | - |
|
||||
| v3.5.2 | Websocket compression and optimized tmq subscription performance. | 3.2.3.0 and higher |
|
||||
| v3.5.1 | Native stmt query and geometry type support. | 3.2.1.0 and higher |
|
||||
| v3.5.0 | Support tmq get assignment and seek offset. | 3.0.5.0 and higher |
|
||||
| v3.3.1 | Schemaless protocol insert based on websocket. | 3.0.4.1 and higher |
|
||||
| v3.1.0 | Provided Kafka-like subscription API. | - |
|
||||
| v3.0.4 | Added request id related interfaces. | 3.0.2.2 and higher |
|
||||
| v3.0.3 | Websocket-based statement insert. | - |
|
||||
| v3.0.2 | Websocket-based data query and insert. | 3.0.1.5 and higher |
|
||||
| v3.0.1 | Websocket-based message subscription. | - |
|
||||
| v3.0.0 | Adapted to TDengine 3.0 query and insert. | 3.0.0.0 and higher |
|
||||
|
||||
|
||||
## Exceptions and Error Codes
|
||||
|
@ -136,6 +138,8 @@ Full form of DSN:
|
|||
username:password@protocol(address)/dbname?param=value
|
||||
```
|
||||
|
||||
When the password contains special characters, it needs to be escaped using url.QueryEscape.
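A minimal sketch of escaping the password with the standard library before assembling the DSN (credentials and address are hypothetical):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical credentials; '@' and '#' must not appear raw in the DSN.
	password := "p@ss#word!"
	dsn := fmt.Sprintf("root:%s@ws(localhost:6041)/test", url.QueryEscape(password))
	fmt.Println(dsn) // root:p%40ss%23word%21@ws(localhost:6041)/test
}
```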
|
||||
|
||||
##### Native Connection
|
||||
|
||||
Import the driver:
|
||||
|
@ -493,6 +497,43 @@ The `af` package provides more interfaces using native connections for parameter
|
|||
* **Interface Description**: Closes the statement.
|
||||
* **Return Value**: Error information.
|
||||
|
||||
From version 3.6.0, the `stmt2` interface for binding parameters is provided.
|
||||
|
||||
* `func (conn *Connector) Stmt2(reqID int64, singleTableBindOnce bool) *Stmt2`
|
||||
* **Interface Description**: Returns a Stmt2 object bound to this connection.
|
||||
* **Parameter Description**:
|
||||
* `reqID`: Request ID.
|
||||
* `singleTableBindOnce`: Indicates whether a single child table is bound only once during a single execution.
|
||||
* **Return Value**: Stmt2 object.
|
||||
|
||||
* `func (s *Stmt2) Prepare(sql string) error`
|
||||
* **Interface Description**: Prepares an SQL.
|
||||
* **Parameter Description**:
|
||||
* `sql`: The statement for parameter binding.
|
||||
* **Return Value**: Error information.
|
||||
|
||||
* `func (s *Stmt2) Bind(params []*stmt.TaosStmt2BindData) error`
|
||||
* **Interface Description**: Binds data to the prepared statement.
|
||||
* **Parameter Description**:
|
||||
* `params`: The data to bind.
|
||||
* **Return Value**: Error information.
|
||||
|
||||
* `func (s *Stmt2) Execute() error`
|
||||
* **Interface Description**: Executes the batch.
|
||||
* **Return Value**: Error information.
|
||||
|
||||
* `func (s *Stmt2) GetAffectedRows() int`
|
||||
* **Interface Description**: Gets the number of affected rows (only valid for insert statements).
|
||||
* **Return Value**: Number of affected rows.
|
||||
|
||||
* `func (s *Stmt2) UseResult() (driver.Rows, error)`
|
||||
* **Interface Description**: Retrieves the result set (only valid for query statements).
|
||||
* **Return Value**: Result set Rows object, error information.
|
||||
|
||||
* `func (s *Stmt2) Close() error`
|
||||
* **Interface Description**: Closes the statement.
|
||||
* **Return Value**: Error information.
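A minimal end-to-end sketch of the stmt2 flow above. It assumes driver-go v3.6.0+, a reachable local server, a database `test` containing a table `d0 (ts TIMESTAMP, val INT)`, and the field layout of `stmt.TaosStmt2BindData` (`TableName`, column-wise `Cols`); all of these are assumptions for illustration:

```go
package main

import (
	"database/sql/driver"
	"fmt"
	"log"
	"time"

	"github.com/taosdata/driver-go/v3/af"
	"github.com/taosdata/driver-go/v3/common/stmt"
)

func main() {
	conn, err := af.Open("localhost", "root", "taosdata", "test", 6030)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	s2 := conn.Stmt2(0x1, false) // reqID and singleTableBindOnce as documented above
	defer s2.Close()

	if err = s2.Prepare("insert into d0 values(?, ?)"); err != nil {
		log.Fatal(err)
	}
	// One bind unit per table; Cols holds one slice per column (assumed layout).
	params := []*stmt.TaosStmt2BindData{{
		TableName: "d0",
		Cols: [][]driver.Value{
			{time.Now()}, // ts column
			{int32(42)},  // val column
		},
	}}
	if err = s2.Bind(params); err != nil {
		log.Fatal(err)
	}
	if err = s2.Execute(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("affected rows:", s2.GetAffectedRows())
}
```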
|
||||
|
||||
The `ws/stmt` package provides interfaces for parameter binding via WebSocket
|
||||
|
||||
* `func (c *Connector) Init() (*Stmt, error)`
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
---
|
||||
toc_max_heading_level: 4
|
||||
sidebar_label: Rust
|
||||
title: Rust Client Library
|
||||
slug: /tdengine-reference/client-libraries/rust
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
---
|
||||
toc_max_heading_level: 4
|
||||
sidebar_label: Python
|
||||
title: Python Client Library
|
||||
slug: /tdengine-reference/client-libraries/python
|
||||
|
@ -49,12 +50,14 @@ Supports Python 3.0 and above.
|
|||
- The platforms supported by native connections are consistent with those supported by the TDengine client driver.
|
||||
- WebSocket/REST connections support all platforms that can run Python.
|
||||
|
||||
## Versions History
|
||||
## Version History
|
||||
|
||||
Python Connector historical versions (it is recommended to use the latest version of `taospy`):
|
||||
|
||||
|Python Connector Version | Major Changes | TDengine Version|
|
||||
| -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- |
|
||||
|2.7.21 | Native connection supports STMT2 writing | - |
|
||||
|2.7.19 | Support Apache Superset connection to TDengine Cloud data source | - |
|
||||
|2.7.18 | Support Apache SuperSet BI Tools. | - |
|
||||
|2.7.16 | Add subscription configuration (session.timeout.ms, max.poll.interval.ms). | - |
|
||||
|2.7.15 | Added support for VARBINARY and GEOMETRY types. | - |
|
||||
|
@ -136,7 +139,7 @@ TDengine currently supports timestamp, numeric, character, boolean types, and th
|
|||
| [tmq_consumer.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq_consumer.py) | tmq subscription |
|
||||
| [native_all_type_query.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/native_all_type_query.py) | Example supporting all types |
|
||||
| [native_all_type_stmt.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/native_all_type_stmt.py) | Parameter binding example supporting all types |
|
||||
|
||||
| [test_stmt2.py](https://github.com/taosdata/taos-connector-python/blob/main/tests/test_stmt2.py) | Example of STMT2 writing |
|
||||
Example program source code can be found at:
|
||||
|
||||
1. [More native example programs](https://github.com/taosdata/taos-connector-python/tree/main/examples)
|
||||
|
@ -429,51 +432,40 @@ TaosResult object can be iterated over to retrieve queried data.
|
|||
- **Exceptions**: Throws `SchemalessError` if operation fails.
|
||||
|
||||
#### Parameter Binding
|
||||
|
||||
- `def statement(self, sql=None)`
|
||||
- **Interface Description**: Creates a stmt object using the connection object; if sql is not empty, prepare is called.
|
||||
- `sql`: Precompiled SQL statement.
|
||||
- **Return Value**: stmt object.
|
||||
- **Exception**: Throws `StatementError` exception on failure.
|
||||
- `def statement2(self, sql=None, option=None)`
|
||||
- **Interface Description**: Creates an STMT2 object using the connection object.
|
||||
- **Parameter Description**
|
||||
- `sql`: The SQL statement to bind; if not empty, the `prepare` function is called.
|
||||
- `option`: A `TaosStmt2Option` class instance.
|
||||
- **Return Value**: STMT2 object.
|
||||
- **Exception**: Throws `ConnectionError` on failure.
|
||||
- `def prepare(self, sql)`
|
||||
- **Interface Description**: Binds a precompiled sql statement.
|
||||
- **Parameter Description**:
|
||||
- `sql`: Precompiled SQL statement.
|
||||
- **Exception**: Throws `StatementError` exception on failure.
|
||||
- `def set_tbname(self, name)`
|
||||
- **Interface Description**: Sets the table name for data to be written to.
|
||||
- **Parameter Description**:
|
||||
- `name`: Table name, if you need to specify a database, for example: `db_name.table_name`.
|
||||
- **Exception**: Throws `StatementError` exception on failure.
|
||||
- `def set_tbname_tags(self, name, tags):`
|
||||
- **Interface Description**: Sets the table and Tags data, used for automatic table creation.
|
||||
- **Parameter Description**:
|
||||
- `name`: Table name, if you need to specify a database, for example: `db_name.table_name`.
|
||||
- `tags`: Tags data.
|
||||
- **Exception**: Throws `StatementError` exception on failure.
|
||||
- `def bind_param(self, params, add_batch=True)`
|
||||
- **Interface Description**: Binds a set of data and submits.
|
||||
- **Parameter Description**:
|
||||
- `params`: Data to bind.
|
||||
- `add_batch`: Whether to submit the bound data.
|
||||
- **Exception**: Throws `StatementError` exception on failure.
|
||||
- `def bind_param_batch(self, binds, add_batch=True)`
|
||||
- **Interface Description**: Binds multiple sets of data and submits.
|
||||
- **Parameter Description**:
|
||||
- `binds`: Data to bind.
|
||||
- `add_batch`: Whether to submit the bound data.
|
||||
- **Exception**: Throws `StatementError` exception on failure.
|
||||
- `def add_batch(self)`
|
||||
- **Interface Description**: Submits the bound data.
|
||||
- **Exception**: Throws `StatementError` exception on failure.
|
||||
- `def execute(self)`
|
||||
- **Interface Description**: Executes and writes all the bound data.
|
||||
- **Exception**: Throws `StatementError` exception on failure.
|
||||
- `def affected_rows(self)`
|
||||
- **Interface Description**: Gets the number of rows written.
|
||||
- **Return Value**: Number of rows written.
|
||||
- `def close(self)`
|
||||
- **Interface Description**: Closes the stmt object.
|
||||
- **Interface Description**: Binds a precompiled SQL statement.
|
||||
- **Parameter Description**:
|
||||
- `sql`: Precompiled SQL statement.
|
||||
- **Exception**: Throws `StatementError` on failure.
|
||||
- `def bind_param(self, tbnames, tags, datas)`
|
||||
- **Interface Description**: Binds data as independent arrays.
|
||||
- **Parameter Description**:
|
||||
- `tbnames`: Array of table names to bind; data type is list.
|
||||
- `tags`: Array of tag column values to bind; data type is list.
|
||||
- `datas`: Array of data column values to bind; data type is list.
|
||||
- **Exception**: Throws `StatementError` on failure.
|
||||
- `def bind_param_with_tables(self, tables)`
|
||||
- **Interface Description**: Binds data in an independent-table format; each table object carries the table name, tag values, and data columns.
|
||||
- **Parameter Description**:
|
||||
- `tables`: Array of `BindTable` independent table objects.
|
||||
- **Exception**: Throws `StatementError` on failure.
|
||||
- `def execute(self) -> int:`
|
||||
- **Interface Description**: Executes and writes all bound data.
|
||||
- **Return Value**: Number of affected rows.
|
||||
- **Exception**: Throws `QueryError` on failure.
|
||||
- `def result(self)`
|
||||
- **Interface Description**: Gets the parameter-binding query result set.
|
||||
- **Return Value**: The TaosResult object.
|
||||
- `def close(self)`
|
||||
- **Interface Description**: Closes the STMT2 object.
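A minimal sketch of this STMT2 flow. The database `test` and the super table `meters(ts TIMESTAMP, val INT) TAGS (group_id INT)` are hypothetical and assumed to exist; the column-wise layout of `datas` follows the parameter descriptions above:

```python
import taos

conn = taos.connect(database="test")
stmt2 = conn.statement2("insert into ? using meters tags(?) values(?, ?)")

tbnames = ["d0"]                      # one entry per sub-table
tags = [[1]]                          # tag values, one list per table
datas = [[                            # column-wise data, one block per table
    [1700000000000, 1700000001000],   # ts column (epoch ms)
    [10, 11],                         # val column
]]
stmt2.bind_param(tbnames, tags, datas)
print("affected rows:", stmt2.execute())
stmt2.close()
conn.close()
```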
|
||||
|
||||
|
||||
#### Data Subscription
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
---
|
||||
toc_max_heading_level: 4
|
||||
sidebar_label: Node.js
|
||||
title: Node.js Client Library
|
||||
slug: /tdengine-reference/client-libraries/node
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
---
|
||||
toc_max_heading_level: 4
|
||||
sidebar_label: C#
|
||||
title: C# Client Library
|
||||
slug: /tdengine-reference/client-libraries/csharp
|
||||
|
@ -22,13 +23,14 @@ import RequestId from "../../assets/resources/_request_id.mdx";
|
|||
|
||||
## Version History
|
||||
|
||||
| Connector Version | Major Changes | TDengine Version |
|
||||
|------------------|-------------------------------------------------|-------------------|
|
||||
| 3.1.4 | Improved WebSocket query and insert performance. | 3.3.2.0 and higher |
|
||||
| 3.1.3 | Supported WebSocket auto-reconnect. | - |
|
||||
| 3.1.2 | Fixed schemaless resource release. | - |
|
||||
| 3.1.1 | Supported varbinary and geometry types. | - |
|
||||
| 3.1.0 | WebSocket uses a native C# implementation. | 3.2.1.0 and higher |
|
||||
| Connector Version | Major Changes | TDengine Version |
|
||||
|-------------------|------------------------------------------------------------|--------------------|
|
||||
| 3.1.5 | Fix WebSocket encoding error for Chinese character length. | - |
|
||||
| 3.1.4 | Improved WebSocket query and insert performance. | 3.3.2.0 and higher |
|
||||
| 3.1.3 | Supported WebSocket auto-reconnect. | - |
|
||||
| 3.1.2 | Fixed schemaless resource release. | - |
|
||||
| 3.1.1 | Supported varbinary and geometry types. | - |
|
||||
| 3.1.0 | WebSocket uses a native C# implementation. | 3.2.1.0 and higher |
|
||||
|
||||
## Exceptions and Error Codes
|
||||
|
||||
|
|
|
@ -124,7 +124,7 @@ In addition to this, the WebSocket connection method also supports 32-bit applic
|
|||
| v1.1.0 | 1. Supports view functionality. <br/>2. Supports VARBINARY/GEOMETRY data types. <br/>3. Supports ODBC 32-bit WebSocket connection method (Enterprise edition only). <br/>4. Supports ODBC data source configuration dialog settings for compatibility adaptation options for industrial software like KingSCADA, Kepware, etc. (Enterprise edition only). | 3.3.3.0 and higher |
|
||||
| v1.0.2 | Supports CP1252 character encoding. | 3.2.3.0 and higher |
|
||||
| v1.0.1 | 1. Supports DSN settings for BI mode, in BI mode TDengine database does not return system database and supertable subtable information. <br/>2. Refactored character set conversion module, improving read and write performance. <br/> 3. Default connection method in ODBC data source configuration dialog changed to "WebSocket". <br/>4. Added "Test Connection" control in ODBC data source configuration dialog. <br/>5. ODBC data source configuration supports Chinese/English interface. | - |
|
||||
| v1.0.0.0 | Initial release, supports interacting with Tdengine database to read and write data, refer to the "API Reference" section for details. | 3.2.2.0 and higher |
|
||||
| v1.0.0.0 | Initial release, supports interacting with TDengine database to read and write data, refer to the "API Reference" section for details. | 3.2.2.0 and higher |
|
||||
|
||||
## Data Type Mapping
|
||||
|
||||
|
|
|
@ -252,7 +252,7 @@ Description:
|
|||
- code: (`int`) 0 represents success.
|
||||
- column_meta: (`[][3]any`) Column information, each column is described by three values: column name (string), column type (string), and type length (int).
|
||||
- rows: (`int`) Number of rows returned.
|
||||
- data: (`[][]any`) Specific data content (time format only supports RFC3339, result set for timezone 0).
|
||||
- data: (`[][]any`) Specific data content (time format only supports RFC3339; the result set is in timezone 0, and when tz is specified, times in the corresponding time zone are returned).
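For example, a hypothetical response for a one-row query might look like this:

```json
{
  "code": 0,
  "column_meta": [
    ["ts", "TIMESTAMP", 8],
    ["current", "FLOAT", 4]
  ],
  "rows": 1,
  "data": [
    ["2024-01-01T00:00:00.000Z", 10.3]
  ]
}
```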
|
||||
|
||||
Column types use the following strings:
|
||||
|
||||
|
@ -434,7 +434,6 @@ curl http://<fqdn>:<port>/rest/login/<username>/<password>
|
|||
|
||||
Here, `fqdn` is the FQDN or IP address of the TDengine database, `port` is the port number of the TDengine service, `username` is the database username, and `password` is the database password. The response is in JSON format, with the following fields:
|
||||
|
||||
- status: Flag of the request result.
|
||||
- code: Return code.
|
||||
- desc: Authorization code.
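A successful login response has this shape (the `desc` token below is illustrative):

```json
{
  "status": "succ",
  "code": 0,
  "desc": "/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"
}
```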
|
||||
|
||||
|
|
|
@ -534,4 +534,6 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x80004000 | Invalid message | The subscribed data is illegal, generally does not occur | Check the client-side error logs for details |
|
||||
| 0x80004001 | Consumer mismatch | The vnode requested for subscription and the reassigned vnode are inconsistent, usually occurs when new consumers join the same consumer group | Internal error, not exposed to users |
|
||||
| 0x80004002 | Consumer closed | The consumer no longer exists | Check if it has already been closed |
|
||||
| 0x80004017 | Invalid status, please subscribe topic first | tmq status is invalid | poll was called without first calling subscribe; call subscribe first |
|
||||
| 0x80004100 | Stream task not exist | The stream computing task does not exist | Check the server-side error logs |
|
||||
|
||||
|
|
|
@ -297,3 +297,22 @@ Reporting this error indicates that the first connection to the cluster was succ
|
|||
|
||||
Therefore, first, check whether all ports on the server and cluster (default 6030 for native connections and 6041 for HTTP connections) are open; Next, check if the client's hosts file has configured the fqdn and IP information for all dnodes in the cluster.
|
||||
If the issue still cannot be resolved, contact TDengine technical support.
|
||||
|
||||
### 32 Why is the original database lost and the cluster ID changed when the data directory dataDir of the database remains unchanged on the same server?
|
||||
Background: When the TDengine server process (taosd) starts, if there are no valid data file subdirectories (such as mnode, dnode, and vnode) under the data directory (dataDir, which is specified in the configuration file taos.cfg), these directories will be created automatically. When a new mnode directory is created, a new cluster ID will be allocated to generate a new cluster.
|
||||
|
||||
Cause analysis: The data directory dataDir of taosd can point to multiple different mount points. If these mount points are not configured for automatic mounting in the fstab file, after the server restarts, dataDir will only exist as a normal directory of the local disk, and it will not point to the mounted disk as expected. At this point, if the taosd service is started, it will create a new directory under dataDir to generate a new cluster.
|
||||
|
||||
Impact of the problem: After the server is restarted, the original database is lost (note: it is not really lost, but the original data disk is not attached and cannot be seen for the time being) and the cluster ID changes, resulting in the inability to access the original database. For enterprise users, if they have been authorized for the cluster ID, they will also find that the machine code of the cluster server has not changed, but the original authorization has expired. If the problem is not monitored or found and handled in time, the user will not notice that the original database has been lost, resulting in losses and increased operation and maintenance costs.
|
||||
|
||||
Problem solving: You should configure the automatic mount of the dataDir directory in the fstab file to ensure that dataDir always points to the expected mount point and directory, as in the example below. At this point, restarting the server will retrieve the original database and cluster. In a subsequent version, we will develop a function to enable taosd to exit in the startup phase when it detects that the dataDir changes before and after startup, and provide corresponding error prompts.
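For example, an `/etc/fstab` entry of the following shape (device UUID and mount point are placeholders) keeps the data disk attached to the directory dataDir points to:

```text
# /etc/fstab — mount the data disk at the directory dataDir points to
UUID=1234abcd-0000-0000-0000-000000000000  /data/taos  ext4  defaults  0  2
```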
|
||||
|
||||
### 33 How to solve msvcp140.dll loss when running TDengine on Windows platform?
|
||||
1. Reinstall Microsoft Visual C++ Redistributable: As msvcp140.dll is part of the Microsoft Visual C++ Redistributable, reinstalling this package usually resolves most issues. You can download the corresponding version from the official Microsoft website for installation.
|
||||
2. Manually download and replace the msvcp140.dll file online: You can download the msvcp140.dll file from a reliable source and copy it to the corresponding directory in the system. Ensure that the downloaded file matches your system architecture (32-bit or 64-bit) and ensure the security of the source.
|
||||
|
||||
### 34 Which is faster: querying data from a super table with a TAG filter, or directly from a child table?
|
||||
Querying a child table directly is faster. Querying a super table with a TAG filter is designed for convenience, as it can filter data from multiple child tables at the same time. If the goal is performance and the target child table is known, querying the child table directly achieves higher performance.
|
||||
|
||||
### 35 How to view data compression ratio indicators?
|
||||
Currently, TDengine only provides compression ratios based on tables, not databases or the entire system. To view the compression ratios, execute the `SHOW TABLE DISTRIBUTED table_name;` command in the client taos-CLI. The table_name can be a super table, regular table, or subtable. For details [Click Here](https://docs.tdengine.com/tdengine-reference/sql-manual/show-commands/#show-table-distributed)
|
|
@ -25,6 +25,14 @@ Download links for TDengine 3.x version installation packages are as follows:
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.3.5.2
|
||||
|
||||
<Release type="tdengine" version="3.3.5.2" />
|
||||
|
||||
## 3.3.5.0
|
||||
|
||||
<Release type="tdengine" version="3.3.5.0" />
|
||||
|
||||
## 3.3.4.8
|
||||
|
||||
<Release type="tdengine" version="3.3.4.8" />
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
title: TDengine 3.3.4.8 Release Notes
|
||||
sidebar_label: 3.3.4.8
|
||||
description: Version 3.3.4.8 Notes
|
||||
slug: /release-history/release-notes/3.3.4.8
|
||||
slug: /release-history/release-notes/3-3-4-8
|
||||
---
|
||||
|
||||
## New Features
|
||||
|
|
|
@ -0,0 +1,85 @@
|
|||
---
|
||||
title: TDengine 3.3.5.0 Release Notes
|
||||
sidebar_label: 3.3.5.0
|
||||
description: Version 3.3.5.0 Notes
|
||||
slug: /release-history/release-notes/3-3-5-0
|
||||
---
|
||||
|
||||
## Features
|
||||
1. feat: refactor MQTT to improve stability and performance
|
||||
2. feat: refactor taosX incremental backup-restore
|
||||
3. feat: add stmt2 apis in JDBC via websocket connection
|
||||
4. feat: add stmt2 api in Rust connector
|
||||
5. feat: add error codes in error prompts in taos-CLI
|
||||
6. feat: superSet can connect TDengine with python connector
|
||||
7. feat: configurable grafana dashboards in explorer management
|
||||
8. feat: add taosX-agent in-memory cache queue capacity option
|
||||
|
||||
## Enhancements
|
||||
1. enh: adjust the reporting mechanism of telemetry.
|
||||
2. enh: support for SQL-based statistics of disk space for a specified DB.
|
||||
3. enh: add memory management for SQL queries on the server side
|
||||
4. enh: interval clause allows the use of the AUTO keyword to specify the window offset.
|
||||
5. enh: reduce the impact on data write performance during data migration across multi-level storage
|
||||
6. enh: migrate from angular to react for grafana 11.3+
|
||||
7. enh: refactor taosAdapter websocket api for a slightly better perf
|
||||
8. enh: add health state in taosX task status
|
||||
9. enh: taosX add configurations to handle exceptions
|
||||
10. enh: support setting options for client connections, including time zone, character set, user IP, and user name.
|
||||
11. enh: taosdump support retry after connection timeout or broken
|
||||
12. enh: allow creating index for tags that already subscribed
|
||||
13. enh: taosX now support literal special chars in password
|
||||
14. enh: improve data write performance when Last Cache is activated.
|
||||
15. enh: compact command supports automatic execution, concurrency setting, and progress observation.
|
||||
16. enh: support update global configuration parameters through SQL statements and persisting them.
|
||||
17. enh: update the default compression method for all data types to improve the compression ratio in most scenarios.
|
||||
18. enh: taosBenchmark --nodrop fix for mac/window
|
||||
19. enh: prohibit the simultaneous execution of DB compaction and replica change operations (Enterprise).
|
||||
20. enh: taosdump support primary key tables
|
||||
21. enh: display user IP and name in the results of the SHOW QUERIES and SHOW CONNECTIONS statements.
|
||||
22. enh: (JDBC)support batch insertion into multiple tables
|
||||
23. enh: support for dynamically modifying the dataDir parameter for multi-level storage.
|
||||
24. enh: prefer db file under data_dir
|
||||
25. enh: enforce users to set strong passwords, which must be 8 to 16 characters in length and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters.
|
||||
26. enh: improve the speed at which clients acquire the new Leader.
|
||||
27. enh: support negative regex pattern in opc point selector
|
||||
|
||||
## Fixes
|
||||
1. fix: the potential for deadlocks when updating checkpoints in stream computing under high-load scenarios.
|
||||
2. fix: write tmq data into target error when terrno already set
|
||||
3. fix: taosd cannot start when there is data corruption in a block within the WAL
|
||||
4. fix: taosBenchmark fails when taosd disconnected in replica 2/3
|
||||
5. fix: log files being lost when they are switched frequently.
|
||||
6. fix: the stream computing stops due to the data update within the window.
|
||||
7. fix: libtaosws.so sets an incorrect error code when the connection is terminated while fetching data.
|
||||
8. fix: taosX opc error in case of @-prefixed name
|
||||
9. fix: fix permission denied with show vgroups sql in cloud
|
||||
10. fix: fix sql syntax error when migrating from large stables with compress options
|
||||
11. fix: incorrect memory estimation for vnode usage
|
||||
12. fix: failed to perform UNION ALL query on constant strings of the varchar type.
|
||||
13. fix: leader transfer during the execution of transaction may cause deadlock.
|
||||
14. fix: rust connector invalid pointer addr in ws_stmt_get_tag_fields
|
||||
15. fix: union statement fails when executing with subqueries containing multiple NULLs.
|
||||
16. fix: the pause operation of stream computing might fail.
|
||||
17. fix: when writing data into a sub-table with a table name length of 192 characters using an SQL statement, errors may occur if the table name is enclosed in backticks (`).
|
||||
18. fix: when performing a join query on super tables across different databases, if each database contains only one vnode, the query will return an error.
|
||||
19. fix: no enough disk space cause taosX panic
|
||||
20. fix: when write data to a super table, using both bound and unbound simultaneously will trigger an exception.
|
||||
21. fix: metrics non-exist cause panic when connect with agent
|
||||
22. fix: when creating indexes for tag with a large character length, taosd may crash.
|
||||
23. fix: when the input parameters for the functions first, last, last_row, and char exceed 127, the taosd may crash. https://github.com/taosdata/TDengine/issues/29241
|
||||
24. fix: when the number of rows in the result set of the LIMIT statement exceeds the size of a single data block, the returned count does not match the expectation.
|
||||
25. fix: when synchronizing data between clusters, if the target task is deleted, the source cluster may run out of memory
|
||||
26. fix: metadata read-write lock misconfiguration leads to a very small chance of blocking writes.
|
||||
27. fix: when importing CSV files using the INSERT INTO statement on the Windows platform, the absence of a newline character at the end of the file may lead to an issue of infinite loop reading.
|
||||
28. fix: after the tags of the table are updated, the stream computing fails to recognize and apply the new values.
|
||||
29. fix: fix kafka timeout issue and improve performance and stability
|
||||
30. fix: in sql queries, when both 'is null' and invalid 'in' filter conditions are included simultaneously, the query results are incorrect. https://github.com/taosdata/TDengine/issues/29067
|
||||
31. fix: sql queries containing both 'IN' and 'BETWEEN' filter conditions result in incorrect query results. https://github.com/taosdata/TDengine/issues/28989
|
||||
32. fix: when performing multiplication or division operations between timestamp and numeric types, the results are incorrect. https://github.com/taosdata/TDengine/issues/28339
|
||||
33. fix: data type conversion error in the IN statement leads to incorrect query results. https://github.com/taosdata/TDengine/issues/29047 https://github.com/taosdata/TDengine/issues/28902
|
||||
34. fix: the error in filtering results when constant conditions are combined with OR operators. https://github.com/taosdata/TDengine/issues/28904
|
||||
35. fix: when performing subtraction operation on timestamp type, the negative value is not considered. https://github.com/taosdata/TDengine/issues/28906
|
||||
36. fix: tag values may display incorrectly when using GROUP BY tag syntax
|
||||
37. fix: gcc < 10 bug cause taosX compile error
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
---
|
||||
title: TDengine 3.3.5.2 Release Notes
|
||||
sidebar_label: 3.3.5.2
|
||||
description: Version 3.3.5.2 Notes
|
||||
slug: /release-history/release-notes/3-3-5-2
|
||||
---
|
||||
|
||||
## Features
|
||||
1. feat: taosX now support multiple stables with template for MQTT
|
||||
|
||||
## Enhancements
|
||||
1. enh: improve taosX error message if database is invalid
|
||||
2. enh: use poetry group dependencies and reduce dep when install [#251](https://github.com/taosdata/taos-connector-python/issues/251)
|
||||
3. enh: improve backup restore using taosX
|
||||
4. enh: during the multi-level storage data migration, if the migration time is too long, it may cause the Vnode to switch leader
|
||||
5. enh: adjust the systemctl strategy for managing the taosd process, if three consecutive restarts fail within 60 seconds, the next restart will be delayed until 900 seconds later
|
||||
|
||||
## Fixes
|
||||
1. fix: the maxRetryWaitTime parameter is used to control the maximum reconnection timeout time for the client when the cluster is unable to provide services, but it does not take effect when encountering a Sync timeout error
|
||||
2. fix: supports immediate subscription to the new tag value after modifying the tag value of the sub-table
|
||||
3. fix: the tmq_consumer_poll function for data subscription does not return an error code when the call fails
|
||||
4. fix: taosd may crash when more than 100 views are created and the show views command is executed
|
||||
5. fix: when using stmt2 to insert data, if not all data columns are bound, the insertion operation will fail
|
||||
6. fix: when using stmt2 to insert data, if the database name or table name is enclosed in backticks, the insertion operation will fail
|
||||
7. fix: when closing a vnode, if there are ongoing file merge tasks, taosd may crash
|
||||
8. fix: frequent execution of the “drop table with tb_uid” statement may lead to a deadlock in taosd
|
||||
9. fix: the potential deadlock during the switching of log files
|
||||
10. fix: prohibit the creation of databases with the same names as system databases (information_schema, performance_schema)
|
||||
11. fix: when the inner query of a nested query comes from a super table, the sorting information cannot be pushed up
|
||||
12. fix: incorrect error reporting when attempting to write Geometry data types that do not conform to topological specifications through the STMT interface
|
||||
13. fix: when using the percentile function and session window in a query statement, if an error occurs, taosd may crash
|
||||
14. fix: the issue of being unable to dynamically modify system parameters
|
||||
15. fix: random error of tranlict transaction in replication
|
||||
16. fix: when the same consumer executes the unsubscribe operation and immediately attempts to subscribe to other different topics, the subscription API will return an error
|
||||
17. fix: fix CVE-2022-28948 security issue in go connector
|
||||
18. fix: when a subquery in a view contains an ORDER BY clause with an alias, and the query function itself also has an alias, querying the view will result in an error
|
||||
19. fix: when changing the database from a single replica to multiple replicas, if there is metadata generated by earlier versions that is no longer used in the new version, the modification operation will fail
|
||||
20. fix: column names were not correctly copied when using SELECT * FROM subqueries
|
||||
21. fix: when performing max/min function on string type data, the results are inaccurate and taosd will crash
|
||||
22. fix: stream computing does not support the use of the HAVING clause, but no error is reported during creation
|
||||
23. fix: the version information displayed by taos shell for the server is inaccurate, such as being unable to correctly distinguish between the community edition and the enterprise edition
|
||||
24. fix: in certain specific query scenarios, when JOIN and CAST are used together, taosd may crash
|
||||
|
|
@ -3,10 +3,9 @@ title: Release Notes
|
|||
slug: /release-history/release-notes
|
||||
---
|
||||
|
||||
[3.3.4.8](./3-3-4-8/)
|
||||
```mdx-code-block
|
||||
import DocCardList from '@theme/DocCardList';
|
||||
import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
|
||||
|
||||
[3.3.4.3](./3-3-4-3/)
|
||||
|
||||
[3.3.3.0](./3-3-3-0/)
|
||||
|
||||
[3.3.2.0](./3-3-2-0/)
|
||||
<DocCardList items={useCurrentSidebarCategory().items}/>
|
||||
```
|
|
@ -1,84 +1,84 @@

#### Configuring taosAdapter

Method to configure taosAdapter to receive collectd data:

- Enable the configuration item in the taosAdapter configuration file (default location is /etc/taos/taosadapter.toml)

```toml
...
[opentsdb_telnet]
enable = true
maxTCPConnections = 250
tcpKeepAlive = false
dbs = ["opentsdb_telnet", "collectd", "icinga2", "tcollector"]
ports = [6046, 6047, 6048, 6049]
user = "root"
password = "taosdata"
...
```

The default database name written by taosAdapter is `collectd`, but you can also modify the dbs item in the taosAdapter configuration file to specify a different name. Fill in user and password with the actual TDengine configuration values. After modifying the configuration file, taosAdapter needs to be restarted.

- You can also start taosAdapter with command line parameters or environment variables to enable it to receive collectd data; for more details, refer to the taosAdapter reference manual.

#### Configuring collectd

collectd uses a plugin mechanism that can write the collected monitoring data to different data storage software in various forms. TDengine supports the direct collection plugin and the write_tsdb plugin.

1. **Configuring to receive direct collection plugin data**

Modify the related configuration items in the collectd configuration file (default location /etc/collectd/collectd.conf).

```xml
LoadPlugin network
<Plugin network>
  Server "<taosAdapter's host>" "<port for collectd direct>"
</Plugin>
```

Where \<taosAdapter's host> should be filled with the domain name or IP address of the server running taosAdapter. \<port for collectd direct> should be filled with the port used by taosAdapter to receive collectd data (default is 6045).

Example as follows:

```xml
LoadPlugin network
<Plugin network>
  Server "127.0.0.1" "6045"
</Plugin>
```

2. **Configuring write_tsdb plugin data**

Modify the related configuration items in the collectd configuration file (default location /etc/collectd/collectd.conf).

```xml
LoadPlugin write_tsdb
<Plugin write_tsdb>
  <Node>
    Host "<taosAdapter's host>"
    Port "<port for collectd write_tsdb plugin>"
    ...
  </Node>
</Plugin>
```

Where \<taosAdapter's host> should be filled with the domain name or IP address of the server running taosAdapter. \<port for collectd write_tsdb plugin> should be filled with the port used by taosAdapter to receive collectd write_tsdb plugin data (default is 6047).

```xml
LoadPlugin write_tsdb
<Plugin write_tsdb>
  <Node>
    Host "127.0.0.1"
    Port "6047"
    HostTags "status=production"
    StoreRates false
    AlwaysAppendDS false
  </Node>
</Plugin>
```

Then restart collectd:

```shell
systemctl restart collectd
```
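As a quick sanity check (a sketch assuming the default database name `collectd` and the default `root`/`taosdata` credentials; substitute your own), you can confirm from the taos shell that collectd data is arriving:

```shell
# list databases; a database named "collectd" should appear once data flows
taos -u root -ptaosdata -s "SHOW DATABASES;"
# inspect the supertables taosAdapter created from the collectd metrics
taos -u root -ptaosdata -s "USE collectd; SHOW STABLES;"
```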
@ -1,43 +1,43 @@

#### Configuring taosAdapter

Method to configure taosAdapter to receive icinga2 data:

- Enable the configuration item in the taosAdapter configuration file (default location /etc/taos/taosadapter.toml)

```toml
...
[opentsdb_telnet]
enable = true
maxTCPConnections = 250
tcpKeepAlive = false
dbs = ["opentsdb_telnet", "collectd", "icinga2", "tcollector"]
ports = [6046, 6047, 6048, 6049]
user = "root"
password = "taosdata"
...
```

The default database name written by taosAdapter is `icinga2`, but you can also modify the dbs item in the taosAdapter configuration file to specify a different name. Fill in user and password with the actual TDengine configuration values. taosAdapter needs to be restarted after modifications.

- You can also use taosAdapter command line parameters or set environment variables to enable taosAdapter to receive icinga2 data; for more details, please refer to the taosAdapter reference manual.

#### Configuring icinga2

- Enable icinga2's opentsdb-writer (reference link: https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer)
- Modify the configuration file `/etc/icinga2/features-enabled/opentsdb.conf`, filling in \<taosAdapter's host> with the domain name or IP address of the server running taosAdapter, and \<port for icinga2> with the corresponding port supported by taosAdapter for receiving icinga2 data (default is 6048)

```c
object OpenTsdbWriter "opentsdb" {
  host = "<taosAdapter's host>"
  port = <port for icinga2>
}
```

Example file:

```c
object OpenTsdbWriter "opentsdb" {
  host = "127.0.0.1"
  port = 6048
}
```
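If the opentsdb feature is not yet enabled, it can be switched on with icinga2's standard feature CLI before restarting the service (the `systemctl` call assumes a systemd-based installation):

```shell
icinga2 feature enable opentsdb
systemctl restart icinga2
```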
@ -1,18 +1,18 @@

Configuring Prometheus is done by editing the Prometheus configuration file `prometheus.yml` (default location `/etc/prometheus/prometheus.yml`).

#### Configure Third-Party Database Address

Set the `remote_read url` and `remote_write url` to point to the domain name or IP address of the server running the taosAdapter service, the REST service port (taosAdapter defaults to 6041), and the name of the database you want to write to in TDengine, ensuring the URLs are formatted as follows:

- remote_read url: `http://<taosAdapter's host>:<REST service port>/prometheus/v1/remote_read/<database name>`
- remote_write url: `http://<taosAdapter's host>:<REST service port>/prometheus/v1/remote_write/<database name>`

#### Configure Basic Authentication

- username: \<TDengine's username>
- password: \<TDengine's password>

#### Example configuration of remote_write and remote_read in the prometheus.yml file

```yaml
remote_write:
  ...
```
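The example above is truncated in this diff; to make the URL format concrete, here is a hedged, minimal sketch of both sections. The host `localhost`, port `6041`, database name `prometheus_data`, and the `root`/`taosdata` credentials are placeholder assumptions to be replaced with your own values:

```yaml
remote_write:
  - url: "http://localhost:6041/prometheus/v1/remote_write/prometheus_data"   # placeholder host/port/db
    basic_auth:
      username: root        # placeholder TDengine username
      password: taosdata    # placeholder TDengine password

remote_read:
  - url: "http://localhost:6041/prometheus/v1/remote_read/prometheus_data"    # placeholder host/port/db
    basic_auth:
      username: root
      password: taosdata
    remote_timeout: 10s
    read_recent: true
```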
@ -1,46 +1,46 @@

#### Configure taosAdapter

Method to configure taosAdapter to receive StatsD data:

- Enable the configuration item in the taosAdapter configuration file (default location /etc/taos/taosadapter.toml)

```toml
...
[statsd]
enable = true
port = 6044
db = "statsd"
user = "root"
password = "taosdata"
worker = 10
gatherInterval = "5s"
protocol = "udp"
maxTCPConnections = 250
tcpKeepAlive = false
allowPendingMessages = 50000
deleteCounters = true
deleteGauges = true
deleteSets = true
deleteTimings = true
...
```

The default database name written by taosAdapter is `statsd`, but you can also modify the db item in the taosAdapter configuration file to specify a different name. Fill in the user and password with the actual TDengine configuration values. After modifying the configuration file, taosAdapter needs to be restarted.

- You can also use taosAdapter command line arguments or set environment variables to enable the taosAdapter to receive StatsD data. For more details, please refer to the taosAdapter reference manual.

#### Configure StatsD

To use StatsD, download its [source code](https://github.com/statsd/statsd). Modify its configuration file according to the example file `exampleConfig.js` found in the root directory of the local source code download. Replace \<taosAdapter's host> with the domain name or IP address of the server running taosAdapter, and \<port for StatsD> with the port that taosAdapter uses to receive StatsD data (default is 6044).

```text
Add to the backends section: "./backends/repeater"
Add to the repeater section: { host: '<taosAdapter's host>', port: <port for StatsD> }
```

Example configuration file:

```js
{
port: 8125
, backends: ["./backends/repeater"]
...
```

@ -50,7 +50,7 @@ port: 8125

After adding the above content, start StatsD (assuming the configuration file is saved as config.js).

```shell
npm install
node stats.js config.js &
```
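Once StatsD is running, a quick way to test the pipeline is to push a single counter metric at it with netcat and then check the target database. The metric name `foo` is arbitrary, and the UDP port 8125 follows the example configuration above:

```shell
# send one counter increment to the local StatsD instance over UDP
echo "foo:1|c" | nc -u -w0 127.0.0.1 8125
```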
@ -1,27 +1,27 @@

#### Configuring taosAdapter

To configure taosAdapter to receive data from TCollector:

- Enable the configuration in the taosAdapter configuration file (default location /etc/taos/taosadapter.toml)

```toml
...
[opentsdb_telnet]
enable = true
maxTCPConnections = 250
tcpKeepAlive = false
dbs = ["opentsdb_telnet", "collectd", "icinga2", "tcollector"]
ports = [6046, 6047, 6048, 6049]
user = "root"
password = "taosdata"
...
```

The default database name that taosAdapter writes to is `tcollector`, but you can specify a different name by modifying the dbs option in the taosAdapter configuration file. Fill in the user and password with the actual values configured in TDengine. After modifying the configuration file, taosAdapter needs to be restarted.

- You can also use taosAdapter command line arguments or set environment variables to enable the taosAdapter to receive tcollector data. For more details, please refer to the taosAdapter reference manual.

#### Configuring TCollector

To use TCollector, download its [source code](https://github.com/OpenTSDB/tcollector). Its configuration options are in its source code. Note: There are significant differences between different versions of TCollector; this only refers to the latest code in the current master branch (git commit: 37ae920).

@ -29,7 +29,7 @@ Modify the contents of `collectors/etc/config.py` and `tcollector.py`. Change th

Example of git diff output for source code modifications:

```diff
index e7e7a1c..ec3e23c 100644
--- a/collectors/etc/config.py
+++ b/collectors/etc/config.py
```
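The diff above is truncated before the actual line changes. As an illustration only, the edit typically amounts to pointing TCollector's default OpenTSDB host and port at taosAdapter; the exact variable names and surrounding lines below are assumptions about `collectors/etc/config.py`, not a verbatim quote of it:

```diff
-        'host': 'localhost',
-        'port': 4242,
+        'host': '127.0.0.1',
+        'port': 6049,
```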
@ -19,7 +19,7 @@
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>3.4.0</version>
    <version>3.5.3</version>
</dependency>
<dependency>
    <groupId>org.locationtech.jts</groupId>
@ -1,6 +1,4 @@
package com.taosdata.example;

import com.alibaba.fastjson.JSON;
import com.taosdata.jdbc.AbstractStatement;

import java.sql.*;
@ -104,8 +104,9 @@ public class JdbcDemo {

private void executeQuery(String sql) {
    long start = System.currentTimeMillis();
    try (Statement statement = connection.createStatement()) {
        ResultSet resultSet = statement.executeQuery(sql);
    try (Statement statement = connection.createStatement();
         ResultSet resultSet = statement.executeQuery(sql)) {

        long end = System.currentTimeMillis();
        printSql(sql, true, (end - start));
        Util.printResult(resultSet);
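The change above moves the `ResultSet` into the try-with-resources header so that both resources are always closed, even when reading fails partway through. A self-contained sketch of the same pattern; the connection URL and query are placeholders:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class TryWithResourcesSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:TAOS-WS://localhost:6041?user=root&password=taosdata"; // placeholder
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT SERVER_VERSION()")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        } // rs is closed first, then stmt, then conn, even on exceptions
    }
}
```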
@ -47,7 +47,7 @@
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>3.4.0</version>
    <version>3.5.3</version>
</dependency>

</dependencies>

@ -18,7 +18,7 @@
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>3.4.0</version>
    <version>3.5.3</version>
</dependency>
<!-- druid -->
<dependency>

@ -17,7 +17,7 @@
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>3.4.0</version>
    <version>3.5.3</version>
</dependency>
<dependency>
    <groupId>com.google.guava</groupId>
@ -5,7 +5,7 @@
<parent>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-parent</artifactId>
    <version>2.4.0</version>
    <version>2.7.18</version>
    <relativePath/> <!-- lookup parent from repository -->
</parent>
<groupId>com.taosdata.example</groupId>

@ -18,6 +18,18 @@
<java.version>1.8</java.version>
</properties>

<dependencyManagement>
    <dependencies>
        <dependency>
            <groupId>com.baomidou</groupId>
            <artifactId>mybatis-plus-bom</artifactId>
            <version>3.5.10.1</version>
            <type>pom</type>
            <scope>import</scope>
        </dependency>
    </dependencies>
</dependencyManagement>

<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
@ -28,14 +40,21 @@
<artifactId>lombok</artifactId>
<optional>true</optional>
</dependency>
<!-- optional module for Spring Boot 2 -->
<dependency>
    <groupId>com.baomidou</groupId>
    <artifactId>mybatis-plus-boot-starter</artifactId>
    <version>3.1.2</version>
</dependency>

<!-- optional module for JDK 8+ -->
<dependency>
    <groupId>com.baomidou</groupId>
    <artifactId>mybatis-plus-jsqlparser-4.9</artifactId>
</dependency>
<dependency>
    <groupId>com.h2database</groupId>
    <artifactId>h2</artifactId>
    <version>2.3.232</version>
    <scope>runtime</scope>
</dependency>
<dependency>

@ -47,7 +66,7 @@
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>3.4.0</version>
    <version>3.5.3</version>
</dependency>

<dependency>
@ -1,34 +1,26 @@
package com.taosdata.example.mybatisplusdemo.config;

import com.baomidou.mybatisplus.extension.plugins.PaginationInterceptor;
import com.baomidou.mybatisplus.annotation.DbType;
import com.baomidou.mybatisplus.extension.plugins.MybatisPlusInterceptor;
import com.baomidou.mybatisplus.extension.plugins.inner.PaginationInnerInterceptor;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.transaction.annotation.EnableTransactionManagement;

@EnableTransactionManagement
@Configuration
@MapperScan("com.taosdata.example.mybatisplusdemo.mapper")
public class MybatisPlusConfig {

    /** mybatis 3.4.1 pagination config start ***/
    // @Bean
    // public MybatisPlusInterceptor mybatisPlusInterceptor() {
    //     MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
    //     interceptor.addInnerInterceptor(new PaginationInnerInterceptor());
    //     return interceptor;
    // }

    // @Bean
    // public ConfigurationCustomizer configurationCustomizer() {
    //     return configuration -> configuration.setUseDeprecatedExecutor(false);
    // }

    /**
     * Register the pagination plugin
     */
    @Bean
    public PaginationInterceptor paginationInterceptor() {
        PaginationInterceptor paginationInterceptor = new PaginationInterceptor();
        // TODO: mybatis-plus does not support TDengine; use the postgresql dialect
        paginationInterceptor.setDialectType("postgresql");
        return paginationInterceptor;
    }
    public MybatisPlusInterceptor mybatisPlusInterceptor() {
        MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
        interceptor.addInnerInterceptor(new PaginationInnerInterceptor(DbType.MYSQL));
        return interceptor;
    }

}
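With the `MybatisPlusInterceptor` bean registered, pagination goes through the standard mybatis-plus `selectPage` call. A hedged usage sketch, where `weatherMapper` and the `Weather` entity are assumed to come from this example project:

```java
// First page, 10 rows per page; the inner interceptor rewrites the SQL with LIMIT.
Page<Weather> page = new Page<>(1, 10);
IPage<Weather> result = weatherMapper.selectPage(page, null);
result.getRecords().forEach(System.out::println);
```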
@ -5,6 +5,7 @@ import com.taosdata.example.mybatisplusdemo.domain.Meters;
import org.apache.ibatis.annotations.Insert;
import org.apache.ibatis.annotations.Param;
import org.apache.ibatis.annotations.Update;
import org.apache.ibatis.executor.BatchResult;

import java.util.List;

@ -15,17 +16,6 @@ public interface MetersMapper extends BaseMapper<Meters> {

    @Insert("insert into meters (tbname, ts, groupid, location, current, voltage, phase) values(#{tbname}, #{ts}, #{groupid}, #{location}, #{current}, #{voltage}, #{phase})")
    int insertOne(Meters one);

    @Insert({
            "<script>",
            "insert into meters (tbname, ts, groupid, location, current, voltage, phase) values ",
            "<foreach collection='list' item='item' index='index' separator=','>",
            "(#{item.tbname}, #{item.ts}, #{item.groupid}, #{item.location}, #{item.current}, #{item.voltage}, #{item.phase})",
            "</foreach>",
            "</script>"
    })
    int insertBatch(@Param("list") List<Meters> metersList);

    @Update("drop stable if exists meters")
    void dropTable();
}
@ -11,9 +11,6 @@ public interface TemperatureMapper extends BaseMapper<Temperature> {
    @Update("CREATE TABLE if not exists temperature(ts timestamp, temperature float) tags(location nchar(64), tbIndex int)")
    int createSuperTable();

    @Update("create table #{tbName} using temperature tags( #{location}, #{tbindex})")
    int createTable(@Param("tbName") String tbName, @Param("location") String location, @Param("tbindex") int tbindex);

    @Update("drop table if exists temperature")
    void dropSuperTable();
@ -10,7 +10,7 @@ public interface WeatherMapper extends BaseMapper<Weather> {
    @Update("CREATE TABLE if not exists weather(ts timestamp, temperature float, humidity int, location nchar(100))")
    int createTable();

    @Insert("insert into weather (ts, temperature, humidity, location) values(#{ts}, #{temperature}, #{humidity}, #{location})")
    @Insert("insert into weather (ts, temperature, humidity, location) values(#{ts}, #{temperature}, #{humidity}, #{location, jdbcType=NCHAR})")
    int insertOne(Weather one);

    @Update("drop table if exists weather")
@ -0,0 +1,19 @@
package com.taosdata.example.mybatisplusdemo.service;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.SQLException;

@Service
public class DatabaseConnectionService {

    @Autowired
    private DataSource dataSource;

    public Connection getConnection() throws SQLException {
        return dataSource.getConnection();
    }
}
@ -0,0 +1,23 @@
package com.taosdata.example.mybatisplusdemo.service;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

@Service
public class TemperatureService {
    @Autowired
    private DatabaseConnectionService databaseConnectionService;

    public void createTable(String tableName, String location, int tbIndex) throws SQLException {
        try (Connection connection = databaseConnectionService.getConnection();
             Statement statement = connection.createStatement()) {
            statement.executeUpdate("create table " + tableName + " using temperature tags( '" + location + "', " + tbIndex + ")");
        }
    }
}
@ -5,6 +5,7 @@ import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.taosdata.example.mybatisplusdemo.domain.Meters;
import com.taosdata.example.mybatisplusdemo.domain.Weather;
import org.apache.ibatis.executor.BatchResult;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

@ -18,6 +19,8 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Random;

import static java.sql.Statement.SUCCESS_NO_INFO;

@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest
public class MetersMapperTest {
@ -63,8 +66,19 @@ public class MetersMapperTest {
        metersList.add(one);
    }
    int affectRows = mapper.insertBatch(metersList);
    Assert.assertEquals(100, affectRows);
    List<BatchResult> affectRowsList = mapper.insert(metersList, 10000);

    long totalAffectedRows = 0;
    for (BatchResult batchResult : affectRowsList) {
        int[] updateCounts = batchResult.getUpdateCounts();
        for (int status : updateCounts) {
            if (status == SUCCESS_NO_INFO) {
                totalAffectedRows++;
            }
        }
    }

    Assert.assertEquals(100, totalAffectedRows);
}

@Test
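For context on the assertion above, a hedged note on JDBC batch semantics: a driver is allowed to report `Statement.SUCCESS_NO_INFO` (-2) for a batched statement that succeeded without a known row count, which is why the test tallies entries rather than summing counts. A generic sketch, where `statement` is any existing `java.sql.Statement` with commands queued via `addBatch`:

```java
int[] counts = statement.executeBatch();
long total = 0;
for (int c : counts) {
    if (c == Statement.SUCCESS_NO_INFO) {
        total++;      // executed successfully, count unknown: tally the statement itself
    } else if (c >= 0) {
        total += c;   // explicit per-statement affected-row count
    }
}
```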
@ -93,7 +107,7 @@ public class MetersMapperTest {

@Test
public void testSelectCount() {
    int count = mapper.selectCount(null);
    long count = mapper.selectCount(null);
    // Assert.assertEquals(5, count);
    System.out.println(count);
}
@ -4,6 +4,7 @@ import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.taosdata.example.mybatisplusdemo.domain.Temperature;
import com.taosdata.example.mybatisplusdemo.service.TemperatureService;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;

@ -13,6 +14,8 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;

import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.HashMap;
import java.util.List;
@ -22,18 +25,20 @@ import java.util.Random;
@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest
public class TemperatureMapperTest {
    @Autowired
    private TemperatureService temperatureService;

    private static Random random = new Random(System.currentTimeMillis());
    private static String[] locations = {"北京", "上海", "深圳", "广州", "杭州"};

    @Before
    public void before() {
    public void before() throws SQLException {
        mapper.dropSuperTable();
        // create table temperature
        mapper.createSuperTable();
        // create table t_X using temperature
        for (int i = 0; i < 10; i++) {
            mapper.createTable("t" + i, locations[random.nextInt(locations.length)], i);
            temperatureService.createTable("t" + i, locations[i % locations.length], i);
        }
        // insert into table
        int affectRows = 0;
@ -107,7 +112,7 @@ public class TemperatureMapperTest {
 * **/
@Test
public void testSelectCount() {
    int count = mapper.selectCount(null);
    long count = mapper.selectCount(null);
    Assert.assertEquals(10, count);
}
@ -52,7 +52,7 @@ public class WeatherMapperTest {
    one.setTemperature(random.nextFloat() * 50);
    one.setHumidity(random.nextInt(100));
    one.setLocation("望京");
    int affectRows = mapper.insert(one);
    int affectRows = mapper.insertOne(one);
    Assert.assertEquals(1, affectRows);
}
@ -82,7 +82,7 @@ public class WeatherMapperTest {

@Test
public void testSelectCount() {
    int count = mapper.selectCount(null);
    long count = mapper.selectCount(null);
    // Assert.assertEquals(5, count);
    System.out.println(count);
}
@ -5,7 +5,7 @@
<parent>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-parent</artifactId>
    <version>2.6.15</version>
    <version>2.7.18</version>
    <relativePath/> <!-- lookup parent from repository -->
</parent>
<groupId>com.taosdata.example</groupId>

@ -34,7 +34,7 @@
<dependency>
    <groupId>org.mybatis.spring.boot</groupId>
    <artifactId>mybatis-spring-boot-starter</artifactId>
    <version>2.1.1</version>
    <version>2.3.2</version>
</dependency>

<dependency>
@ -70,7 +70,7 @@
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>3.4.0</version>
    <version>3.5.3</version>
</dependency>

<dependency>
@ -50,13 +50,6 @@
        ), groupId int)
    </update>

    <update id="createTable" parameterType="com.taosdata.example.springbootdemo.domain.Weather">
        create table if not exists test.t#{groupId} using test.weather tags
        (
            #{location},
            #{groupId}
        )
    </update>

    <select id="select" resultMap="BaseResultMap">
        select * from test.weather order by ts desc

@ -69,8 +62,8 @@
    </select>

    <insert id="insert" parameterType="com.taosdata.example.springbootdemo.domain.Weather">
        insert into test.t#{groupId} (ts, temperature, humidity, note, bytes)
        values (#{ts}, ${temperature}, ${humidity}, #{note}, #{bytes})
        insert into test.t${groupId} (ts, temperature, humidity, note, bytes)
        values (#{ts}, #{temperature}, #{humidity}, #{note}, #{bytes})
    </insert>

    <select id="getSubTables" resultType="String">
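The fix above matters because MyBatis treats the two placeholders differently: `#{}` binds a prepared-statement parameter, which cannot appear inside an identifier such as a table name, while `${}` performs plain text substitution. A minimal hedged sketch following the same mapper conventions (the `insertSketch` id and trimmed column list are illustrative only):

```xml
<!-- ${groupId} is spliced into the SQL text, so test.t0, test.t1, ... resolve as table names;
     #{ts} and #{temperature} remain safely bound parameters. -->
<insert id="insertSketch" parameterType="com.taosdata.example.springbootdemo.domain.Weather">
    insert into test.t${groupId} (ts, temperature) values (#{ts}, #{temperature})
</insert>
```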
@ -0,0 +1,19 @@
package com.taosdata.example.springbootdemo.service;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.SQLException;

@Service
public class DatabaseConnectionService {

    @Autowired
    private DataSource dataSource;

    public Connection getConnection() throws SQLException {
        return dataSource.getConnection();
    }
}
@ -6,6 +6,9 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.nio.charset.StandardCharsets;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Timestamp;
import java.util.List;
import java.util.Map;
@ -16,6 +19,9 @@ public class WeatherService {

    @Autowired
    private WeatherMapper weatherMapper;

    @Autowired
    private DatabaseConnectionService databaseConnectionService;
    private Random random = new Random(System.currentTimeMillis());
    private String[] locations = {"北京", "上海", "广州", "深圳", "天津"};
@ -32,7 +38,7 @@ public class WeatherService {
    weather.setGroupId(i % locations.length);
    weather.setNote("note-" + i);
    weather.setBytes(locations[random.nextInt(locations.length)].getBytes(StandardCharsets.UTF_8));
    weatherMapper.createTable(weather);
    createTable(weather);
    count += weatherMapper.insert(weather);
}
return count;
@ -78,4 +84,14 @@ public class WeatherService {
        weather.setLocation(location);
        return weather;
    }

    public void createTable(Weather weather) {
        try (Connection connection = databaseConnectionService.getConnection();
             Statement statement = connection.createStatement()) {
            String tableName = "t" + weather.getGroupId();
            statement.executeUpdate("create table if not exists " + tableName + " using test.weather tags( '" + weather.getLocation() + "', " + weather.getGroupId() + ")");
        } catch (SQLException e) {
            throw new RuntimeException(e);
        }
    }
}
@ -4,8 +4,8 @@
#spring.datasource.username=root
#spring.datasource.password=taosdata
# datasource config - JDBC-RESTful
spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver
spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test
spring.datasource.driver-class-name=com.taosdata.jdbc.ws.WebSocketDriver
spring.datasource.url=jdbc:TAOS-WS://localhost:6041/test
spring.datasource.username=root
spring.datasource.password=taosdata
spring.datasource.druid.initial-size=5
@ -67,7 +67,7 @@
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>3.4.0</version>
    <version>3.5.3</version>
    <!-- <scope>system</scope>-->
    <!-- <systemPath>${project.basedir}/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar</systemPath>-->
</dependency>
@ -58,12 +58,13 @@ static int DemoInsertData() {
    taos_cleanup();
    return -1;
}
taos_free_result(result);

// you can check affectedRows here
int rows = taos_affected_rows(result);
fprintf(stdout, "Successfully inserted %d rows into power.meters.\n", rows);

taos_free_result(result);

// close & clean
taos_close(taos);
taos_cleanup();
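The hunk above fixes a use-after-free: the affected-row count was being read from a result set that had already been released. A minimal sketch of the corrected ordering, assuming the standard `taos.h` client API (the helper function name is illustrative):

```c
#include <stdio.h>
#include <taos.h>

// Read everything needed from the result set first, then release it.
static void report_insert(TAOS_RES *result) {
    int rows = taos_affected_rows(result);  // safe: result is still alive
    fprintf(stdout, "Successfully inserted %d rows.\n", rows);
    taos_free_result(result);               // release only after the last read
}
```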
@ -0,0 +1,579 @@
package com.taosdata.flink.example;

import com.taosdata.flink.cdc.TDengineCdcSource;
import com.taosdata.flink.common.TDengineCdcParams;
import com.taosdata.flink.common.TDengineConfigParams;
import com.taosdata.flink.entity.ResultBean; // assumed location of the example bean used below
import com.taosdata.flink.sink.TDengineSink;
import com.taosdata.flink.source.TDengineSource;
import com.taosdata.flink.source.entity.SourceRecords;
import com.taosdata.flink.source.entity.SourceSplitSql;
import com.taosdata.flink.source.entity.SplitType;
import com.taosdata.flink.source.entity.TimestampSplitInfo;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.tmq.ConsumerRecord;
import com.taosdata.jdbc.tmq.ConsumerRecords;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.core.execution.JobClient;
import org.apache.flink.shaded.curator5.com.google.common.base.Strings;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.junit.Assert;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.text.SimpleDateFormat;
import java.time.Duration;
import java.time.ZoneId;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;

import static org.apache.flink.streaming.api.CheckpointingMode.AT_LEAST_ONCE;
public class Main {
    static String jdbcUrl = "jdbc:TAOS-WS://localhost:6041?user=root&password=taosdata";
    // Running total of the voltage column, shared by the example jobs below.
    static AtomicInteger totalVoltage = new AtomicInteger(0);

    static void prepare() throws ClassNotFoundException, SQLException {
        Properties properties = new Properties();
        properties.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
        String insertQuery = "INSERT INTO " +
                "power.d1001 USING power.meters TAGS('California.SanFrancisco', 1) " +
                "VALUES " +
                "('2024-12-19 19:12:45.642', 50.30000, 201, 0.31000) " +
                "('2024-12-19 19:12:46.642', 82.60000, 202, 0.33000) " +
                "('2024-12-19 19:12:47.642', 92.30000, 203, 0.31000) " +
                "('2024-12-19 18:12:45.642', 50.30000, 201, 0.31000) " +
                "('2024-12-19 18:12:46.642', 82.60000, 202, 0.33000) " +
                "('2024-12-19 18:12:47.642', 92.30000, 203, 0.31000) " +
                "('2024-12-19 17:12:45.642', 50.30000, 201, 0.31000) " +
                "('2024-12-19 17:12:46.642', 82.60000, 202, 0.33000) " +
                "('2024-12-19 17:12:47.642', 92.30000, 203, 0.31000) " +
                "power.d1002 USING power.meters TAGS('Alabama.Montgomery', 2) " +
                "VALUES " +
                "('2024-12-19 19:12:45.642', 50.30000, 204, 0.25000) " +
                "('2024-12-19 19:12:46.642', 62.60000, 205, 0.33000) " +
                "('2024-12-19 19:12:47.642', 72.30000, 206, 0.31000) " +
                "('2024-12-19 18:12:45.642', 50.30000, 204, 0.25000) " +
                "('2024-12-19 18:12:46.642', 62.60000, 205, 0.33000) " +
                "('2024-12-19 18:12:47.642', 72.30000, 206, 0.31000) " +
                "('2024-12-19 17:12:45.642', 50.30000, 204, 0.25000) " +
                "('2024-12-19 17:12:46.642', 62.60000, 205, 0.33000) " +
                "('2024-12-19 17:12:47.642', 72.30000, 206, 0.31000) ";

        Class.forName("com.taosdata.jdbc.ws.WebSocketDriver");
        try (Connection connection = DriverManager.getConnection(jdbcUrl, properties);
             Statement stmt = connection.createStatement()) {

            stmt.executeUpdate("DROP TOPIC IF EXISTS topic_meters");

            stmt.executeUpdate("DROP database IF EXISTS power");
            // create database
            int rowsAffected = stmt.executeUpdate("CREATE DATABASE IF NOT EXISTS power vgroups 5");

            stmt.executeUpdate("use power");
            // you can check rowsAffected here
            System.out.println("Create database power successfully, rowsAffected: " + rowsAffected);
            // create table
            rowsAffected = stmt.executeUpdate("CREATE STABLE IF NOT EXISTS meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);");
            // you can check rowsAffected here
            System.out.println("Create stable power.meters successfully, rowsAffected: " + rowsAffected);

            stmt.executeUpdate("CREATE TOPIC topic_meters as SELECT ts, `current`, voltage, phase, location, groupid, tbname FROM meters");

            int affectedRows = stmt.executeUpdate(insertQuery);
            // you can check affectedRows here
            System.out.println("Successfully inserted " + affectedRows + " rows to power.meters.");

            stmt.executeUpdate("DROP database IF EXISTS power_sink");
            // create database
            stmt.executeUpdate("CREATE DATABASE IF NOT EXISTS power_sink vgroups 5");

            stmt.executeUpdate("use power_sink");
            System.out.println("Create database power_sink successfully.");
            // create table
            stmt.executeUpdate("CREATE STABLE IF NOT EXISTS sink_meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);");

            stmt.executeUpdate("CREATE TABLE IF NOT EXISTS sink_normal (ts timestamp, current float, voltage int, phase float);");

        } catch (Exception ex) {
            // please refer to the JDBC specifications for detailed exceptions info
            System.out.printf("Failed to create database power or stable meters, %sErrMessage: %s%n",
                    ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "",
                    ex.getMessage());
            // Rethrow so the example fails fast; use proper logging in production.
            throw ex;
        }
    }

    public static void main(String[] args) throws Exception {
        prepare();
        if (args != null && args.length > 0 && args[0].equals("source")) {
            testSource();
        } else if (args != null && args.length > 0 && args[0].equals("table")) {
            testTableToSink();
        } else if (args != null && args.length > 0 && args[0].equals("cdc")) {
            testCustomTypeCdc();
        } else if (args != null && args.length > 0 && args[0].equals("table-cdc")) {
            testCdcTableToSink();
        }
    }
    static SourceSplitSql getTimeSplit() {
        // ANCHOR: time_interval
        SourceSplitSql splitSql = new SourceSplitSql();
        splitSql.setSql("select ts, `current`, voltage, phase, groupid, location, tbname from meters")
                .setSplitType(SplitType.SPLIT_TYPE_TIMESTAMP)
                .setTimestampSplitInfo(new TimestampSplitInfo(
                        "2024-12-19 16:12:48.000",
                        "2024-12-19 19:12:48.000",
                        "ts",
                        Duration.ofHours(1),
                        new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"),
                        ZoneId.of("Asia/Shanghai")));
        // ANCHOR_END: time_interval
        return splitSql;
    }

    static SourceSplitSql getTagSplit() throws Exception {
        // ANCHOR: tag_split
        SourceSplitSql splitSql = new SourceSplitSql();
        splitSql.setSql("select ts, current, voltage, phase, groupid, location from meters where voltage > 100")
                .setTagList(Arrays.asList("groupid >100 and location = 'Shanghai'",
                        "groupid >50 and groupid < 100 and location = 'Guangzhou'",
                        "groupid >0 and groupid < 50 and location = 'Beijing'"))
                .setSplitType(SplitType.SPLIT_TYPE_TAG);
        // ANCHOR_END: tag_split
        return splitSql;
    }

    static SourceSplitSql getTableSqlit() {
        // ANCHOR: table_split
        SourceSplitSql splitSql = new SourceSplitSql();
        splitSql.setSelect("ts, current, voltage, phase, groupid, location")
                .setTableList(Arrays.asList("d1001", "d1002"))
                .setOther("order by ts limit 100")
                .setSplitType(SplitType.SPLIT_TYPE_TABLE);
        // ANCHOR_END: table_split
        return splitSql;
    }
    //ANCHOR: source_test
    static void testSource() throws Exception {
        Properties connProps = new Properties();
        connProps.setProperty(TDengineConfigParams.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
        connProps.setProperty(TDengineConfigParams.PROPERTY_KEY_TIME_ZONE, "UTC-8");
        connProps.setProperty(TDengineConfigParams.VALUE_DESERIALIZER, "RowData");
        connProps.setProperty(TDengineConfigParams.TD_JDBC_URL, "jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata");
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(3);

        SourceSplitSql splitSql = new SourceSplitSql();
        splitSql.setSql("select ts, `current`, voltage, phase, groupid, location, tbname from meters")
                .setSplitType(SplitType.SPLIT_TYPE_TIMESTAMP)
                .setTimestampSplitInfo(new TimestampSplitInfo(
                        "2024-12-19 16:12:48.000",
                        "2024-12-19 19:12:48.000",
                        "ts",
                        Duration.ofHours(1),
                        new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"),
                        ZoneId.of("Asia/Shanghai")));

        TDengineSource<RowData> source = new TDengineSource<>(connProps, splitSql, RowData.class);
        DataStreamSource<RowData> input = env.fromSource(source, WatermarkStrategy.noWatermarks(), "tdengine-source");
        DataStream<String> resultStream = input.map((MapFunction<RowData, String>) rowData -> {
            StringBuilder sb = new StringBuilder();
            sb.append("ts: " + rowData.getTimestamp(0, 0) +
                    ", current: " + rowData.getFloat(1) +
                    ", voltage: " + rowData.getInt(2) +
                    ", phase: " + rowData.getFloat(3) +
                    ", location: " + new String(rowData.getBinary(4)));
            sb.append("\n");
            return sb.toString();
        });
        resultStream.print();
        env.execute("tdengine flink source");
    }
    //ANCHOR_END: source_test
    //ANCHOR: source_custom_type_test
    void testCustomTypeSource() throws Exception {
        System.out.println("testTDengineSourceByTimeSplit start!");
        Properties connProps = new Properties();
        connProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
        connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
        connProps.setProperty(TDengineConfigParams.VALUE_DESERIALIZER, "com.taosdata.flink.entity.ResultSoureDeserialization");
        connProps.setProperty(TDengineConfigParams.TD_JDBC_URL, "jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata");
        SourceSplitSql splitSql = new SourceSplitSql();
        splitSql.setSql("select ts, `current`, voltage, phase, groupid, location, tbname from meters")
                .setSplitType(SplitType.SPLIT_TYPE_TIMESTAMP)
                // split by time
                .setTimestampSplitInfo(new TimestampSplitInfo(
                        "2024-12-19 16:12:48.000",
                        "2024-12-19 19:12:48.000",
                        "ts",
                        Duration.ofHours(1),
                        new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"),
                        ZoneId.of("Asia/Shanghai")));

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(3);
        TDengineSource<ResultBean> source = new TDengineSource<>(connProps, splitSql, ResultBean.class);
        DataStreamSource<ResultBean> input = env.fromSource(source, WatermarkStrategy.noWatermarks(), "tdengine-source");
        DataStream<String> resultStream = input.map((MapFunction<ResultBean, String>) rowData -> {
            StringBuilder sb = new StringBuilder();
            sb.append("ts: " + rowData.getTs() +
                    ", current: " + rowData.getCurrent() +
                    ", voltage: " + rowData.getVoltage() +
                    ", phase: " + rowData.getPhase() +
                    ", groupid: " + rowData.getGroupid() +
                    ", location: " + rowData.getLocation() +
                    ", tbname: " + rowData.getTbname());
            sb.append("\n");
            totalVoltage.addAndGet(rowData.getVoltage());
            return sb.toString();
        });
        resultStream.print();
        env.execute("flink tdengine source");
    }
    //ANCHOR_END: source_custom_type_test
    //ANCHOR: source_batch_test
    void testBatchSource() throws Exception {
        Properties connProps = new Properties();
        connProps.setProperty(TDengineConfigParams.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
        connProps.setProperty(TDengineConfigParams.PROPERTY_KEY_TIME_ZONE, "UTC-8");
        connProps.setProperty(TDengineConfigParams.VALUE_DESERIALIZER, "RowData");
        connProps.setProperty(TDengineConfigParams.TD_BATCH_MODE, "true");
        connProps.setProperty(TDengineConfigParams.TD_JDBC_URL, "jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata");
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(3);
        Class<SourceRecords<RowData>> typeClass = (Class<SourceRecords<RowData>>) (Class<?>) SourceRecords.class;
        SourceSplitSql sql = new SourceSplitSql("select ts, `current`, voltage, phase, tbname from meters");
        TDengineSource<SourceRecords<RowData>> source = new TDengineSource<>(connProps, sql, typeClass);
        DataStreamSource<SourceRecords<RowData>> input = env.fromSource(source, WatermarkStrategy.noWatermarks(), "tdengine-source");
        DataStream<String> resultStream = input.map((MapFunction<SourceRecords<RowData>, String>) records -> {
            StringBuilder sb = new StringBuilder();
            Iterator<RowData> iterator = records.iterator();
            while (iterator.hasNext()) {
                GenericRowData row = (GenericRowData) iterator.next();
                sb.append("ts: " + row.getTimestamp(0, 0) +
                        ", current: " + row.getFloat(1) +
                        ", voltage: " + row.getInt(2) +
                        ", phase: " + row.getFloat(3) +
                        ", location: " + new String(row.getBinary(4)));
                sb.append("\n");
                totalVoltage.addAndGet(row.getInt(2));
            }
            return sb.toString();
        });
        resultStream.print();
        env.execute("flink tdengine source");
    }
    //ANCHOR_END: source_batch_test
    //ANCHOR: cdc_source
    void testTDengineCdc() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(3);
        env.enableCheckpointing(100, AT_LEAST_ONCE);
        env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
        Properties config = new Properties();
        config.setProperty(TDengineCdcParams.CONNECT_TYPE, "ws");
        config.setProperty(TDengineCdcParams.BOOTSTRAP_SERVERS, "localhost:6041");
        config.setProperty(TDengineCdcParams.AUTO_OFFSET_RESET, "earliest");
        config.setProperty(TDengineCdcParams.MSG_WITH_TABLE_NAME, "true");
        config.setProperty(TDengineCdcParams.AUTO_COMMIT_INTERVAL_MS, "1000");
        config.setProperty(TDengineCdcParams.GROUP_ID, "group_1");
        config.setProperty(TDengineCdcParams.ENABLE_AUTO_COMMIT, "true");
        config.setProperty(TDengineCdcParams.CONNECT_USER, "root");
        config.setProperty(TDengineCdcParams.CONNECT_PASS, "taosdata");
        config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "RowData");
        config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER_ENCODING, "UTF-8");
        TDengineCdcSource<RowData> tdengineSource = new TDengineCdcSource<>("topic_meters", config, RowData.class);
        DataStreamSource<RowData> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source");
        DataStream<String> resultStream = input.map((MapFunction<RowData, String>) rowData -> {
            StringBuilder sb = new StringBuilder();
            sb.append("ts: " + rowData.getTimestamp(0, 0) +
                    ", current: " + rowData.getFloat(1) +
                    ", voltage: " + rowData.getInt(2) +
                    ", phase: " + rowData.getFloat(3) +
                    ", location: " + new String(rowData.getBinary(4)));
            sb.append("\n");
            totalVoltage.addAndGet(rowData.getInt(2));
            return sb.toString();
        });
        resultStream.print();
        JobClient jobClient = env.executeAsync("Flink test cdc Example");
        Thread.sleep(5000L);
        // A job submitted from the Flink UI cannot be cancelled here; stop it on the UI page instead.
        jobClient.cancel().get();
    }
    //ANCHOR_END: cdc_source
    //ANCHOR: cdc_batch_source
    void testTDengineCdcBatch() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(3);
        Properties config = new Properties();
        config.setProperty(TDengineCdcParams.CONNECT_TYPE, "ws");
        config.setProperty(TDengineCdcParams.BOOTSTRAP_SERVERS, "localhost:6041");
        config.setProperty(TDengineCdcParams.AUTO_OFFSET_RESET, "earliest");
        config.setProperty(TDengineCdcParams.MSG_WITH_TABLE_NAME, "true");
        config.setProperty(TDengineCdcParams.AUTO_COMMIT_INTERVAL_MS, "1000");
        config.setProperty(TDengineCdcParams.GROUP_ID, "group_1");
        config.setProperty(TDengineCdcParams.CONNECT_USER, "root");
        config.setProperty(TDengineCdcParams.CONNECT_PASS, "taosdata");
        config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "RowData");
        config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER_ENCODING, "UTF-8");
        config.setProperty(TDengineCdcParams.TMQ_BATCH_MODE, "true");

        Class<ConsumerRecords<RowData>> typeClass = (Class<ConsumerRecords<RowData>>) (Class<?>) ConsumerRecords.class;
        TDengineCdcSource<ConsumerRecords<RowData>> tdengineSource = new TDengineCdcSource<>("topic_meters", config, typeClass);
        DataStreamSource<ConsumerRecords<RowData>> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source");
        DataStream<String> resultStream = input.map((MapFunction<ConsumerRecords<RowData>, String>) records -> {
            Iterator<ConsumerRecord<RowData>> iterator = records.iterator();
            StringBuilder sb = new StringBuilder();
            while (iterator.hasNext()) {
                GenericRowData row = (GenericRowData) iterator.next().value();
                sb.append("ts: " + row.getTimestamp(0, 0) +
                        ", current: " + row.getFloat(1) +
                        ", voltage: " + row.getInt(2) +
                        ", phase: " + row.getFloat(3) +
                        ", location: " + new String(row.getBinary(4)));
                sb.append("\n");
                totalVoltage.addAndGet(row.getInt(2));
            }
            return sb.toString();
        });

        resultStream.print();
        JobClient jobClient = env.executeAsync("Flink test cdc Example");
        Thread.sleep(5000L);
        jobClient.cancel().get();
    }
    //ANCHOR_END: cdc_batch_source
    //ANCHOR: cdc_custom_type_test
    static void testCustomTypeCdc() throws Exception {
        System.out.println("testCustomTypeTDengineCdc start!");
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(3);
        env.enableCheckpointing(100, AT_LEAST_ONCE);
        env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(4);
        Properties config = new Properties();
        config.setProperty(TDengineCdcParams.CONNECT_TYPE, "ws");
        config.setProperty(TDengineCdcParams.BOOTSTRAP_SERVERS, "localhost:6041");
        config.setProperty(TDengineCdcParams.AUTO_OFFSET_RESET, "earliest");
        config.setProperty(TDengineCdcParams.MSG_WITH_TABLE_NAME, "true");
        config.setProperty(TDengineCdcParams.AUTO_COMMIT_INTERVAL_MS, "1000");
        config.setProperty(TDengineCdcParams.GROUP_ID, "group_1");
        config.setProperty(TDengineCdcParams.CONNECT_USER, "root");
        config.setProperty(TDengineCdcParams.CONNECT_PASS, "taosdata");
        config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "com.taosdata.flink.entity.ResultDeserializer");
        config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER_ENCODING, "UTF-8");
        TDengineCdcSource<ResultBean> tdengineSource = new TDengineCdcSource<>("topic_meters", config, ResultBean.class);
        DataStreamSource<ResultBean> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source");
        DataStream<String> resultStream = input.map((MapFunction<ResultBean, String>) rowData -> {
            StringBuilder sb = new StringBuilder();
            sb.append("ts: " + rowData.getTs() +
                    ", current: " + rowData.getCurrent() +
                    ", voltage: " + rowData.getVoltage() +
                    ", phase: " + rowData.getPhase() +
                    ", groupid: " + rowData.getGroupid() +
                    ", location: " + rowData.getLocation() +
                    ", tbname: " + rowData.getTbname());
            sb.append("\n");
            totalVoltage.addAndGet(rowData.getVoltage());
            return sb.toString();
        });
        resultStream.print();
        JobClient jobClient = env.executeAsync("Flink test cdc Example");
        Thread.sleep(5000L);
        jobClient.cancel().get();
    }
    //ANCHOR_END: cdc_custom_type_test
    //ANCHOR: RowDataToSink
    static void testRowDataToSink() throws Exception {
        Properties connProps = new Properties();
        connProps.setProperty(TDengineConfigParams.VALUE_DESERIALIZER, "RowData");
        connProps.setProperty(TDengineConfigParams.TD_JDBC_URL, "jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata");
        SourceSplitSql sql = new SourceSplitSql("select ts, `current`, voltage, phase, tbname from meters");
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        env.enableCheckpointing(1000, CheckpointingMode.AT_LEAST_ONCE);
        TDengineSource<RowData> source = new TDengineSource<>(connProps, sql, RowData.class);
        DataStreamSource<RowData> input = env.fromSource(source, WatermarkStrategy.noWatermarks(), "tdengine-source");
        Properties sinkProps = new Properties();
        sinkProps.setProperty(TDengineConfigParams.VALUE_DESERIALIZER, "RowData");
        sinkProps.setProperty(TDengineConfigParams.TD_SOURCE_TYPE, "tdengine_source");
        sinkProps.setProperty(TDengineConfigParams.TD_DATABASE_NAME, "power_sink");
        sinkProps.setProperty(TDengineConfigParams.TD_SUPERTABLE_NAME, "sink_meters");
        sinkProps.setProperty(TDengineConfigParams.TD_JDBC_URL, "jdbc:TAOS-WS://localhost:6041/power_sink?user=root&password=taosdata");
        sinkProps.setProperty(TDengineConfigParams.TD_BATCH_SIZE, "2000");

        // The list of target table field names must match the order of the data columns.
        TDengineSink<RowData> sink = new TDengineSink<>(sinkProps,
                Arrays.asList("ts", "current", "voltage", "phase", "groupid", "location", "tbname"));

        input.sinkTo(sink);
        env.execute("flink tdengine source");
    }
    //ANCHOR_END: RowDataToSink
    //ANCHOR: CdcRowDataToSink
    static void testCdcToSink() throws Exception {
        System.out.println("testTDengineCdcToTdSink start!");
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(3);
        env.enableCheckpointing(500, CheckpointingMode.AT_LEAST_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        env.getCheckpointConfig().setCheckpointTimeout(5000);
        Properties config = new Properties();
        config.setProperty(TDengineCdcParams.CONNECT_TYPE, "ws");
        config.setProperty(TDengineCdcParams.BOOTSTRAP_SERVERS, "localhost:6041");
        config.setProperty(TDengineCdcParams.AUTO_OFFSET_RESET, "earliest");
        config.setProperty(TDengineCdcParams.GROUP_ID, "group_1");
        config.setProperty(TDengineCdcParams.CONNECT_USER, "root");
        config.setProperty(TDengineCdcParams.CONNECT_PASS, "taosdata");
        config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "RowData");
        config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER_ENCODING, "UTF-8");

        TDengineCdcSource<RowData> tdengineSource = new TDengineCdcSource<>("topic_meters", config, RowData.class);
        DataStreamSource<RowData> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source");

        Properties sinkProps = new Properties();
        sinkProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
        sinkProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
        sinkProps.setProperty(TDengineConfigParams.VALUE_DESERIALIZER, "RowData");
        sinkProps.setProperty(TDengineConfigParams.TD_DATABASE_NAME, "power_sink");
        sinkProps.setProperty(TDengineConfigParams.TD_SUPERTABLE_NAME, "sink_meters");
        sinkProps.setProperty(TDengineConfigParams.TD_JDBC_URL, "jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata");
        sinkProps.setProperty(TDengineConfigParams.TD_BATCH_SIZE, "2000");

        TDengineSink<RowData> sink = new TDengineSink<>(sinkProps, Arrays.asList("ts", "current", "voltage", "phase", "location", "groupid", "tbname"));
        input.sinkTo(sink);
        JobClient jobClient = env.executeAsync("Flink test cdc Example");
        Thread.sleep(6000L);
        jobClient.cancel().get();
        System.out.println("testTDengineCdcToTdSink finish!");
    }
    //ANCHOR_END: CdcRowDataToSink

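    // Note: the pipeline above checkpoints in AT_LEAST_ONCE mode, so a recovery
    // may replay records into the sink. If duplicates matter, the usual
    // Flink-side knob is EXACTLY_ONCE checkpointing (whether the TDengine sink
    // deduplicates replayed records is not shown here and would need verifying):
    // env.enableCheckpointing(500, CheckpointingMode.EXACTLY_ONCE);
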
    //ANCHOR: source_table
    static void testTableToSink() throws Exception {
        System.out.println("testTableToSink start!");
        EnvironmentSettings fsSettings = EnvironmentSettings.newInstance().inStreamingMode().build();
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(3);
        env.enableCheckpointing(1000, CheckpointingMode.AT_LEAST_ONCE);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, fsSettings);
        String tdengineSourceTableDDL = "CREATE TABLE `meters` (" +
                " ts TIMESTAMP," +
                " `current` FLOAT," +
                " voltage INT," +
                " phase FLOAT," +
                " location VARBINARY," +
                " groupid INT," +
                " tbname VARBINARY" +
                ") WITH (" +
                " 'connector' = 'tdengine-connector'," +
                " 'td.jdbc.url' = 'jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata'," +
                " 'td.jdbc.mode' = 'source'," +
                " 'table-name' = 'meters'," +
                " 'scan.query' = 'SELECT ts, `current`, voltage, phase, location, groupid, tbname FROM `meters`'" +
                ")";

        String tdengineSinkTableDDL = "CREATE TABLE `sink_meters` (" +
                " ts TIMESTAMP," +
                " `current` FLOAT," +
                " voltage INT," +
                " phase FLOAT," +
                " location VARBINARY," +
                " groupid INT," +
                " tbname VARBINARY" +
                ") WITH (" +
                " 'connector' = 'tdengine-connector'," +
                " 'td.jdbc.mode' = 'sink'," +
                " 'td.jdbc.url' = 'jdbc:TAOS-WS://localhost:6041/power_sink?user=root&password=taosdata'," +
                " 'sink.db.name' = 'power_sink'," +
                " 'sink.supertable.name' = 'sink_meters'" +
                ")";

        tableEnv.executeSql(tdengineSourceTableDDL);
        tableEnv.executeSql(tdengineSinkTableDDL);
        tableEnv.executeSql("INSERT INTO sink_meters SELECT ts, `current`, voltage, phase, location, groupid, tbname FROM `meters`");
    }
    //ANCHOR_END: source_table

    //ANCHOR: cdc_table
    static void testCdcTableToSink() throws Exception {
        EnvironmentSettings fsSettings = EnvironmentSettings.newInstance().inStreamingMode().build();
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(5);
        env.enableCheckpointing(1000, CheckpointingMode.AT_LEAST_ONCE);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, fsSettings);
        String tdengineSourceTableDDL = "CREATE TABLE `meters` (" +
                " ts TIMESTAMP," +
                " `current` FLOAT," +
                " voltage INT," +
                " phase FLOAT," +
                " location VARBINARY," +
                " groupid INT," +
                " tbname VARBINARY" +
                ") WITH (" +
                " 'connector' = 'tdengine-connector'," +
                " 'bootstrap.servers' = 'localhost:6041'," +
                " 'td.jdbc.mode' = 'cdc'," +
                " 'group.id' = 'group_22'," +
                " 'auto.offset.reset' = 'earliest'," +
                " 'enable.auto.commit' = 'false'," +
                " 'topic' = 'topic_meters'" +
                ")";

        String tdengineSinkTableDDL = "CREATE TABLE `sink_meters` (" +
                " ts TIMESTAMP," +
                " `current` FLOAT," +
                " voltage INT," +
                " phase FLOAT," +
                " location VARBINARY," +
                " groupid INT," +
                " tbname VARBINARY" +
                ") WITH (" +
                " 'connector' = 'tdengine-connector'," +
                " 'td.jdbc.mode' = 'cdc'," +
                " 'td.jdbc.url' = 'jdbc:TAOS-WS://localhost:6041/power_sink?user=root&password=taosdata'," +
                " 'sink.db.name' = 'power_sink'," +
                " 'sink.supertable.name' = 'sink_meters'" +
                ")";

        tableEnv.executeSql(tdengineSourceTableDDL);
        tableEnv.executeSql(tdengineSinkTableDDL);

        TableResult tableResult = tableEnv.executeSql("INSERT INTO sink_meters SELECT ts, `current`, voltage, phase, location, groupid, tbname FROM `meters`");

        Thread.sleep(5000L);
        tableResult.getJobClient().get().cancel().get();
    }
    //ANCHOR_END: cdc_table

}

@@ -2,7 +2,7 @@ module goexample
 
 go 1.17
 
-require github.com/taosdata/driver-go/v3 v3.5.6
+require github.com/taosdata/driver-go/v3 v3.6.0
 
 require (
 	github.com/google/uuid v1.3.0 // indirect

@@ -18,8 +18,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/taosdata/driver-go/v3 v3.5.6 h1:LDVtMyT3B9p2VREsd5KKM91D4Y7P4kSdh2SQumXi8bk=
-github.com/taosdata/driver-go/v3 v3.5.6/go.mod h1:H2vo/At+rOPY1aMzUV9P49SVX7NlXb3LAbKw+MCLrmU=
+github.com/taosdata/driver-go/v3 v3.6.0 h1:4dRXMl01DhIS5xBXUvtkkB+MjL8g64zN674xKd+ojTE=
+github.com/taosdata/driver-go/v3 v3.6.0/go.mod h1:H2vo/At+rOPY1aMzUV9P49SVX7NlXb3LAbKw+MCLrmU=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

@@ -0,0 +1,84 @@
package main

import (
	"database/sql/driver"
	"fmt"
	"log"
	"math/rand"
	"time"

	"github.com/taosdata/driver-go/v3/af"
	"github.com/taosdata/driver-go/v3/common"
	"github.com/taosdata/driver-go/v3/common/stmt"
)

func main() {
	host := "127.0.0.1"
	numOfSubTable := 10
	numOfRow := 10
	db, err := af.Open(host, "root", "taosdata", "", 0)
	if err != nil {
		log.Fatalln("Failed to connect to " + host + "; ErrMessage: " + err.Error())
	}
	defer db.Close()
	// prepare database and table
	_, err = db.Exec("CREATE DATABASE IF NOT EXISTS power")
	if err != nil {
		log.Fatalln("Failed to create database power, ErrMessage: " + err.Error())
	}
	_, err = db.Exec("USE power")
	if err != nil {
		log.Fatalln("Failed to use database power, ErrMessage: " + err.Error())
	}
	_, err = db.Exec("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))")
	if err != nil {
		log.Fatalln("Failed to create stable meters, ErrMessage: " + err.Error())
	}
	// prepare statement
	sql := "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)"
	reqID := common.GetReqID()
	stmt2 := db.Stmt2(reqID, false)
	err = stmt2.Prepare(sql)
	if err != nil {
		log.Fatalln("Failed to prepare sql, sql: " + sql + ", ErrMessage: " + err.Error())
	}
	for i := 1; i <= numOfSubTable; i++ {
		// generate column data
		current := time.Now()
		columns := make([][]driver.Value, 4)
		for j := 0; j < numOfRow; j++ {
			columns[0] = append(columns[0], current.Add(time.Millisecond*time.Duration(j)))
			columns[1] = append(columns[1], rand.Float32()*30)
			columns[2] = append(columns[2], rand.Int31n(300))
			columns[3] = append(columns[3], rand.Float32())
		}
		// generate bind data
		tableName := fmt.Sprintf("d_bind_%d", i)
		tags := []driver.Value{int32(i), []byte(fmt.Sprintf("location_%d", i))}
		bindData := []*stmt.TaosStmt2BindData{
			{
				TableName: tableName,
				Tags:      tags,
				Cols:      columns,
			},
		}
		// bind params
		err = stmt2.Bind(bindData)
		if err != nil {
			log.Fatalln("Failed to bind params, ErrMessage: " + err.Error())
		}
		// execute batch
		err = stmt2.Execute()
		if err != nil {
			log.Fatalln("Failed to exec, ErrMessage: " + err.Error())
		}
		// get affected rows
		affected := stmt2.GetAffectedRows()
		// you can check exeResult here
		fmt.Printf("Successfully inserted %d rows to %s.\n", affected, tableName)
	}
	err = stmt2.Close()
	if err != nil {
		log.Fatal("failed to close statement, err:", err)
	}
}

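// A sanity-check sketch for the example above, assuming af.Connector exposes a
// Query method returning the low-level driver.Rows (verify against the
// driver-go version in use):
//
//	rows, err := db.Query("SELECT COUNT(*) FROM power.meters")
//	if err != nil {
//		log.Fatalln("Failed to query, ErrMessage: " + err.Error())
//	}
//	defer rows.Close()
//	values := make([]driver.Value, 1)
//	if err = rows.Next(values); err == nil {
//		fmt.Println("rows in power.meters:", values[0])
//	}
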
@@ -22,7 +22,7 @@
         <dependency>
             <groupId>com.taosdata.jdbc</groupId>
             <artifactId>taos-jdbcdriver</artifactId>
-            <version>3.4.0</version>
+            <version>3.5.3</version>
         </dependency>
         <!-- ANCHOR_END: dep-->
 
@@ -0,0 +1,87 @@
package com.taos.example;

import com.taosdata.jdbc.ws.TSWSPreparedStatement;

import java.sql.*;
import java.util.ArrayList;
import java.util.Random;

// ANCHOR: para_bind
public class WSParameterBindingExtendInterfaceDemo {

    // modify host to your own
    private static final String host = "127.0.0.1";
    private static final Random random = new Random(System.currentTimeMillis());
    private static final int numOfSubTable = 10, numOfRow = 10;

    public static void main(String[] args) throws SQLException {

        String jdbcUrl = "jdbc:TAOS-WS://" + host + ":6041";
        try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) {
            init(conn);

            String sql = "INSERT INTO ? USING power.meters TAGS(?,?) VALUES (?,?,?,?)";

            try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {

                for (int i = 1; i <= numOfSubTable; i++) {
                    // set table name
                    pstmt.setTableName("d_bind_" + i);

                    // set tags
                    pstmt.setTagInt(0, i);
                    pstmt.setTagString(1, "location_" + i);

                    // set column ts
                    ArrayList<Long> tsList = new ArrayList<>();
                    long current = System.currentTimeMillis();
                    for (int j = 0; j < numOfRow; j++)
                        tsList.add(current + j);
                    pstmt.setTimestamp(0, tsList);

                    // set column current
                    ArrayList<Float> currentList = new ArrayList<>();
                    for (int j = 0; j < numOfRow; j++)
                        currentList.add(random.nextFloat() * 30);
                    pstmt.setFloat(1, currentList);

                    // set column voltage
                    ArrayList<Integer> voltageList = new ArrayList<>();
                    for (int j = 0; j < numOfRow; j++)
                        voltageList.add(random.nextInt(300));
                    pstmt.setInt(2, voltageList);

                    // set column phase
                    ArrayList<Float> phaseList = new ArrayList<>();
                    for (int j = 0; j < numOfRow; j++)
                        phaseList.add(random.nextFloat());
                    pstmt.setFloat(3, phaseList);
                    // add the batch of column data for this subtable
                    pstmt.columnDataAddBatch();
                }
                // execute the batched column data
                pstmt.columnDataExecuteBatch();
                // you can check exeResult here
                System.out.println("Successfully inserted " + (numOfSubTable * numOfRow) + " rows to power.meters.");
            }
        } catch (Exception ex) {
            // please refer to the JDBC specifications for detailed exceptions info
            System.out.printf("Failed to insert to table meters using stmt, %sErrMessage: %s%n",
                    ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "",
                    ex.getMessage());
            // Print stack trace for context in examples. Use logging in production.
            ex.printStackTrace();
            throw ex;
        }
    }

    private static void init(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE DATABASE IF NOT EXISTS power");
            stmt.execute("USE power");
            stmt.execute(
                    "CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
        }
    }
}
// ANCHOR_END: para_bind

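// Note on the extended interface used above: values are bound column-by-column
// (one list per column) rather than row-by-row, columnDataAddBatch() is called
// once per subtable, and a single columnDataExecuteBatch() then ships all
// numOfSubTable batches to the server in one round trip.
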
@@ -2,6 +2,7 @@ package com.taos.example;
 
 import com.taosdata.jdbc.ws.TSWSPreparedStatement;
 
+import java.math.BigInteger;
 import java.sql.*;
 import java.util.Random;
 
@@ -26,7 +27,12 @@ public class WSParameterBindingFullDemo {
                     "binary_col BINARY(100), " +
                     "nchar_col NCHAR(100), " +
                     "varbinary_col VARBINARY(100), " +
-                    "geometry_col GEOMETRY(100)) " +
+                    "geometry_col GEOMETRY(100)," +
+                    "utinyint_col tinyint unsigned," +
+                    "usmallint_col smallint unsigned," +
+                    "uint_col int unsigned," +
+                    "ubigint_col bigint unsigned" +
+                    ") " +
                     "tags (" +
                     "int_tag INT, " +
                     "double_tag DOUBLE, " +
@@ -34,7 +40,12 @@ public class WSParameterBindingFullDemo {
                     "binary_tag BINARY(100), " +
                     "nchar_tag NCHAR(100), " +
                     "varbinary_tag VARBINARY(100), " +
-                    "geometry_tag GEOMETRY(100))"
+                    "geometry_tag GEOMETRY(100)," +
+                    "utinyint_tag tinyint unsigned," +
+                    "usmallint_tag smallint unsigned," +
+                    "uint_tag int unsigned," +
+                    "ubigint_tag bigint unsigned" +
+                    ")"
     };
     private static final int numOfSubTable = 10, numOfRow = 10;
 
@@ -79,7 +90,7 @@ public class WSParameterBindingFullDemo {
                 // set table name
                 pstmt.setTableName("ntb_json_" + i);
                 // set tags
-                pstmt.setTagJson(1, "{\"device\":\"device_" + i + "\"}");
+                pstmt.setTagJson(0, "{\"device\":\"device_" + i + "\"}");
                 // set columns
                 long current = System.currentTimeMillis();
                 for (int j = 0; j < numOfRow; j++) {
@@ -94,25 +105,29 @@ public class WSParameterBindingFullDemo {
     }
 
     private static void stmtAll(Connection conn) throws SQLException {
-        String sql = "INSERT INTO ? using stb tags(?,?,?,?,?,?,?) VALUES (?,?,?,?,?,?,?,?)";
+        String sql = "INSERT INTO ? using stb tags(?,?,?,?,?,?,?,?,?,?,?) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)";
 
         try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
 
             // set table name
             pstmt.setTableName("ntb");
             // set tags
-            pstmt.setTagInt(1, 1);
-            pstmt.setTagDouble(2, 1.1);
-            pstmt.setTagBoolean(3, true);
-            pstmt.setTagString(4, "binary_value");
-            pstmt.setTagNString(5, "nchar_value");
-            pstmt.setTagVarbinary(6, new byte[] { (byte) 0x98, (byte) 0xf4, 0x6e });
-            pstmt.setTagGeometry(7, new byte[] {
+            pstmt.setTagInt(0, 1);
+            pstmt.setTagDouble(1, 1.1);
+            pstmt.setTagBoolean(2, true);
+            pstmt.setTagString(3, "binary_value");
+            pstmt.setTagNString(4, "nchar_value");
+            pstmt.setTagVarbinary(5, new byte[] { (byte) 0x98, (byte) 0xf4, 0x6e });
+            pstmt.setTagGeometry(6, new byte[] {
                     0x01, 0x01, 0x00, 0x00,
                     0x00, 0x00, 0x00, 0x00,
                     0x00, 0x00, 0x00, 0x59,
                     0x40, 0x00, 0x00, 0x00,
                     0x00, 0x00, 0x00, 0x59, 0x40 });
+            pstmt.setTagShort(7, (short)255);
+            pstmt.setTagInt(8, 65535);
+            pstmt.setTagLong(9, 4294967295L);
+            pstmt.setTagBigInteger(10, new BigInteger("18446744073709551615"));
 
             long current = System.currentTimeMillis();
 
@@ -129,6 +144,10 @@ public class WSParameterBindingFullDemo {
                     0x00, 0x00, 0x00, 0x59,
                     0x40, 0x00, 0x00, 0x00,
                     0x00, 0x00, 0x00, 0x59, 0x40 });
+            pstmt.setShort(9, (short)255);
+            pstmt.setInt(10, 65535);
+            pstmt.setLong(11, 4294967295L);
+            pstmt.setObject(12, new BigInteger("18446744073709551615"));
             pstmt.addBatch();
             pstmt.executeBatch();
             System.out.println("Successfully inserted rows to example_all_type_stmt.ntb");

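// A note on the index remapping in the diff above, as a reading of this change
// rather than of the connector documentation: the tag setters (setTagInt,
// setTagJson, ...) now count tag placeholders from 0, while the plain column
// setters keep the 1-based positions of standard JDBC, e.g.:
//     pstmt.setTagInt(0, 1);         // first ? in tags(...)
//     pstmt.setShort(9, (short)255); // ninth ? in VALUES(...), the utinyint_col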