Merge branch '3.0' into feat/highvolume

@ -0,0 +1,25 @@
name: Cancel Workflow on Merge

on:
  pull_request:
    types: [closed]

jobs:
  cancel-workflow:
    runs-on: ubuntu-latest
    steps:
      - name: Cancel Workflow if Merged or Closed
        if: ${{ github.event.pull_request.merged || github.event.pull_request.state == 'closed' }}
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          echo "PR has been merged or closed, cancelling workflow..."
          gh auth status
          gh run list \
            --repo ${{ github.repository }} \
            --branch ${{ github.event.pull_request.head.ref }} \
            --workflow "TDengine Test" \
            --status in_progress \
            --status queued \
            --json databaseId --jq '.[].databaseId' | \
            xargs -I {} gh run cancel --repo ${{ github.repository }} {}
@ -1,93 +0,0 @@
|
|||
name: TDengine CI Test
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- 'main'
|
||||
- '3.0'
|
||||
- '3.1'
|
||||
paths-ignore:
|
||||
- 'packaging/**'
|
||||
- 'docs/**'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}-TDengine
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
WKC: '/var/lib/jenkins/workspace/TDinternal/community'
|
||||
|
||||
jobs:
|
||||
fetch-parameters:
|
||||
runs-on:
|
||||
group: CI
|
||||
labels: [self-hosted, Linux, X64, testing]
|
||||
outputs:
|
||||
run_function_test: ${{ steps.parameters.outputs.run_function_test }}
|
||||
run_tdgpt_test: ${{ steps.parameters.outputs.run_tdgpt_test }}
|
||||
steps:
|
||||
- name: Determine trigger source and fetch parameters
|
||||
id: parameters
|
||||
run: |
|
||||
set -euo pipefail
|
||||
target_branch=${{ github.event.pull_request.base.ref }}
|
||||
|
||||
# Check whether to run tdgpt test cases
|
||||
cd ${{ env.WKC }}
|
||||
changed_files_non_doc=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | grep -v "^docs/en/" | grep -v "^docs/zh/" | grep -v ".md$" | tr '\n' ' ' || :)
|
||||
echo "changed files exclude doc: ${changed_files_non_doc}"
|
||||
|
||||
if [[ -n "$changed_files_non_doc" && "$changed_files_non_doc" =~ (forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt) ]]; then
|
||||
run_tdgpt_test="true"
|
||||
else
|
||||
run_tdgpt_test="false"
|
||||
fi
|
||||
echo "run tdgpt test: ${run_tdgpt_test}"
|
||||
|
||||
# Check whether to run function test cases
|
||||
changed_files_non_tdgpt=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | \
|
||||
grep -v "^docs/en/" | \
|
||||
grep -v "^docs/zh/" | \
|
||||
grep -v ".md$" | \
|
||||
grep -Ev "forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt" | \
|
||||
tr '\n' ' ' || :)
|
||||
echo "changed files exclude tdgpt: ${changed_files_non_tdgpt}"
|
||||
|
||||
if [ -n "$changed_files_non_tdgpt" ]; then
|
||||
run_function_test="true"
|
||||
else
|
||||
run_function_test="false"
|
||||
fi
|
||||
|
||||
echo "run function test: ${run_function_test}"
|
||||
|
||||
# Output the results for GitHub Actions
|
||||
echo "run_function_test=$run_function_test" >> $GITHUB_OUTPUT
|
||||
echo "run_tdgpt_test=$run_tdgpt_test" >> $GITHUB_OUTPUT
|
||||
|
||||
echo ${{ github.event.pull_request.head.ref }}
|
||||
echo ${{ github.event.pull_request.base.ref }}
|
||||
echo ${{ github.event.pull_request.number }}
|
||||
|
||||
run-tests-on-linux:
|
||||
uses: taosdata/.github/.github/workflows/run-tests-on-linux.yml@main
|
||||
needs: fetch-parameters
|
||||
if: ${{ needs.fetch-parameters.outputs.run_tdgpt_test == 'true' || needs.fetch-parameters.outputs.run_function_test == 'true' }}
|
||||
with:
|
||||
tdinternal: false
|
||||
run_function_test: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
|
||||
run_tdgpt_test: ${{ needs.fetch-parameters.outputs.run_tdgpt_test == 'true' }}
|
||||
|
||||
run-tests-on-mac:
|
||||
uses: taosdata/.github/.github/workflows/run-tests-on-macos.yml@main
|
||||
needs: fetch-parameters
|
||||
if: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
|
||||
with:
|
||||
tdinternal: false
|
||||
|
||||
run-tests-on-windows:
|
||||
uses: taosdata/.github/.github/workflows/run-tests-on-windows.yml@main
|
||||
needs: fetch-parameters
|
||||
if: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
|
||||
with:
|
||||
tdinternal: false
|
|
@ -1,9 +1,12 @@
|
|||
name: taosKeeper Build
|
||||
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
branches:
|
||||
- 'main'
|
||||
- '3.0'
|
||||
paths:
|
||||
- tools/keeper/**
|
||||
- 'tools/keeper/**'
|
||||
|
||||
jobs:
|
||||
build:
|
|
@ -0,0 +1,126 @@
|
|||
name: TDengine Build
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- 'main'
|
||||
- '3.0'
|
||||
- '3.1'
|
||||
- '3.3.6'
|
||||
- 'enh/cmake-TD-33848'
|
||||
paths-ignore:
|
||||
- 'docs/**'
|
||||
- 'packaging/**'
|
||||
- 'tests/**'
|
||||
- '**/*.md'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Run on ${{ matrix.os }}
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-20.04
|
||||
- ubuntu-22.04
|
||||
- ubuntu-24.04
|
||||
- macos-13
|
||||
- macos-14
|
||||
- macos-15
|
||||
|
||||
steps:
|
||||
- name: Checkout the repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.18
|
||||
|
||||
- name: Install dependencies on Linux
|
||||
if: runner.os == 'Linux'
|
||||
run: |
|
||||
sudo apt update -y
|
||||
sudo apt install -y \
|
||||
build-essential \
|
||||
cmake \
|
||||
gawk \
|
||||
libgeos-dev \
|
||||
libjansson-dev \
|
||||
liblzma-dev \
|
||||
libsnappy-dev \
|
||||
libssl-dev \
|
||||
libz-dev \
|
||||
pkg-config \
|
||||
zlib1g
|
||||
|
||||
- name: Install dependencies on macOS
|
||||
if: runner.os == 'macOS'
|
||||
run: |
|
||||
brew update
|
||||
brew install \
|
||||
argp-standalone \
|
||||
gawk \
|
||||
gflags \
|
||||
geos \
|
||||
jansson \
|
||||
openssl \
|
||||
pkg-config \
|
||||
snappy \
|
||||
zlib
|
||||
|
||||
- name: prepare install path
|
||||
run: |
|
||||
sudo mkdir -p /usr/local/lib
|
||||
sudo mkdir -p /usr/local/include
|
||||
|
||||
- name: Build and install TDengine
|
||||
run: |
|
||||
mkdir debug && cd debug
|
||||
cmake .. -DBUILD_TOOLS=true \
|
||||
-DBUILD_KEEPER=true \
|
||||
-DBUILD_HTTP=false \
|
||||
-DBUILD_TEST=true \
|
||||
-DWEBSOCKET=true \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DBUILD_DEPENDENCY_TESTS=false
|
||||
make -j 4
|
||||
sudo make install
|
||||
which taosd
|
||||
which taosadapter
|
||||
which taoskeeper
|
||||
|
||||
- name: Statistics ldd
|
||||
run: |
|
||||
find ${{ github.workspace }}/debug/build/lib -type f -name "*.so" -print0 | xargs -0 ldd || true
|
||||
find ${{ github.workspace }}/debug/build/bin -type f -print0 | xargs -0 ldd || true
|
||||
|
||||
- name: Statistics size
|
||||
run: |
|
||||
find ${{ github.workspace }}/debug/build/lib -type f -print0 | xargs -0 ls -lhrS
|
||||
find ${{ github.workspace }}/debug/build/bin -type f -print0 | xargs -0 ls -lhrS
|
||||
|
||||
- name: Start taosd
|
||||
run: |
|
||||
cp /etc/taos/taos.cfg ./
|
||||
sudo echo "supportVnodes 256" >> taos.cfg
|
||||
nohup sudo taosd -c taos.cfg &
|
||||
|
||||
- name: Start taosadapter
|
||||
run: nohup sudo taosadapter &
|
||||
|
||||
- name: Run tests with taosBenchmark
|
||||
run: |
|
||||
taosBenchmark -t 10 -n 10 -y
|
||||
taos -s "select count(*) from test.meters"
|
||||
|
||||
- name: Clean up
|
||||
if: always()
|
||||
run: |
|
||||
if pgrep taosd; then sudo pkill taosd; fi
|
||||
if pgrep taosadapter; then sudo pkill taosadapter; fi
|
|
@ -7,14 +7,13 @@ on:
|
|||
- '3.0'
|
||||
paths:
|
||||
- 'docs/**'
|
||||
- '*.md'
|
||||
|
||||
env:
|
||||
DOC_WKC: "/root/doc_ci_work"
|
||||
ZH_DOC_REPO: "docs.taosdata.com"
|
||||
EN_DOC_REPO: "docs.tdengine.com"
|
||||
TD_REPO: "TDengine"
|
||||
TOOLS_REPO: "taos-tools"
|
||||
DOC_WKC: '/root/doc_ci_work'
|
||||
ZH_DOC_REPO: 'docs.taosdata.com'
|
||||
EN_DOC_REPO: 'docs.tdengine.com'
|
||||
TD_REPO: 'TDengine'
|
||||
TOOLS_REPO: 'taos-tools'
|
||||
|
||||
jobs:
|
||||
build-doc:
|
|
@ -1,19 +1,15 @@
|
|||
name: TDengine Build
|
||||
name: TDengine Release Build
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- 'main'
|
||||
- '3.0'
|
||||
- '3.1'
|
||||
- '3.3.6'
|
||||
- 'enh/cmake-TD-33848'
|
||||
|
||||
- '3.*'
|
||||
paths-ignore:
|
||||
- 'docs/**'
|
||||
- 'packaging/**'
|
||||
- 'tests/**'
|
||||
- '*.md'
|
||||
- '**/*.md'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
|
@ -21,17 +17,14 @@ concurrency:
|
|||
|
||||
jobs:
|
||||
build:
|
||||
name: Build and test on ${{ matrix.os }}
|
||||
name: Run on ${{ matrix.os }}
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-20.04
|
||||
- ubuntu-22.04
|
||||
- ubuntu-24.04
|
||||
- macos-13
|
||||
- macos-14
|
||||
- macos-15
|
||||
|
||||
steps:
|
||||
- name: Checkout the repository
|
||||
|
@ -81,6 +74,8 @@ jobs:
|
|||
-DBUILD_KEEPER=true \
|
||||
-DBUILD_HTTP=false \
|
||||
-DBUILD_TEST=true \
|
||||
-DWEBSOCKET=true \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DBUILD_DEPENDENCY_TESTS=false
|
||||
make -j 4
|
||||
sudo make install
|
||||
|
@ -88,6 +83,16 @@ jobs:
|
|||
which taosadapter
|
||||
which taoskeeper
|
||||
|
||||
- name: Statistics ldd
|
||||
run: |
|
||||
find ${{ github.workspace }}/debug/build/lib -type f -name "*.so" -print0 | xargs -0 ldd || true
|
||||
find ${{ github.workspace }}/debug/build/bin -type f -print0 | xargs -0 ldd || true
|
||||
|
||||
- name: Statistics size
|
||||
run: |
|
||||
find ${{ github.workspace }}/debug/build/lib -type f -print0 | xargs -0 ls -lhrS
|
||||
find ${{ github.workspace }}/debug/build/bin -type f -print0 | xargs -0 ls -lhrS
|
||||
|
||||
- name: Start taosd
|
||||
run: |
|
||||
cp /etc/taos/taos.cfg ./
|
|
@ -0,0 +1,69 @@
|
|||
name: TDengine Test
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- 'main'
|
||||
- '3.0'
|
||||
- '3.1'
|
||||
- '3.3.6'
|
||||
paths-ignore:
|
||||
- 'packaging/**'
|
||||
- 'docs/**'
|
||||
- 'tools/tdgpt/**'
|
||||
- 'source/libs/executor/src/forecastoperator.c'
|
||||
- 'source/libs/executor/src/anomalywindowoperator.c'
|
||||
- 'source/dnode/mnode/impl/src/mndAnode.c'
|
||||
- 'include/common/tanalytics.h'
|
||||
- 'source/common/src/tanalytics.c'
|
||||
- 'tests/parallel/tdgpt_cases.task'
|
||||
- 'tests/script/tsim/analytics'
|
||||
- '**/*.md'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
specified_source_branch:
|
||||
description: 'Enter the source branch name of TDengine'
|
||||
required: true
|
||||
type: string
|
||||
specified_target_branch:
|
||||
description: 'Enter the target branch name of TDengine'
|
||||
required: true
|
||||
type: string
|
||||
specified_pr_number:
|
||||
description: 'Enter the PR number of TDengine'
|
||||
required: true
|
||||
type: string
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event_name }}-
|
||||
${{ github.event_name == 'pull_request' && github.event.pull_request.base.ref || inputs.specified_target_branch }}-
|
||||
${{ github.event_name == 'pull_request' && github.event.pull_request.number || inputs.specified_pr_number }}-TDengine
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
WKC: '/var/lib/jenkins/workspace/TDinternal/community'
|
||||
|
||||
jobs:
|
||||
run-tests-on-linux:
|
||||
uses: taosdata/.github/.github/workflows/run-tests-on-linux.yml@main
|
||||
with:
|
||||
tdinternal: false
|
||||
specified_source_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_source_branch }}
|
||||
specified_target_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_target_branch }}
|
||||
specified_pr_number: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_pr_number }}
|
||||
|
||||
run-tests-on-mac:
|
||||
uses: taosdata/.github/.github/workflows/run-tests-on-macos.yml@main
|
||||
with:
|
||||
tdinternal: false
|
||||
specified_source_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_source_branch }}
|
||||
specified_target_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_target_branch }}
|
||||
specified_pr_number: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_pr_number }}
|
||||
|
||||
run-tests-on-windows:
|
||||
uses: taosdata/.github/.github/workflows/run-tests-on-windows.yml@main
|
||||
with:
|
||||
tdinternal: false
|
||||
specified_source_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_source_branch }}
|
||||
specified_target_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_target_branch }}
|
||||
specified_pr_number: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_pr_number }}
|
|
@ -1,51 +0,0 @@
|
|||
name: TDgpt CI
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- '3.0'
|
||||
paths:
|
||||
- 'tools/tdgpt/**'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.10"]
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: ${{ github.workspace }}/tools/tdgpt
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
cache: 'pip'
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
python -m pip install flake8 pytest pylint
|
||||
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
|
||||
|
||||
- name: Checking the code with pylint
|
||||
run: |
|
||||
pylint $(git ls-files '*.py') --exit-zero
|
||||
|
||||
- name: Checking the code with flake8
|
||||
run: |
|
||||
# stop the build if there are Python syntax errors or undefined names
|
||||
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
|
||||
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
|
||||
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
|
||||
|
||||
- name: Run test cases with pytest
|
||||
run: |
|
||||
pytest
|
|
@ -0,0 +1,203 @@
|
|||
# Run unit-test and system-test cases for TDgpt when TDgpt code is changed.
|
||||
|
||||
name: TDgpt Test
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- 'main'
|
||||
- '3.0'
|
||||
- '3.3.6'
|
||||
paths:
|
||||
- 'tools/tdgpt/**'
|
||||
- 'source/libs/executor/src/forecastoperator.c'
|
||||
- 'source/libs/executor/src/anomalywindowoperator.c'
|
||||
- 'source/dnode/mnode/impl/src/mndAnode.c'
|
||||
- 'include/common/tanalytics.h'
|
||||
- 'source/common/src/tanalytics.c'
|
||||
- 'tests/parallel/tdgpt_cases.task'
|
||||
- 'tests/script/tsim/analytics'
|
||||
|
||||
jobs:
|
||||
unit-test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.10"]
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: ${{ github.workspace }}/tools/tdgpt
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
cache: 'pip'
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
python -m pip install flake8 pytest pylint
|
||||
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
|
||||
|
||||
- name: Checking the code with pylint
|
||||
run: |
|
||||
pylint $(git ls-files '*.py') --exit-zero
|
||||
|
||||
- name: Checking the code with flake8
|
||||
run: |
|
||||
# stop the build if there are Python syntax errors or undefined names
|
||||
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
|
||||
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
|
||||
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
|
||||
|
||||
- name: Run test cases with pytest
|
||||
run: |
|
||||
pytest
|
||||
|
||||
function-test:
|
||||
runs-on:
|
||||
group: CI
|
||||
labels: [self-hosted, Linux, X64, testing]
|
||||
|
||||
env:
|
||||
CONTAINER_NAME: 'taosd-test'
|
||||
WKDIR: '/var/lib/jenkins/workspace'
|
||||
WK: '/var/lib/jenkins/workspace/TDinternal'
|
||||
WKC: '/var/lib/jenkins/workspace/TDinternal/community'
|
||||
SOURCE_BRANCH: ${{ github.event.pull_request.head.ref }}
|
||||
TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
|
||||
steps:
|
||||
- name: Output the environment information
|
||||
run: |
|
||||
echo "::group::Environment Info"
|
||||
date
|
||||
hostname
|
||||
env
|
||||
echo "Runner: ${{ runner.name }}"
|
||||
echo "Workspace: ${{ env.WKDIR }}"
|
||||
git --version
|
||||
echo "${{ env.WKDIR }}/restore.sh -p PR-${{ env.PR_NUMBER }} -n ${{ github.run_number }} -c ${{ env.CONTAINER_NAME }}"
|
||||
echo "::endgroup::"
|
||||
|
||||
- name: Prepare repositories
|
||||
run: |
|
||||
set -euo pipefail
|
||||
prepare_environment() {
|
||||
cd "$1"
|
||||
git reset --hard
|
||||
git clean -f
|
||||
git remote prune origin
|
||||
git fetch
|
||||
git checkout "$2"
|
||||
}
|
||||
prepare_environment "${{ env.WK }}" "${{ env.TARGET_BRANCH }}"
|
||||
prepare_environment "${{ env.WKC }}" "${{ env.TARGET_BRANCH }}"
|
||||
|
||||
- name: Get latest codes and logs
|
||||
run: |
|
||||
cd ${{ env.WKC }}
|
||||
git remote prune origin
|
||||
git pull >/dev/null
|
||||
git log -5
|
||||
echo "`date "+%Y%m%d-%H%M%S"` TDengineTest/${{ env.PR_NUMBER }}:${{ github.run_number }}:${{ env.TARGET_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
|
||||
echo "CHANGE_BRANCH:${{ env.SOURCE_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
|
||||
echo "community log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
|
||||
git fetch origin +refs/pull/${{ env.PR_NUMBER }}/merge
|
||||
git checkout -qf FETCH_HEAD
|
||||
git log -5
|
||||
echo "community log merged: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
|
||||
cd ${{ env.WK }}
|
||||
git pull >/dev/null
|
||||
git log -5
|
||||
echo "TDinternal log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
|
||||
|
||||
- name: Update submodule
|
||||
run: |
|
||||
cd ${{ env.WKC }}
|
||||
git submodule update --init --recursive
|
||||
|
||||
- name: Detect non-doc files changed
|
||||
run: |
|
||||
mkdir -p ${{ env.WKDIR }}/tmp/${{ env.PR_NUMBER }}_${{ github.run_number }}
|
||||
cd ${{ env.WKC }}
|
||||
changed_files_non_doc=$(git --no-pager diff --name-only \
|
||||
FETCH_HEAD \
|
||||
$(git merge-base FETCH_HEAD ${{ env.TARGET_BRANCH }}) | \
|
||||
grep -v "^docs/en/" | \
|
||||
grep -v "^docs/zh/" | \
|
||||
grep -v ".md$" | \
|
||||
tr '\n' ' ' || : \
|
||||
)
|
||||
echo $changed_files_non_doc > \
|
||||
${{ env.WKDIR }}/tmp/${{ env.PR_NUMBER }}_${{ github.run_number }}/docs_changed.txt
|
||||
|
||||
- name: Check assert testing
|
||||
run: |
|
||||
cd ${{ env.WKC }}/tests/parallel_test
|
||||
./run_check_assert_container.sh -d ${{ env.WKDIR }}
|
||||
|
||||
- name: Check void function testing
|
||||
run: |
|
||||
cd ${{ env.WKC }}/tests/parallel_test
|
||||
./run_check_void_container.sh -d ${{ env.WKDIR }}
|
||||
|
||||
- name: Build docker container
|
||||
run: |
|
||||
date
|
||||
rm -rf ${{ env.WKC }}/debug
|
||||
cd ${{ env.WKC }}/tests/parallel_test
|
||||
time ./container_build.sh -w ${{ env.WKDIR }} -e
|
||||
|
||||
- name: Get parameters for testing
|
||||
id: get_param
|
||||
run: |
|
||||
log_server_file="/home/log_server.json"
|
||||
timeout_cmd=""
|
||||
extra_param=""
|
||||
|
||||
if [ -f "$log_server_file" ]; then
|
||||
log_server_enabled=$(jq '.enabled' "$log_server_file")
|
||||
timeout_param=$(jq '.timeout' "$log_server_file")
|
||||
if [ "$timeout_param" != "null" ] && [ "$timeout_param" != "0" ]; then
|
||||
timeout_cmd="timeout $timeout_param"
|
||||
fi
|
||||
|
||||
if [ "$log_server_enabled" == "1" ]; then
|
||||
log_server=$(jq '.server' "$log_server_file" | sed 's/\\\"//g')
|
||||
if [ "$log_server" != "null" ] && [ "$log_server" != "" ]; then
|
||||
extra_param="-w $log_server"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
echo "timeout_cmd=$timeout_cmd" >> $GITHUB_OUTPUT
|
||||
echo "extra_param=$extra_param" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Run function returns with a null pointer scan testing
|
||||
run: |
|
||||
cd ${{ env.WKC }}/tests/parallel_test
|
||||
./run_scan_container.sh \
|
||||
-d ${{ env.WKDIR }} \
|
||||
-b ${{ env.PR_NUMBER }}_${{ github.run_number }} \
|
||||
-f ${{ env.WKDIR }}/tmp/${{ env.PR_NUMBER }}_${{ github.run_number }}/docs_changed.txt \
|
||||
${{ steps.get_param.outputs.extra_param }}
|
||||
|
||||
- name: Run tdgpt test cases
|
||||
run: |
|
||||
cd ${{ env.WKC }}/tests/parallel_test
|
||||
export DEFAULT_RETRY_TIME=2
|
||||
date
|
||||
timeout 600 time ./run.sh -e \
|
||||
-m /home/m.json \
|
||||
-t tdgpt_cases.task \
|
||||
-b "${{ env.PR_NUMBER }}_${{ github.run_number }}" \
|
||||
-l ${{ env.WKDIR }}/log \
|
||||
-o 300 ${{ steps.get_param.outputs.extra_param }}
|
|
@ -1,3 +1,5 @@
# Scheduled updates for the TDgpt service.

name: TDgpt Update Service

on:
@ -51,7 +51,6 @@ pysim/
tests/script/api/batchprepare
taosadapter
taosadapter-debug
tools/taos-tools/*
tools/taosws-rs/*
tools/taosadapter/*
tools/upx*

@ -60,7 +59,6 @@ tools/upx*
html/
/.vs
/CMakeFiles/3.10.2
/CMakeCache.txt
/Makefile
/*.cmake
/src/cq/test/CMakeFiles/cqtest.dir/*.cmake

@ -133,14 +131,12 @@ tools/THANKS
tools/NEWS
tools/COPYING
tools/BUGS
tools/taos-tools
tools/taosws-rs
tags
.clangd
*CMakeCache*
*CMakeFiles*
.history/
*.txt
*.tcl
*.pc
contrib/geos
@ -19,21 +19,21 @@ include(${TD_SUPPORT_DIR}/cmake.options)
|
|||
include(${TD_SUPPORT_DIR}/cmake.define)
|
||||
include(${TD_SUPPORT_DIR}/cmake.version)
|
||||
include(${TD_SUPPORT_DIR}/cmake.install)
|
||||
enable_testing()
|
||||
|
||||
set_property(GLOBAL PROPERTY GLOBAL_DEPENDS_NO_CYCLES OFF)
|
||||
|
||||
add_library(api INTERFACE)
|
||||
target_include_directories(api INTERFACE "include/client")
|
||||
|
||||
add_subdirectory(contrib)
|
||||
add_subdirectory(source)
|
||||
add_subdirectory(tools)
|
||||
add_subdirectory(utils)
|
||||
add_subdirectory(tests)
|
||||
add_subdirectory(docs/doxgen)
|
||||
|
||||
if(${BUILD_TEST})
|
||||
include(CTest)
|
||||
enable_testing()
|
||||
add_subdirectory(examples/c)
|
||||
endif(${BUILD_TEST})
|
||||
endif(${BUILD_TEST})
|
||||
|
||||
add_library(api INTERFACE)
|
||||
target_include_directories(api INTERFACE "include/client")
|
||||
add_subdirectory(source)
|
||||
add_subdirectory(tools)
|
||||
add_subdirectory(utils)
|
||||
add_subdirectory(tests)
|
||||
add_subdirectory(docs/doxgen)
|
||||
|
|
Jenkinsfile2 (13 changes)
|
@ -112,16 +112,6 @@ def build_pre_docs(){
|
|||
git fetch origin +refs/pull/${CHANGE_ID}/merge
|
||||
git checkout -qf FETCH_HEAD
|
||||
'''
|
||||
|
||||
sh '''
|
||||
cd ${DOC_WKC}/${tools_repo}
|
||||
git reset --hard
|
||||
git clean -f
|
||||
git fetch
|
||||
git remote prune origin
|
||||
git checkout ''' + env.CHANGE_TARGET + '''
|
||||
git pull >/dev/null
|
||||
'''
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -371,6 +361,7 @@ def pre_test_build_win() {
|
|||
pip3 install taospy==2.7.21
|
||||
pip3 install taos-ws-py==0.3.8
|
||||
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
|
||||
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taosnative.dll C:\\Windows\\System32
|
||||
'''
|
||||
return 1
|
||||
}
|
||||
|
@ -389,7 +380,9 @@ def run_win_test() {
|
|||
bat '''
|
||||
echo "windows test ..."
|
||||
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
|
||||
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taosnative.dll C:\\Windows\\System32
|
||||
ls -l C:\\Windows\\System32\\taos.dll
|
||||
ls -l C:\\Windows\\System32\\taosnative.dll
|
||||
time /t
|
||||
cd %WIN_SYSTEM_TEST_ROOT%
|
||||
echo "testing ..."
|
||||
|
|
|
@ -80,7 +80,7 @@ TDengine can currently be installed and run on Linux, Windows, macOS, and other platforms.
|
|||
### Ubuntu 18.04, 20.04, 22.04
|
||||
|
||||
```bash
|
||||
sudo apt-get udpate
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y gcc cmake build-essential git libjansson-dev \
|
||||
libsnappy-dev liblzma-dev zlib1g-dev pkg-config
|
||||
```
|
||||
|
@ -188,7 +188,7 @@ cmake .. && cmake --build .
|
|||
|
||||
If you want to compile taosAdapter, you need to add the `-DBUILD_HTTP=false` option.
|
||||
|
||||
If you want to compile taosKeeper, you need to add the `--DBUILD_KEEPER=true` option.
|
||||
If you want to compile taosKeeper, you need to add the `-DBUILD_KEEPER=true` option.
|
||||
|
||||
</details>
|
||||
|
||||
|
|
README.md (20 changes)
|
@ -8,7 +8,7 @@
|
|||
</a>
|
||||
</p>
|
||||
|
||||
[](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml)
|
||||
[](https://github.com/taosdata/TDengine/actions/workflows/tdengine-test.yml)
|
||||
[](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
|
||||
[](https://github.com/feici02/TDengine/commits/main/)
|
||||
<br />
|
||||
|
@ -29,9 +29,9 @@ English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine
|
|||
1. [Introduction](#1-introduction)
|
||||
1. [Documentation](#2-documentation)
|
||||
1. [Prerequisites](#3-prerequisites)
|
||||
- [3.1 Prerequisites On Linux](#31-on-linux)
|
||||
- [3.2 Prerequisites On macOS](#32-on-macos)
|
||||
- [3.3 Prerequisites On Windows](#33-on-windows)
|
||||
- [3.1 Prerequisites On Linux](#31-prerequisites-on-linux)
|
||||
- [3.2 Prerequisites On macOS](#32-prerequisites-on-macos)
|
||||
- [3.3 Prerequisites On Windows](#33-prerequisites-on-windows)
|
||||
- [3.4 Clone the repo](#34-clone-the-repo)
|
||||
1. [Building](#4-building)
|
||||
- [4.1 Build on Linux](#41-build-on-linux)
|
||||
|
@ -70,6 +70,8 @@ TDengine is an open source, high-performance, cloud native [time-series database
|
|||
|
||||
For a full list of TDengine competitive advantages, please [check here](https://tdengine.com/tdengine/). The easiest way to experience TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
|
||||
|
||||
For the latest TDengine component TDgpt, please refer to [TDgpt README](./tools/tdgpt/README.md) for details.
|
||||
|
||||
# 2. Documentation
|
||||
|
||||
For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) ([TDengine 文档](https://docs.taosdata.com))
|
||||
|
@ -84,7 +86,7 @@ At the moment, TDengine server supports running on Linux/Windows/MacOS systems.
|
|||
|
||||
If you want to compile taosAdapter or taosKeeper, you need to install Go 1.18 or above.
|
||||
|
||||
## 3.1 On Linux
|
||||
## 3.1 Prerequisites on Linux
|
||||
|
||||
<details>
|
||||
|
||||
|
@ -109,7 +111,7 @@ yum install -y zlib-static xz-devel snappy-devel jansson-devel pkgconfig libatom
|
|||
|
||||
</details>
|
||||
|
||||
## 3.2 On macOS
|
||||
## 3.2 Prerequisites on macOS
|
||||
|
||||
<details>
|
||||
|
||||
|
@ -123,7 +125,7 @@ brew install argp-standalone gflags pkgconfig
|
|||
|
||||
</details>
|
||||
|
||||
## 3.3 On Windows
|
||||
## 3.3 Prerequisites on Windows
|
||||
|
||||
<details>
|
||||
|
||||
|
@ -172,7 +174,7 @@ make
|
|||
|
||||
If you want to compile taosAdapter, you need to add the `-DBUILD_HTTP=false` option.
|
||||
|
||||
If you want to compile taosKeeper, you need to add the `--DBUILD_KEEPER=true` option.
|
||||
If you want to compile taosKeeper, you need to add the `-DBUILD_KEEPER=true` option.
|
||||
|
||||
You can use Jemalloc as memory allocator instead of glibc:
|
||||
|
||||
|
@ -204,7 +206,7 @@ cmake .. && cmake --build .
|
|||
|
||||
If you want to compile taosAdapter, you need to add the `-DBUILD_HTTP=false` option.
|
||||
|
||||
If you want to compile taosKeeper, you need to add the `--DBUILD_KEEPER=true` option.
|
||||
If you want to compile taosKeeper, you need to add the `-DBUILD_KEEPER=true` option.
|
||||
|
||||
</details>
|
||||
|
||||
|
|
|
@ -97,14 +97,10 @@ ELSE()
|
|||
SET(TD_TAOS_TOOLS TRUE)
|
||||
ENDIF()
|
||||
|
||||
SET(TAOS_LIB taos)
|
||||
SET(TAOS_LIB taos)
|
||||
SET(TAOS_LIB_STATIC taos_static)
|
||||
|
||||
IF(${TD_WINDOWS})
|
||||
SET(TAOS_LIB_PLATFORM_SPEC taos_static)
|
||||
ELSE()
|
||||
SET(TAOS_LIB_PLATFORM_SPEC taos)
|
||||
ENDIF()
|
||||
SET(TAOS_NATIVE_LIB taosnative)
|
||||
SET(TAOS_NATIVE_LIB_STATIC taosnative_static)
|
||||
|
||||
# build TSZ by default
|
||||
IF("${TSZ_ENABLED}" MATCHES "false")
|
||||
|
|
|
@ -9,61 +9,61 @@ option(
|
|||
)
|
||||
|
||||
IF(${TD_WINDOWS})
|
||||
IF(NOT TD_ASTRA)
|
||||
MESSAGE("build pthread Win32")
|
||||
option(
|
||||
BUILD_PTHREAD
|
||||
"If build pthread on Windows"
|
||||
ON
|
||||
)
|
||||
|
||||
MESSAGE("build pthread Win32")
|
||||
option(
|
||||
BUILD_PTHREAD
|
||||
"If build pthread on Windows"
|
||||
ON
|
||||
)
|
||||
MESSAGE("build gnu regex for Windows")
|
||||
option(
|
||||
BUILD_GNUREGEX
|
||||
"If build gnu regex on Windows"
|
||||
ON
|
||||
)
|
||||
|
||||
MESSAGE("build gnu regex for Windows")
|
||||
option(
|
||||
BUILD_GNUREGEX
|
||||
"If build gnu regex on Windows"
|
||||
ON
|
||||
)
|
||||
MESSAGE("build iconv Win32")
|
||||
option(
|
||||
BUILD_WITH_ICONV
|
||||
"If build iconv on Windows"
|
||||
ON
|
||||
)
|
||||
|
||||
MESSAGE("build iconv Win32")
|
||||
option(
|
||||
BUILD_WITH_ICONV
|
||||
"If build iconv on Windows"
|
||||
ON
|
||||
)
|
||||
MESSAGE("build msvcregex Win32")
|
||||
option(
|
||||
BUILD_MSVCREGEX
|
||||
"If build msvcregex on Windows"
|
||||
ON
|
||||
)
|
||||
|
||||
MESSAGE("build msvcregex Win32")
|
||||
option(
|
||||
BUILD_MSVCREGEX
|
||||
"If build msvcregex on Windows"
|
||||
ON
|
||||
)
|
||||
MESSAGE("build wcwidth Win32")
|
||||
option(
|
||||
BUILD_WCWIDTH
|
||||
"If build wcwidth on Windows"
|
||||
ON
|
||||
)
|
||||
|
||||
MESSAGE("build wcwidth Win32")
|
||||
option(
|
||||
BUILD_WCWIDTH
|
||||
"If build wcwidth on Windows"
|
||||
ON
|
||||
)
|
||||
MESSAGE("build wingetopt Win32")
|
||||
option(
|
||||
BUILD_WINGETOPT
|
||||
"If build wingetopt on Windows"
|
||||
ON
|
||||
)
|
||||
|
||||
MESSAGE("build wingetopt Win32")
|
||||
option(
|
||||
BUILD_WINGETOPT
|
||||
"If build wingetopt on Windows"
|
||||
ON
|
||||
)
|
||||
|
||||
option(
|
||||
TDENGINE_3
|
||||
"TDengine 3.x for taos-tools"
|
||||
ON
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_CRASHDUMP
|
||||
"If build crashdump on Windows"
|
||||
ON
|
||||
)
|
||||
option(
|
||||
TDENGINE_3
|
||||
"TDengine 3.x for taos-tools"
|
||||
ON
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_CRASHDUMP
|
||||
"If build crashdump on Windows"
|
||||
ON
|
||||
)
|
||||
ENDIF ()
|
||||
ELSEIF (TD_DARWIN_64)
|
||||
IF(${BUILD_TEST})
|
||||
add_definitions(-DCOMPILER_SUPPORTS_CXX13)
|
||||
|
@ -71,58 +71,102 @@ ELSEIF (TD_DARWIN_64)
|
|||
ENDIF ()
|
||||
|
||||
option(
|
||||
BUILD_GEOS
|
||||
"If build with geos"
|
||||
BUILD_WITH_LEMON
|
||||
"If build with lemon"
|
||||
ON
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_WITH_UDF
|
||||
"If build with UDF"
|
||||
ON
|
||||
)
|
||||
|
||||
IF(NOT TD_ASTRA)
|
||||
option(
|
||||
BUILD_GEOS
|
||||
"If build with geos"
|
||||
ON
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_SHARED_LIBS
|
||||
""
|
||||
OFF
|
||||
)
|
||||
|
||||
option(
|
||||
RUST_BINDINGS
|
||||
"If build with rust-bindings"
|
||||
ON
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_PCRE2
|
||||
"If build with pcre2"
|
||||
ON
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_SHARED_LIBS
|
||||
""
|
||||
OFF
|
||||
option(
|
||||
JEMALLOC_ENABLED
|
||||
"If build with jemalloc"
|
||||
OFF
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_SANITIZER
|
||||
"If build sanitizer"
|
||||
OFF
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_ADDR2LINE
|
||||
"If build addr2line"
|
||||
OFF
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_WITH_LEVELDB
|
||||
"If build with leveldb"
|
||||
OFF
|
||||
)
|
||||
|
||||
option(
|
||||
RUST_BINDINGS
|
||||
"If build with rust-bindings"
|
||||
ON
|
||||
option(
|
||||
BUILD_WITH_ROCKSDB
|
||||
"If build with rocksdb"
|
||||
ON
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_PCRE2
|
||||
"If build with pcre2"
|
||||
ON
|
||||
)
|
||||
option(
|
||||
BUILD_WITH_LZ4
|
||||
"If build with lz4"
|
||||
ON
|
||||
)
|
||||
ELSE ()
|
||||
|
||||
option(
|
||||
JEMALLOC_ENABLED
|
||||
"If build with jemalloc"
|
||||
OFF
|
||||
option(
|
||||
BUILD_WITH_LZMA2
|
||||
"If build with lzma2"
|
||||
ON
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_SANITIZER
|
||||
"If build sanitizer"
|
||||
OFF
|
||||
)
|
||||
ENDIF ()
|
||||
|
||||
option(
|
||||
BUILD_ADDR2LINE
|
||||
"If build addr2line"
|
||||
OFF
|
||||
)
|
||||
ADD_DEFINITIONS(-DUSE_AUDIT)
|
||||
ADD_DEFINITIONS(-DUSE_GEOS)
|
||||
ADD_DEFINITIONS(-DUSE_UDF)
|
||||
ADD_DEFINITIONS(-DUSE_STREAM)
|
||||
ADD_DEFINITIONS(-DUSE_PRCE2)
|
||||
ADD_DEFINITIONS(-DUSE_RSMA)
|
||||
ADD_DEFINITIONS(-DUSE_TSMA)
|
||||
ADD_DEFINITIONS(-DUSE_TQ)
|
||||
ADD_DEFINITIONS(-DUSE_TOPIC)
|
||||
ADD_DEFINITIONS(-DUSE_MONITOR)
|
||||
ADD_DEFINITIONS(-DUSE_REPORT)
|
||||
|
||||
option(
|
||||
BUILD_WITH_LEVELDB
|
||||
"If build with leveldb"
|
||||
OFF
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_WITH_ROCKSDB
|
||||
"If build with rocksdb"
|
||||
ON
|
||||
)
|
||||
IF(${TD_ASTRA_RPC})
|
||||
ADD_DEFINITIONS(-DTD_ASTRA_RPC)
|
||||
ENDIF()
|
||||
|
||||
IF(${TD_LINUX})
|
||||
|
||||
|
@ -150,6 +194,12 @@ option(
|
|||
ON
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_WITH_LZMA2
|
||||
"If build with lzma2"
|
||||
ON
|
||||
)
|
||||
|
||||
ENDIF ()
|
||||
|
||||
IF(NOT TD_ENTERPRISE)
|
||||
|
@ -191,6 +241,14 @@ option(BUILD_WITH_COS "If build with cos" OFF)
|
|||
|
||||
ENDIF ()
|
||||
|
||||
IF(${TAOSD_INTEGRATED})
|
||||
add_definitions(-DTAOSD_INTEGRATED)
|
||||
ENDIF()
|
||||
|
||||
IF(${TD_AS_LIB})
|
||||
add_definitions(-DTD_AS_LIB)
|
||||
ENDIF()
|
||||
|
||||
option(
|
||||
BUILD_WITH_SQLITE
|
||||
"If build with sqlite"
|
||||
|
@ -209,6 +267,14 @@ option(
|
|||
off
|
||||
)
|
||||
|
||||
option(
|
||||
BUILD_WITH_NURAFT
|
||||
"If build with NuRaft"
|
||||
OFF
|
||||
)
|
||||
|
||||
IF(NOT TD_ASTRA)
|
||||
|
||||
option(
|
||||
BUILD_WITH_UV
|
||||
"If build with libuv"
|
||||
|
@ -242,6 +308,7 @@ option(
|
|||
"If use invertedIndex"
|
||||
ON
|
||||
)
|
||||
ENDIF ()
|
||||
|
||||
option(
|
||||
BUILD_RELEASE
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
|
||||
# xz
|
||||
|
||||
if (${TD_LINUX})
|
||||
if (${BUILD_WITH_LZMA2})
|
||||
ExternalProject_Add(lzma2
|
||||
GIT_REPOSITORY https://github.com/conor42/fast-lzma2.git
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/lzma2"
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taosadapter
|
||||
ExternalProject_Add(taosadapter
|
||||
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
||||
GIT_TAG 3.0
|
||||
GIT_TAG 3.3.6
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# taosws-rs
|
||||
ExternalProject_Add(taosws-rs
|
||||
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
|
||||
GIT_TAG main
|
||||
GIT_TAG 3.0
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -92,7 +92,9 @@ if(${BUILD_TEST})
|
|||
endif(${BUILD_TEST})
|
||||
|
||||
# lz4
|
||||
cat("${TD_SUPPORT_DIR}/lz4_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
if(${BUILD_WITH_LZ4})
|
||||
cat("${TD_SUPPORT_DIR}/lz4_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif(${BUILD_WITH_LZ4})
|
||||
|
||||
# zlib
|
||||
cat("${TD_SUPPORT_DIR}/zlib_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
|
@ -186,16 +188,22 @@ if(${BUILD_PCRE2})
|
|||
cat("${TD_SUPPORT_DIR}/pcre2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif()
|
||||
|
||||
find_program(C_COMPILER_LEMON NAMES gcc)
|
||||
if(C_COMPILER_LEMON)
|
||||
message(STATUS "LEMON C compiler: ${C_COMPILER_LEMON}")
|
||||
else()
|
||||
set(C_COMPILER_LEMON ${CMAKE_C_COMPILER})
|
||||
message(STATUS "LEMON C compiler: ${C_COMPILER_LEMON}")
|
||||
endif()
|
||||
|
||||
# lemon
|
||||
cat("${TD_SUPPORT_DIR}/lemon_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
if(${BUILD_WITH_LEMON})
|
||||
if(${TD_ACORE})
|
||||
set(C_COMPILER_LEMON ${CMAKE_C_COMPILER})
|
||||
else()
|
||||
find_program(C_COMPILER_LEMON NAMES gcc)
|
||||
endif()
|
||||
if(C_COMPILER_LEMON)
|
||||
message(STATUS "LEMON C compiler: ${C_COMPILER_LEMON}")
|
||||
else()
|
||||
set(C_COMPILER_LEMON ${CMAKE_C_COMPILER})
|
||||
message(STATUS "LEMON C compiler: ${C_COMPILER_LEMON}")
|
||||
endif()
|
||||
|
||||
cat("${TD_SUPPORT_DIR}/lemon_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif()
|
||||
|
||||
# Force specify CC=cc on MacOS. Because the default CC setting in the generated Makefile has issues finding standard library headers
|
||||
IF(${TD_DARWIN})
|
||||
|
@ -273,11 +281,13 @@ unset(CMAKE_PROJECT_INCLUDE_BEFORE)
|
|||
# endif()
|
||||
|
||||
# lz4
|
||||
add_subdirectory(lz4/build/cmake EXCLUDE_FROM_ALL)
|
||||
target_include_directories(
|
||||
lz4_static
|
||||
PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/lz4/lib
|
||||
)
|
||||
if(${BUILD_WITH_LZ4})
|
||||
add_subdirectory(lz4/build/cmake EXCLUDE_FROM_ALL)
|
||||
target_include_directories(
|
||||
lz4_static
|
||||
PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/lz4/lib
|
||||
)
|
||||
endif(${BUILD_WITH_LZ4})
|
||||
|
||||
# zlib
|
||||
set(CMAKE_PROJECT_INCLUDE_BEFORE "${TD_SUPPORT_DIR}/EnableCMP0048.txt.in")
|
||||
|
|
|
@ -191,7 +191,7 @@ INTERVAL(interval_val [, interval_offset])
|
|||
|
||||
The time window clause includes 3 sub-clauses:
|
||||
|
||||
- INTERVAL clause: used to generate windows of equal time periods, where interval_val specifies the size of each time window, and interval_offset specifies;
|
||||
- INTERVAL clause: used to generate windows of equal time periods, where interval_val specifies the size of each time window, and interval_offset specifies its starting offset. By default, windows begin at Unix time 0 (1970-01-01 00:00:00 UTC). If interval_offset is specified, the windows start from "Unix time 0 + interval_offset";
|
||||
- SLIDING clause: used to specify the time the window slides forward;
|
||||
- FILL: used to specify the filling mode of data in case of missing data in the window interval.
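
As a rough illustration of the INTERVAL and interval_offset behavior described in the list above, the following sketch uses a 1-day window shifted by 2 hours, so each window starts at 02:00 UTC instead of 00:00 UTC (the table name power.meters is a hypothetical example borrowed from other examples in these docs):

```sql
-- Hypothetical example: 1-day windows offset by 2 hours from Unix time 0.
SELECT _wstart, COUNT(*)
FROM power.meters
INTERVAL(1d, 2h);
```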
|
||||
|
||||
|
|
|
@ -69,10 +69,10 @@ This statement creates a subscription that includes all table data in the databa
|
|||
|
||||
## Delete Topic
|
||||
|
||||
If you no longer need to subscribe to the data, you can delete the topic. Note that only topics that are not currently subscribed can be deleted.
|
||||
If you no longer need to subscribe to the data, you can delete the topic. If the topic is currently subscribed to by a consumer, it can be forcibly deleted with the FORCE syntax; after the forced deletion, the subscribed consumers will receive errors when consuming data (the FORCE syntax is supported from version 3.3.6.0).
|
||||
|
||||
```sql
|
||||
DROP TOPIC [IF EXISTS] topic_name;
|
||||
DROP TOPIC [IF EXISTS] [FORCE] topic_name;
|
||||
```
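
As a concrete usage sketch (the topic name topic_meters is hypothetical), dropping a topic that still has an active consumer would look like the following; without FORCE the statement is rejected, while with FORCE the topic is removed and the remaining consumers start receiving errors:

```sql
-- Hypothetical topic name; FORCE requires version 3.3.6.0 or later.
DROP TOPIC IF EXISTS FORCE topic_meters;
```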
|
||||
|
||||
## View Topics
|
||||
|
@ -99,10 +99,10 @@ Displays information about all consumers in the current database, including the
|
|||
|
||||
### Delete Consumer Group
|
||||
|
||||
When creating a consumer, a consumer group is assigned to the consumer. Consumers cannot be explicitly deleted, but the consumer group can be deleted with the following statement when there are no consumers in the group:
|
||||
When creating a consumer, a consumer group is assigned to the consumer. Consumers cannot be deleted explicitly, but the consumer group can be deleted. If consumers in the group are still consuming, the FORCE syntax can be used to force the deletion; after the forced deletion, those consumers will receive errors when consuming data (the FORCE syntax is supported from version 3.3.6.0).
|
||||
|
||||
```sql
|
||||
DROP CONSUMER GROUP [IF EXISTS] cgroup_name ON topic_name;
|
||||
DROP CONSUMER GROUP [IF EXISTS] [FORCE] cgroup_name ON topic_name;
|
||||
```
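
A corresponding sketch for force-dropping a consumer group (the group and topic names are hypothetical):

```sql
-- Hypothetical names; FORCE requires version 3.3.6.0 or later.
DROP CONSUMER GROUP IF EXISTS FORCE cgrp_monitor ON topic_meters;
```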
|
||||
|
||||
## Data Subscription
|
||||
|
@ -137,6 +137,7 @@ If the following 3 data entries were written, then during replay, the first entr
|
|||
|
||||
When using the data subscription's replay feature, note the following:
|
||||
|
||||
- Enable the replay function by setting the consumer parameter enable.replay to true.
|
||||
- The replay function of data subscription only supports data playback for query subscriptions; supertable and database subscriptions do not support playback.
|
||||
- Replay does not support progress saving.
|
||||
- Because data playback itself requires processing time, there is a precision error of several tens of milliseconds in playback.
|
||||
|
|
|
@ -26,11 +26,11 @@ CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name
|
|||
SUBTABLE(expression) AS subquery
|
||||
|
||||
stream_options: {
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE]
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE | CONTINUOUS_WINDOW_CLOSE [recalculate rec_time_val] ]
|
||||
WATERMARK time
|
||||
IGNORE EXPIRED [0|1]
|
||||
DELETE_MARK time
|
||||
FILL_HISTORY [0|1]
|
||||
FILL_HISTORY [0|1] [ASYNC]
|
||||
IGNORE UPDATE [0|1]
|
||||
}
|
||||
|
||||
|
@ -108,6 +108,13 @@ Under normal circumstances, stream computation tasks will not process data that
|
|||
|
||||
By enabling the fill_history option, the created stream computation task will be capable of processing data written before, during, and after the creation of the stream. This means that data written either before or after the creation of the stream will be included in the scope of stream computation, thus ensuring data integrity and consistency. This setting provides users with greater flexibility, allowing them to flexibly handle historical and new data according to actual needs.
|
||||
|
||||
Tips:
|
||||
- When fill_history is enabled, creating a stream requires finding the boundary point of the historical data; if there is a lot of historical data, creating the stream may take a long time. In this case, you can use fill_history 1 async (supported since version 3.3.6.0) so that the stream is created in the background: the CREATE STREAM statement returns immediately without blocking subsequent operations. async only takes effect together with fill_history 1; creating a stream with fill_history 0 is very fast and does not need asynchronous processing.
|
||||
|
||||
- SHOW STREAMS can be used to view the progress of background stream creation: the ready status indicates success, init indicates that the stream is still being created, and failed indicates that creation has failed; the message column shows the reason for the failure. A stream whose creation failed can be dropped and recreated.
|
||||
|
||||
- In addition, do not create multiple streams asynchronously at the same time, as transaction conflicts may cause subsequent streams to fail.
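
Before the fill_history example below, here is a minimal sketch of the asynchronous variant described in the tips above (the stream and output table names are hypothetical; fill_history 1 async requires version 3.3.6.0 or later):

```sql
-- The stream is created in the background; check its status with SHOW STREAMS.
create stream if not exists count_history_s fill_history 1 async
  into count_history_out
  as select _wstart, count(*) from power.meters interval(10s);
```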
|
||||
|
||||
For example, create a stream to count the number of data entries generated by all smart meters every 10s, and also calculate historical data. SQL as follows:
|
||||
|
||||
```sql
|
||||
|
@ -135,8 +142,12 @@ When creating a stream, you can specify the trigger mode of stream computing thr
|
|||
1. AT_ONCE: Triggered immediately upon writing.
|
||||
2. WINDOW_CLOSE: Triggered when the window closes (the closing of the window is determined by the event time, can be used in conjunction with watermark).
|
||||
3. MAX_DELAY time: If the window closes, computation is triggered. If the window has not closed, and the duration since it has not closed exceeds the time specified by max delay, computation is triggered.
|
||||
4. FORCE_WINDOW_CLOSE: Based on the current time of the operating system, only the results of the currently closed window are calculated and pushed out. The window is only calculated once at the moment of closure, and will not be recalculated subsequently. This mode currently only supports INTERVAL windows (does not support sliding); FILL_HISTORY must be 0, IGNORE EXPIRED must be 1, IGNORE UPDATE must be 1; FILL only supports PREV, NULL, NONE, VALUE.
|
||||
|
||||
4. FORCE_WINDOW_CLOSE: Based on the current time of the operating system, only the results of the currently closed window are calculated and pushed out. The window is calculated only once at the moment of closure and will not be recalculated subsequently. This mode currently supports only INTERVAL windows (sliding is supported). In this mode, FILL_HISTORY is automatically set to 0, IGNORE EXPIRED is automatically set to 1, and IGNORE UPDATE is automatically set to 1; FILL supports only PREV, NULL, NONE, and VALUE.
|
||||
- This mode can be used to implement continuous queries, such as creating a stream that, every 1 second, counts the number of data entries in the window covering the past 10 seconds. SQL as follows:
|
||||
```sql
|
||||
create stream if not exists continuous_query_s trigger force_window_close into continuous_query as select count(*) from power.meters interval(10s) sliding(1s)
|
||||
```
|
||||
5. CONTINUOUS_WINDOW_CLOSE: Results are output when the window is closed. Modifying or deleting data does not immediately trigger a recalculation. Instead, periodic recalculations are performed every rec_time_val duration. If rec_time_val is not specified, the recalculation period is 60 minutes. If the recalculation time exceeds rec_time_val, the next recalculation will be automatically initiated after the current one is completed. Currently, this mode only supports INTERVAL windows. If the FILL clause is used, relevant information of the adapter needs to be configured, including adapterFqdn, adapterPort, and adapterToken. The adapterToken is a string obtained by Base64-encoding `{username}:{password}`. For example, after encoding `root:taosdata`, the result is `cm9vdDp0YW9zZGF0YQ==`.
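
A hypothetical sketch of this trigger mode, based only on the syntax shown above (the stream and table names are made up, and the 10-minute recalculation period is arbitrary):

```sql
-- Recalculate closed windows every 10 minutes to pick up late updates or deletes.
create stream if not exists cwc_s trigger continuous_window_close recalculate 10m
  into cwc_out
  as select _wstart, count(*) from power.meters interval(1m);
```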
|
||||
The closing of the window is determined by the event time, such as when the event stream is interrupted or continuously delayed, at which point the event time cannot be updated, possibly leading to outdated computation results.
|
||||
|
||||
Therefore, stream computing provides the MAX_DELAY trigger mode that combines event time with processing time: MAX_DELAY mode triggers computation immediately when the window closes, and its unit can be specified, specific units: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks). Additionally, when data is written, if the time that triggers computation exceeds the time specified by MAX_DELAY, computation is triggered immediately.
|
||||
|
|
|
@ -107,21 +107,23 @@ The header is the first line of the CSV file, with the following rules:
|
|||
|
||||
(1) The header of the CSV can configure the following columns:
|
||||
|
||||
| Number | Column Name | Description | Required | Default Behavior |
|
||||
| ------ | ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 1 | point_id | The id of the data point on the OPC UA server | Yes | None |
|
||||
| 2 | stable | The corresponding supertable for the data point in TDengine | Yes | None |
|
||||
| 3 | tbname | The corresponding subtable for the data point in TDengine | Yes | None |
|
||||
| 4 | enable | Whether to collect data from this point | No | Use the unified default value `1` for enable |
|
||||
| 5 | value_col | The column name in TDengine corresponding to the collected value of the data point | No | Use the unified default value `val` as the value_col |
|
||||
| 6 | value_transform | The transformation function executed in taosX for the collected value of the data point | No | Do not transform the collected value uniformly |
|
||||
| 7 | type | The data type of the collected value of the data point | No | Use the original type of the collected value as the data type in TDengine |
|
||||
| 8 | quality_col | The column name in TDengine corresponding to the quality of the collected value | No | Do not add a quality column in TDengine uniformly |
|
||||
| 9 | ts_col | The original timestamp column of the data point in TDengine | No | If both ts_col and received_ts_col are non-empty, use the former as the timestamp column; if one of ts_col or received_ts_col is non-empty, use the non-empty column as the timestamp column; if both are empty, use the original timestamp of the data point as the timestamp column with the default name `ts`. |
|
||||
| 10 | received_ts_col | The timestamp column in TDengine when the data point value is received | No | Same as above |
|
||||
| 11 | ts_transform | The transformation function executed in taosX for the original timestamp of the data point | No | Do not transform the original timestamp of the data point uniformly |
|
||||
| 12 | received_ts_transform | The transformation function executed in taosX for the received timestamp of the data point | No | Do not transform the received timestamp of the data point uniformly |
|
||||
| 13 | tag::VARCHAR(200)::name | The Tag column corresponding to the data point in TDengine. Here `tag` is a reserved keyword indicating that this column is a tag; `VARCHAR(200)` indicates the type of tag; `name` is the actual name of the tag. | No | If 1 or more tag columns are configured, use the configured tag columns; if no tag columns are configured and stable exists in TDengine, use the tags of the stable in TDengine; if no tag columns are configured and stable does not exist in TDengine, automatically add the following 2 tag columns: tag::VARCHAR(256)::point_id and tag::VARCHAR(256)::point_name |
|
||||
| Number | Column Name | Description | Required | Default Behavior |
|
||||
|--------|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -------- |-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| 1 | point_id | The id of the data point on the OPC UA server | Yes | None |
|
||||
| 2 | stable | The corresponding supertable for the data point in TDengine | Yes | None |
|
||||
| 3 | tbname | The corresponding subtable for the data point in TDengine | Yes | None |
|
||||
| 4 | enable | Whether to collect data from this point | No | Use the unified default value `1` for enable |
|
||||
| 5 | value_col | The column name in TDengine corresponding to the collected value of the data point | No | Use the unified default value `val` as the value_col |
|
||||
| 6 | value_transform | The transformation function executed in taosX for the collected value of the data point | No | Do not transform the collected value uniformly |
|
||||
| 7 | type | The data type of the collected value of the data point | No | Use the original type of the collected value as the data type in TDengine |
|
||||
| 8 | quality_col | The column name in TDengine corresponding to the quality of the collected value | No | Do not add a quality column in TDengine uniformly |
|
||||
| 9      | ts_col                  | The original timestamp column of the data point in TDengine | No | Among the three columns ts_col, request_ts_col, and received_ts_col, when two or more of them are configured, the leftmost column is used as the primary key in TDengine. |
|
||||
| 10 | request_ts_col | The timestamp column in TDengine when the data point value is request | No | Same as above |
|
||||
| 11 | received_ts_col | The timestamp column in TDengine when the data point value is received | No | Same as above |
|
||||
| 12 | ts_transform | The transformation function executed in taosX for the original timestamp of the data point | No | Do not transform the original timestamp of the data point uniformly |
|
||||
| 13 | request_ts_transform | The transformation function executed in taosX for the request timestamp of the data point | No | Do not transform the original timestamp of the data point uniformly |
|
||||
| 14 | received_ts_transform | The transformation function executed in taosX for the received timestamp of the data point | No | Do not transform the received timestamp of the data point uniformly |
|
||||
| 15 | tag::VARCHAR(200)::name | The Tag column corresponding to the data point in TDengine. Here `tag` is a reserved keyword indicating that this column is a tag; `VARCHAR(200)` indicates the type of tag; `name` is the actual name of the tag. | No | If 1 or more tag columns are configured, use the configured tag columns; if no tag columns are configured and stable exists in TDengine, use the tags of the stable in TDengine; if no tag columns are configured and stable does not exist in TDengine, automatically add the following 2 tag columns: tag::VARCHAR(256)::point_id and tag::VARCHAR(256)::point_name |
|
||||
|
||||
(2) In the CSV Header, there cannot be duplicate columns;
|
||||
|
||||
|
@ -137,21 +139,23 @@ Each Row in the CSV file configures an OPC data point. The rules for Rows are as
|
|||
|
||||
(1) Correspondence with columns in the Header
|
||||
|
||||
| Number | Column in Header | Type of Value | Value Range | Mandatory | Default Value |
|
||||
| ------ | ----------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | ------------------------ |
|
||||
| 1 | point_id | String | Strings like `ns=3;i=1005`, must meet the OPC UA ID specification, i.e., include ns and id parts | Yes | |
|
||||
| 2 | enable | int | 0: Do not collect this point, and delete the corresponding subtable in TDengine before the OPC DataIn task starts; 1: Collect this point, do not delete the subtable before the OPC DataIn task starts. | No | 1 |
|
||||
| 3 | stable | String | Any string that meets the TDengine supertable naming convention; if special character `.` exists, replace with underscore if `{type}` exists: if type in CSV file is not empty, replace with the value of type if type is empty, replace with the original type of the collected value | Yes | |
|
||||
| 4 | tbname | String | Any string that meets the TDengine subtable naming convention; for OPC UA: if `{ns}` exists, replace with ns from point_id if `{id}` exists, replace with id from point_id for OPC DA: if `{tag_name}` exists, replace with tag_name | Yes | |
|
||||
| 5 | value_col | String | Column name that meets TDengine naming convention | No | val |
|
||||
| 6 | value_transform | String | Expressions that meet the Rhai engine, for example: `(val + 10) / 1000 * 2.0`, `log(val) + 10`, etc.; | No | None |
|
||||
| 7 | type | String | Supported types include: b/bool/i8/tinyint/i16/small/inti32/int/i64/bigint/u8/tinyint unsigned/u16/smallint unsigned/u32/int unsigned/u64/bigint unsigned/f32/float/f64/double/timestamp/timestamp(ms)/timestamp(us)/timestamp(ns)/json | No | Original type of the data point value |
|
||||
| 8 | quality_col | String | Column name that meets TDengine naming convention | No | None |
|
||||
| 9 | ts_col | String | Column name that meets TDengine naming convention | No | ts |
|
||||
| 10 | received_ts_col | String | Column name that meets TDengine naming convention | No | rts |
|
||||
| 11 | ts_transform | String | Supports +, -, *, /, % operators, for example: ts / 1000* 1000, sets the last 3 digits of a timestamp in ms to 0; ts + 8 *3600* 1000, adds 8 hours to a timestamp in ms; ts - 8 *3600* 1000, subtracts 8 hours from a timestamp in ms; | No | None |
|
||||
| 12 | received_ts_transform | String | Supports +, -, *, /, % operators, for example: rts / 1000 * 1000, sets the last 3 digits of a timestamp in ms to 0; rts + 8 * 3600 * 1000, adds 8 hours to a timestamp in ms; rts - 8 * 3600 * 1000, subtracts 8 hours from a timestamp in ms | No | None |
|
||||
| 13 | tag::VARCHAR(200)::name | String | The value inside a tag, when the tag type is VARCHAR, can be in Chinese | No | NULL |
|
||||
| Number | Column in Header | Type of Value | Value Range | Mandatory | Default Value |
|
||||
|--------|-------------------------| ------------- |----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| --------- |---------------------------------------|
|
||||
| 1 | point_id | String | Strings like `ns=3;i=1005`, must meet the OPC UA ID specification, i.e., include ns and id parts | Yes | |
|
||||
| 2 | enable | int | 0: Do not collect this point, and delete the corresponding subtable in TDengine before the OPC DataIn task starts; 1: Collect this point, do not delete the subtable before the OPC DataIn task starts. | No | 1 |
|
||||
| 3 | stable | String | Any string that meets the TDengine supertable naming convention; if the special character `.` exists, replace it with an underscore; if `{type}` exists: when type in the CSV file is not empty, replace `{type}` with the value of type; when type is empty, replace `{type}` with the original type of the collected value | Yes | |
|
||||
| 4 | tbname | String | Any string that meets the TDengine subtable naming convention; for OPC UA: if `{ns}` exists, replace it with the ns from point_id; if `{id}` exists, replace it with the id from point_id; for OPC DA: if `{tag_name}` exists, replace it with tag_name | Yes | |
|
||||
| 5 | value_col | String | Column name that meets TDengine naming convention | No | val |
|
||||
| 6 | value_transform | String | Expressions that meet the Rhai engine, for example: `(val + 10) / 1000 * 2.0`, `log(val) + 10`, etc.; | No | None |
|
||||
| 7 | type | String | Supported types include: b/bool/i8/tinyint/i16/smallint/i32/int/i64/bigint/u8/tinyint unsigned/u16/smallint unsigned/u32/int unsigned/u64/bigint unsigned/f32/float/f64/double/timestamp/timestamp(ms)/timestamp(us)/timestamp(ns)/json | No | Original type of the data point value |
|
||||
| 8 | quality_col | String | Column name that meets TDengine naming convention | No | None |
|
||||
| 9 | ts_col | String | Column name that meets TDengine naming convention | No | ts |
|
||||
| 10 | request_ts_col | String | Column name that meets TDengine naming convention | No | qts |
|
||||
| 11 | received_ts_col | String | Column name that meets TDengine naming convention | No | rts |
|
||||
| 12 | ts_transform | String | Supports +, -, *, /, % operators, for example: ts / 1000 * 1000, sets the last 3 digits of a timestamp in ms to 0; ts + 8 * 3600 * 1000, adds 8 hours to a timestamp in ms; ts - 8 * 3600 * 1000, subtracts 8 hours from a timestamp in ms | No | None |
|
||||
| 13 | request_ts_transform | String | Supports +, -, *, /, % operators, for example: qts / 1000 * 1000, sets the last 3 digits of a timestamp in ms to 0; qts + 8 * 3600 * 1000, adds 8 hours to a timestamp in ms; qts - 8 * 3600 * 1000, subtracts 8 hours from a timestamp in ms | No | None |
|
||||
| 14 | received_ts_transform | String | Supports +, -, *, /, % operators, for example: rts / 1000 * 1000, sets the last 3 digits of a timestamp in ms to 0; rts + 8 * 3600 * 1000, adds 8 hours to a timestamp in ms; rts - 8 * 3600 * 1000, subtracts 8 hours from a timestamp in ms | No | None |
|
||||
| 15 | tag::VARCHAR(200)::name | String | The value inside a tag, when the tag type is VARCHAR, can be in Chinese | No | NULL |
|
||||
|
||||
(2) `point_id` is unique throughout the DataIn task, meaning: in an OPC DataIn task, a data point can only be written to one subtable in TDengine. If you need to write a data point to multiple subtables, you need to create multiple OPC DataIn tasks;
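For illustration only, a minimal CSV sketch that follows the header and row rules above might look like this (the point ID, supertable/subtable templates, and tag column are hypothetical):

```csv
point_id,enable,stable,tbname,value_col,type,ts_col,received_ts_col,tag::VARCHAR(200)::location
ns=3;i=1005,1,opc_ua_{type},point_{id},val,f32,ts,rts,workshop_1
```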
|
||||
|
||||
|
@ -171,7 +175,7 @@ Data points can be filtered by configuring **Root Node ID**, **Namespace**, **Re
|
|||
|
||||
Configure **Supertable Name**, **Table Name** to specify the supertable and subtable where the data will be written.
|
||||
|
||||
Configure **Primary Key Column**, choose `origin_ts` to use the original timestamp of the OPC data point as the primary key in TDengine; choose `received_ts` to use the data's reception timestamp as the primary key in TDengine. Configure **Primary Key Alias** to specify the name of the TDengine timestamp column.
|
||||
Configure **Primary Key Column**, choose `origin_ts` to use the original timestamp of the OPC data point as the primary key in TDengine; choose `request_ts` to use the data's request timestamp as the primary key in TDengine; choose `received_ts` to use the data's reception timestamp as the primary key in TDengine. Configure **Primary Key Alias** to specify the name of the TDengine timestamp column.
|
||||
|
||||
<figure>
|
||||
<Image img={imgStep5} alt=""/>
|
||||
|
|
|
@ -81,21 +81,23 @@ The header is the first line of the CSV file, with the following rules:
|
|||
|
||||
(1) The header of the CSV can configure the following columns:
|
||||
|
||||
| No. | Column Name | Description | Required | Default Behavior |
|
||||
| ---- | ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 1 | tag_name | The id of the data point on the OPC DA server | Yes | None |
|
||||
| 2 | stable | The supertable in TDengine corresponding to the data point | Yes | None |
|
||||
| 3 | tbname | The subtable in TDengine corresponding to the data point | Yes | None |
|
||||
| 4 | enable | Whether to collect data from this point | No | Use a unified default value `1` for enable |
|
||||
| 5 | value_col | The column name in TDengine corresponding to the collected value of the data point | No | Use a unified default value `val` as the value_col |
|
||||
| 6 | value_transform | The transform function executed in taosX for the collected value of the data point | No | Do not perform a transform on the collected value |
|
||||
| 7 | type | The data type of the collected value of the data point | No | Use the original type of the collected value as the data type in TDengine |
|
||||
| 8 | quality_col | The column name in TDengine corresponding to the quality of the collected value | No | Do not add a quality column in TDengine |
|
||||
| 9 | ts_col | The timestamp column in TDengine corresponding to the original timestamp of the data point | No | If both ts_col and received_ts_col are non-empty, use the former as the timestamp column; if one of ts_col or received_ts_col is non-empty, use the non-empty column as the timestamp column; if both are empty, use the original timestamp of the data point as the timestamp column in TDengine, with the default column name ts. |
|
||||
| 10 | received_ts_col | The timestamp column in TDengine corresponding to the timestamp when the data point value was received | No | |
|
||||
| 11 | ts_transform | The transform function executed in taosX for the original timestamp of the data point | No | Do not perform a transform on the original timestamp of the data point |
|
||||
| 12 | received_ts_transform | The transform function executed in taosX for the received timestamp of the data point | No | Do not perform a transform on the received timestamp of the data point |
|
||||
| 13 | tag::VARCHAR(200)::name | The Tag column in TDengine corresponding to the data point. Where `tag` is a reserved keyword, indicating that this column is a tag column; `VARCHAR(200)` indicates the type of this tag, which can also be other legal types; `name` is the actual name of this tag. | No | If one or more tag columns are configured, use the configured tag columns; if no tag columns are configured and stable exists in TDengine, use the tags of the stable in TDengine; if no tag columns are configured and stable does not exist in TDengine, automatically add the following two tag columns by default: tag::VARCHAR(256)::point_id and tag::VARCHAR(256)::point_name |
|
||||
| No. | Column Name | Description | Required | Default Behavior |
|
||||
|-----|-------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| 1 | tag_name | The id of the data point on the OPC DA server | Yes | None |
|
||||
| 2 | stable | The supertable in TDengine corresponding to the data point | Yes | None |
|
||||
| 3 | tbname | The subtable in TDengine corresponding to the data point | Yes | None |
|
||||
| 4 | enable | Whether to collect data from this point | No | Use a unified default value `1` for enable |
|
||||
| 5 | value_col | The column name in TDengine corresponding to the collected value of the data point | No | Use a unified default value `val` as the value_col |
|
||||
| 6 | value_transform | The transform function executed in taosX for the collected value of the data point | No | Do not perform a transform on the collected value |
|
||||
| 7 | type | The data type of the collected value of the data point | No | Use the original type of the collected value as the data type in TDengine |
|
||||
| 8 | quality_col | The column name in TDengine corresponding to the quality of the collected value | No | Do not add a quality column in TDengine |
|
||||
| 9 | ts_col | The timestamp column in TDengine corresponding to the original timestamp of the data point | No | Among the ts_col, request_ts_col, and received_ts_col columns, when more than one is configured, the leftmost column is used as the primary key in TDengine. |
|
||||
| 10 | request_ts_col | The timestamp column in TDengine corresponding to the timestamp when the data point value was requested | No | Same as above |
|
||||
| 11 | received_ts_col | The timestamp column in TDengine corresponding to the timestamp when the data point value was received | No | Same as above |
|
||||
| 12 | ts_transform | The transform function executed in taosX for the original timestamp of the data point | No | Do not perform a transform on the original timestamp of the data point |
|
||||
| 13 | request_ts_transform | The transform function executed in taosX for the request timestamp of the data point | No | Do not perform a transform on the request timestamp of the data point |
|
||||
| 14 | received_ts_transform | The transform function executed in taosX for the received timestamp of the data point | No | Do not perform a transform on the received timestamp of the data point |
|
||||
| 15 | tag::VARCHAR(200)::name | The Tag column in TDengine corresponding to the data point. Where `tag` is a reserved keyword, indicating that this column is a tag column; `VARCHAR(200)` indicates the type of this tag, which can also be other legal types; `name` is the actual name of this tag. | No | If one or more tag columns are configured, use the configured tag columns; if no tag columns are configured and stable exists in TDengine, use the tags of the stable in TDengine; if no tag columns are configured and stable does not exist in TDengine, automatically add the following two tag columns by default: tag::VARCHAR(256)::point_id and tag::VARCHAR(256)::point_name |
|
||||
|
||||
(2) In the CSV Header, there cannot be duplicate columns;
|
||||
|
||||
|
@ -112,7 +114,7 @@ Each Row in the CSV file configures an OPC data point. The rules for Rows are as
|
|||
(1) Correspondence with columns in the Header
|
||||
|
||||
| Number | Column in Header | Type of Value | Range of Values | Mandatory | Default Value |
|
||||
| ------ | ----------------------- | ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | ------------------------ |
|
||||
|--------|-------------------------| ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | ------------------------ |
|
||||
| 1 | tag_name | String | Strings like `root.parent.temperature`, must meet the OPC DA ID specification | Yes | |
|
||||
| 2 | enable | int | 0: Do not collect this point, and delete the corresponding subtable in TDengine before the OPC DataIn task starts; 1: Collect this point, do not delete the subtable before the OPC DataIn task starts. | No | 1 |
|
||||
| 3 | stable | String | Any string that meets the TDengine supertable naming convention; if there are special characters `.`, replace with underscore. If `{type}` exists: if type in CSV file is not empty, replace with the value of type; if empty, replace with the original type of the collected value | Yes | |
|
||||
|
@ -122,10 +124,12 @@ Each Row in the CSV file configures an OPC data point. The rules for Rows are as
|
|||
| 7 | type | String | Supported types include: b/bool/i8/tinyint/i16/smallint/i32/int/i64/bigint/u8/tinyint unsigned/u16/smallint unsigned/u32/int unsigned/u64/bigint unsigned/f32/float/f64/double/timestamp/timestamp(ms)/timestamp(us)/timestamp(ns)/json | No | Original type of data point value |
|
||||
| 8 | quality_col | String | Column name that meets TDengine naming convention | No | None |
|
||||
| 9 | ts_col | String | Column name that meets TDengine naming convention | No | ts |
|
||||
| 10 | received_ts_col | String | Column name that meets TDengine naming convention | No | rts |
|
||||
| 11 | ts_transform | String | Supports +, -, *, /, % operators, for example: ts / 1000 * 1000, sets the last 3 digits of a ms unit timestamp to 0; ts + 8 * 3600 * 1000, adds 8 hours to a ms precision timestamp; ts - 8 * 3600 * 1000, subtracts 8 hours from a ms precision timestamp | No | None |
|
||||
| 12 | received_ts_transform | String | Supports +, -, *, /, % operators, for example: rts / 1000 * 1000, sets the last 3 digits of a ms unit timestamp to 0; rts + 8 * 3600 * 1000, adds 8 hours to a ms precision timestamp; rts - 8 * 3600 * 1000, subtracts 8 hours from a ms precision timestamp | No | None |
|
||||
| 13 | tag::VARCHAR(200)::name | String | The value in tag, when the tag type is VARCHAR, it can be in Chinese | No | NULL |
|
||||
| 10 | request_ts_col | String | Column name that meets TDengine naming convention | No | qts |
|
||||
| 11 | received_ts_col | String | Column name that meets TDengine naming convention | No | rts |
|
||||
| 12 | ts_transform | String | Supports +, -, *, /, % operators, for example: ts / 1000 * 1000, sets the last 3 digits of a ms unit timestamp to 0; ts + 8 * 3600 * 1000, adds 8 hours to a ms precision timestamp; ts - 8 * 3600 * 1000, subtracts 8 hours from a ms precision timestamp | No | None |
|
||||
| 13 | request_ts_transform | String | Supports +, -, *, /, % operators, for example: qts / 1000 * 1000, sets the last 3 digits of a ms unit timestamp to 0; qts + 8 * 3600 * 1000, adds 8 hours to a ms precision timestamp; qts - 8 * 3600 * 1000, subtracts 8 hours from a ms precision timestamp | No | None |
|
||||
| 14 | received_ts_transform | String | Supports +, -, *, /, % operators, for example: rts / 1000 * 1000, sets the last 3 digits of a ms unit timestamp to 0; rts + 8 * 3600 * 1000, adds 8 hours to a ms precision timestamp; rts - 8 * 3600 * 1000, subtracts 8 hours from a ms precision timestamp | No | None |
|
||||
| 15 | tag::VARCHAR(200)::name | String | The value in tag, when the tag type is VARCHAR, it can be in Chinese | No | NULL |
|
||||
|
||||
(2) `tag_name` is unique throughout the DataIn task, that is: in an OPC DataIn task, a data point can only be written to one subtable in TDengine. If you need to write a data point to multiple subtables, you need to create multiple OPC DataIn tasks;
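For illustration only, a minimal CSV sketch following the header and row rules above might look like this (the tag_name, table name templates, and tag column are hypothetical):

```csv
tag_name,enable,stable,tbname,value_col,type,ts_col,received_ts_col,tag::VARCHAR(200)::location
root.parent.temperature,1,opc_da_{type},point_{tag_name},val,f32,ts,rts,workshop_1
```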
|
||||
|
||||
|
@ -145,7 +149,7 @@ Data points can be filtered by configuring the **Root Node ID** and **Regular Ex
|
|||
|
||||
Configure **Supertable Name** and **Table Name** to specify the supertable and subtable where the data will be written.
|
||||
|
||||
Configure **Primary Key Column**, choosing `origin_ts` to use the original timestamp of the OPC data point as the primary key in TDengine; choosing `received_ts` to use the timestamp when the data is received as the primary key. Configure **Primary Key Alias** to specify the name of the TDengine timestamp column.
|
||||
Configure **Primary Key Column**, choosing `origin_ts` to use the original timestamp of the OPC data point as the primary key in TDengine; choosing `request_ts` to use the timestamp when the data is requested as the primary key; choosing `received_ts` to use the timestamp when the data is received as the primary key. Configure **Primary Key Alias** to specify the name of the TDengine timestamp column.
|
||||
|
||||
<figure>
|
||||
<Image img={imgStep4} alt=""/>
|
||||
|
|
|
@ -146,9 +146,19 @@ Not supported
|
|||
```
|
||||
</TabItem>
|
||||
<TabItem label="C" value="c">
|
||||
|
||||
The example code for binding parameters with stmt2 (TDengine v3.3.5.0 or higher is required) is as follows:
|
||||
|
||||
```c
|
||||
{{#include docs/examples/c/stmt2_insert_demo.c}}
|
||||
```
|
||||
|
||||
The example code for binding parameters with stmt is as follows:
|
||||
|
||||
```c
|
||||
{{#include docs/examples/c/stmt_insert_demo.c}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="REST API" value="rest">
|
||||
Not supported
|
||||
|
|
|
@ -298,13 +298,53 @@ select max_vol(vol1, vol2, vol3, deviceid) from battery;
|
|||
|
||||
</details>
|
||||
|
||||
#### Aggregate Function Example 3 Split string and calculate average value [extract_avg](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/extract_avg.c)
|
||||
|
||||
The `extract_avg` function converts a comma-separated string sequence into a set of numerical values, counts the results of all rows, and calculates the final average. Note when implementing:
|
||||
- `interBuf->numOfResult` must be set to 1 or 0; it cannot be used to carry the count.
|
||||
- The count can use an additional cache, such as the `SumCount` structure (see the sketch after this list).
|
||||
- Use `varDataVal` to obtain the string.
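A minimal sketch of the pieces these notes describe, assuming the standard UDF header `taosudf.h` (this is only an illustration, not the full `extract_avg` implementation):

```c
#include <stdint.h>
#include "taosudf.h"

// Intermediate cache for the aggregation: running sum and element count.
// interBuf->numOfResult must still be set to 0 or 1; the count lives here instead.
typedef struct SumCount {
  double  sum;    // sum of all values parsed so far
  int64_t count;  // number of values parsed so far
} SumCount;

// Reading one VARCHAR cell: varDataVal/varDataLen return the payload and its length.
static void accumulateCell(char *cellData, SumCount *sc) {
  char   *str = varDataVal(cellData);  // start of the comma-separated string
  int32_t len = varDataLen(cellData);  // its length (not NUL-terminated)
  // ... split str[0..len) on ',', convert each piece, and update sc->sum / sc->count ...
  (void)str;
  (void)len;
  (void)sc;
}
```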
|
||||
|
||||
Create table:
|
||||
|
||||
```shell
|
||||
create table scores(ts timestamp, varStr varchar(128));
|
||||
```
|
||||
|
||||
Create custom function:
|
||||
|
||||
```shell
|
||||
create aggregate function extract_avg as '/root/udf/libextract_avg.so' outputtype double bufsize 16 language 'C';
|
||||
```
|
||||
|
||||
Use custom function:
|
||||
|
||||
```shell
|
||||
select extract_avg(varStr) from scores;
|
||||
```
|
||||
|
||||
Generate the `.so` file:
|
||||
```bash
|
||||
gcc -g -O0 -fPIC -shared extract_avg.c -o libextract_avg.so
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary>extract_avg.c</summary>
|
||||
|
||||
```c
|
||||
{{#include tests/script/sh/extract_avg.c}}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
## Developing UDFs in Python Language
|
||||
|
||||
### Environment Setup
|
||||
|
||||
The specific steps to prepare the environment are as follows:
|
||||
|
||||
- Step 1, prepare the Python runtime environment.
|
||||
- Step 1, prepare the Python runtime environment. If you compile and install Python locally, be sure to enable the `--enable-shared` option; otherwise the subsequent installation of taospyudf will fail because no shared library is generated.
|
||||
- Step 2, install the Python package taospyudf. The command is as follows.
|
||||
|
||||
```shell
|
||||
|
|
|
@ -339,20 +339,272 @@ Helm operates Kubernetes using kubectl and kubeconfig configurations, which can
|
|||
The TDengine Chart has not yet been released to the Helm repository, it can currently be downloaded directly from GitHub:
|
||||
|
||||
```shell
|
||||
wget https://github.com/taosdata/TDengine-Operator/raw/refs/heads/3.0/helm/tdengine-enterprise-3.5.0.tgz
|
||||
wget https://github.com/taosdata/TDengine-Operator/raw/refs/heads/3.0/helm/tdengine-3.5.0.tgz
|
||||
```
|
||||
|
||||
Note that it's for the enterprise edition, and the community edition is not yet available.
|
||||
|
||||
Follow the steps below to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
# Edit the values.yaml file to set the topology of the cluster
|
||||
vim values.yaml
|
||||
helm install tdengine tdengine-enterprise-3.5.0.tgz -f values.yaml
|
||||
helm install tdengine tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
#### Case 1: Simple 1-node Deployment
|
||||
If you are using community images, you can refer to the following use cases to install TDengine with the Helm chart:
|
||||
|
||||
<details>
|
||||
<summary>Helm Chart Use Cases for Community</summary>
|
||||
|
||||
#### Community Case 1: Simple 1-node Deployment
|
||||
|
||||
The following is a simple example of deploying a single-node TDengine cluster using Helm.
|
||||
|
||||
```yaml
|
||||
# This example is a simple deployment with one server replica.
|
||||
name: "tdengine"
|
||||
|
||||
image:
|
||||
repository: # Leave a trailing slash for the repository, or "" for no repository
|
||||
server: tdengine/tdengine:latest
|
||||
|
||||
# Set timezone here, not in taoscfg
|
||||
timezone: "Asia/Shanghai"
|
||||
|
||||
labels:
|
||||
app: "tdengine"
|
||||
# Add more labels as needed.
|
||||
|
||||
services:
|
||||
server:
|
||||
type: ClusterIP
|
||||
replica: 1
|
||||
ports:
|
||||
# TCP range required
|
||||
tcp: [6041, 6030, 6060]
|
||||
# UDP range, optional
|
||||
udp:
|
||||
volumes:
|
||||
- name: data
|
||||
mountPath: /var/lib/taos
|
||||
spec:
|
||||
storageClassName: "local-path"
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
resources:
|
||||
requests:
|
||||
storage: "10Gi"
|
||||
- name: log
|
||||
mountPath: /var/log/taos/
|
||||
spec:
|
||||
storageClassName: "local-path"
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
resources:
|
||||
requests:
|
||||
storage: "10Gi"
|
||||
files:
|
||||
- name: cfg # must be lower case.
|
||||
mountPath: /etc/taos/taos.cfg
|
||||
content: |
|
||||
dataDir /var/lib/taos/
|
||||
logDir /var/log/taos/
|
||||
```
|
||||
|
||||
Let's explain the above configuration:
|
||||
|
||||
- name: The name of the deployment, here it is "tdengine".
|
||||
- image:
|
||||
- repository: The image repository address, remember to leave a trailing slash for the repository, or set it to an empty string to use docker.io.
|
||||
- server: The specific name and tag of the server image. The community image `tdengine/tdengine:latest` is used here; for TDengine Enterprise, you need to ask your business partner for the image.
|
||||
- timezone: Set the timezone, here it is "Asia/Shanghai".
|
||||
- labels: Add labels to the deployment, here is an app label with the value "tdengine", more labels can be added as needed.
|
||||
- services:
|
||||
- server: Configure the server service.
|
||||
- type: The service type, here it is **ClusterIP**.
|
||||
- replica: The number of replicas, here it is 1.
|
||||
- ports: Configure the ports of the service.
|
||||
- tcp: The required TCP port range, here it is [6041, 6030, 6060].
|
||||
- udp: The optional UDP port range, which is not configured here.
|
||||
- volumes: Configure the volumes.
|
||||
- name: The name of the volume, here there are two volumes, data and log.
|
||||
- mountPath: The mount path of the volume.
|
||||
- spec: The specification of the volume.
|
||||
- storageClassName: The storage class name, here it is **local-path**.
|
||||
- accessModes: The access mode, here it is **ReadWriteOnce**.
|
||||
- resources.requests.storage: The requested storage size, here it is **10Gi**.
|
||||
- files: Configure the files to mount in TDengine server.
|
||||
- name: The name of the file, here it is **cfg**.
|
||||
- mountPath: The mount path of the file, which is **taos.cfg**.
|
||||
- content: The content of the file, here the **dataDir** and **logDir** are configured.
|
||||
|
||||
After configuring the values.yaml file, use the following command to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
helm install simple tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
After installation, you can see the instructions to see the status of the TDengine cluster:
|
||||
|
||||
```shell
|
||||
NAME: simple
|
||||
LAST DEPLOYED: Sun Feb  9 13:40:00 2025
NAMESPACE: default
|
||||
STATUS: deployed
|
||||
REVISION: 1
|
||||
TEST SUITE: None
|
||||
NOTES:
|
||||
1. Get first POD name:
|
||||
|
||||
export POD_NAME=$(kubectl get pods --namespace default \
|
||||
-l "app.kubernetes.io/name=tdengine,app.kubernetes.io/instance=simple" -o jsonpath="{.items[0].metadata.name}")
|
||||
|
||||
2. Show dnodes/mnodes:
|
||||
|
||||
kubectl --namespace default exec $POD_NAME -- taos -s "show dnodes; show mnodes"
|
||||
|
||||
3. Run into TDengine CLI:
|
||||
|
||||
kubectl --namespace default exec -it $POD_NAME -- taos
|
||||
```
|
||||
|
||||
Follow the instructions to check the status of the TDengine cluster:
|
||||
|
||||
```shell
|
||||
root@u1-58:/data1/projects/helm# kubectl --namespace default exec $POD_NAME -- taos -s "show dnodes; show mnodes"
|
||||
Welcome to the TDengine Command Line Interface, Client Version:3.3.5.8
|
||||
Copyright (c) 2023 by TDengine, all rights reserved.
|
||||
|
||||
taos> show dnodes; show mnodes
|
||||
id | endpoint | vnodes | support_vnodes | status | create_time | reboot_time | note |
|
||||
=============================================================================================================================================================================
|
||||
1 | oss-tdengine-0.oss-tdengine... | 0 | 21 | ready | 2025-03-12 19:05:42.224 | 2025-03-12 19:05:42.044 | |
|
||||
Query OK, 1 row(s) in set (0.002545s)
|
||||
|
||||
id | endpoint | role | status | create_time | role_time |
|
||||
==================================================================================================================================
|
||||
1 | oss-tdengine-0.oss-tdengine... | leader | ready | 2025-03-12 19:05:42.239 | 2025-03-12 19:05:42.137 |
|
||||
Query OK, 1 row(s) in set (0.001343s)
|
||||
```
|
||||
|
||||
To clean up the TDengine cluster, use the following command:
|
||||
|
||||
```shell
|
||||
helm uninstall simple
|
||||
kubectl delete pvc -l app.kubernetes.io/instance=simple
|
||||
```
|
||||
|
||||
#### Community Case 2: 3-replica Deployment with Single taosX
|
||||
|
||||
```yaml
|
||||
# This example shows how to deploy a 3-replica TDengine cluster with separate taosx/explorer service.
|
||||
# Users should know that the explorer/taosx service is not cluster-ready, so it is recommended to deploy it separately.
|
||||
name: "tdengine"
|
||||
|
||||
image:
|
||||
repository: # Leave a trailing slash for the repository, or "" for no repository
|
||||
server: tdengine/tdengine:latest
|
||||
|
||||
# Set timezone here, not in taoscfg
|
||||
timezone: "Asia/Shanghai"
|
||||
|
||||
labels:
|
||||
# Add more labels as needed.
|
||||
|
||||
services:
|
||||
server:
|
||||
type: ClusterIP
|
||||
replica: 3
|
||||
ports:
|
||||
# TCP range required
|
||||
tcp: [6041, 6030]
|
||||
# UDP range, optional
|
||||
udp:
|
||||
volumes:
|
||||
- name: data
|
||||
mountPath: /var/lib/taos
|
||||
spec:
|
||||
storageClassName: "local-path"
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: "10Gi"
|
||||
- name: log
|
||||
mountPath: /var/log/taos/
|
||||
spec:
|
||||
storageClassName: "local-path"
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: "10Gi"
|
||||
```
|
||||
|
||||
You can see that the configuration is similar to the first one, with the server service configured with 3 replicas. Since the taosx/explorer service is not cluster-ready, it is recommended to deploy it separately.
|
||||
|
||||
After configuring the values.yaml file, use the following command to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
helm install replica3 tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
To clean up the TDengine cluster, use the following command:
|
||||
|
||||
```shell
|
||||
helm uninstall replica3
|
||||
kubectl delete pvc -l app.kubernetes.io/instance=replica3
|
||||
```
|
||||
|
||||
You can use the following command to expose the explorer service to the outside world with ingress:
|
||||
|
||||
```shell
|
||||
tee replica3-ingress.yaml <<EOF
|
||||
# This is a helm chart example for deploying 3 replicas of TDengine Explorer
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: replica3-ingress
|
||||
namespace: default
|
||||
spec:
|
||||
rules:
|
||||
- host: replica3.local.tdengine.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: replica3-tdengine-taosx
|
||||
port:
|
||||
number: 6060
|
||||
EOF
|
||||
|
||||
kubectl apply -f replica3-ingress.yaml
|
||||
```
|
||||
|
||||
Use `kubectl get ingress` to view the ingress service.
|
||||
|
||||
```shell
|
||||
root@server:/data1/projects/helm# kubectl get ingress
|
||||
NAME CLASS HOSTS ADDRESS PORTS AGE
|
||||
replica3-ingress nginx replica3.local.tdengine.com 192.168.1.58 80 48m
|
||||
```
|
||||
|
||||
You can configure the domain name resolution to point to the ingress service's external IP address. For example, add the following line to the hosts file:
|
||||
|
||||
```conf
|
||||
192.168.1.58 replica3.local.tdengine.com
|
||||
```
|
||||
|
||||
Now you can access the explorer service through the domain name `replica3.local.tdengine.com`.
|
||||
|
||||
```shell
|
||||
curl http://replica3.local.tdengine.com
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
With TDengine Enterprise images, you can refer to the following use cases to install TDengine with the Helm chart:
|
||||
|
||||
<details>
|
||||
<summary>Helm Chart Use Cases for Enterprise</summary>
|
||||
|
||||
#### Enterprise Case 1: Simple 1-node Deployment
|
||||
|
||||
The following is a simple example of deploying a single-node TDengine cluster using Helm.
|
||||
|
||||
|
@ -435,7 +687,7 @@ Let's explain the above configuration:
|
|||
After configuring the values.yaml file, use the following command to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
helm install simple tdengine-enterprise-3.5.0.tgz -f values.yaml
|
||||
helm install simple tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
After installation, you can see the instructions to see the status of the TDengine cluster:
|
||||
|
@ -487,7 +739,7 @@ helm uninstall simple
|
|||
kubectl delete pvc -l app.kubernetes.io/instance=simple
|
||||
```
|
||||
|
||||
#### Case 2: Tiered-Storage Deployment
|
||||
#### Enterprise Case 2: Tiered-Storage Deployment
|
||||
|
||||
The following is an example of deploying a TDengine cluster with tiered storage using Helm.
|
||||
|
||||
|
@ -563,10 +815,10 @@ You can see that the configuration is similar to the previous one, with the addi
|
|||
After configuring the values.yaml file, use the following command to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
helm install tiered tdengine-enterprise-3.5.0.tgz -f values.yaml
|
||||
helm install tiered tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
#### Case 3: 2-replica Deployment
|
||||
#### Enterprise Case 3: 2-replica Deployment
|
||||
|
||||
TDengine supports a 2-replica deployment with an arbitrator, which can be configured as follows:
|
||||
|
||||
|
@ -634,7 +886,7 @@ services:
|
|||
|
||||
You can see that the configuration is similar to the first one, with the addition of the arbitrator configuration. The arbitrator service is configured with the same storage as the server service, and the server service is configured with 2 replicas (the arbitrator is fixed at 1 replica and cannot be changed).
|
||||
|
||||
#### Case 4: 3-replica Deployment with Single taosX
|
||||
#### Enterprise Case 4: 3-replica Deployment with Single taosX
|
||||
|
||||
```yaml
|
||||
# This example shows how to deploy a 3-replica TDengine cluster with separate taosx/explorer service.
|
||||
|
@ -761,7 +1013,7 @@ You can see that the configuration is similar to the first one, with the additio
|
|||
After configuring the values.yaml file, use the following command to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
helm install replica3 tdengine-enterprise-3.5.0.tgz -f values.yaml
|
||||
helm install replica3 tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
You can use the following command to expose the explorer service to the outside world with ingress:
|
||||
|
@ -810,3 +1062,5 @@ Now you can access the explorer service through the domain name `replica3.local.
|
|||
```shell
|
||||
curl http://replica3.local.tdengine.com
|
||||
```
|
||||
|
||||
</details>
|
||||
|
|
|
@ -72,8 +72,16 @@ TDengine Enterprise implements incremental backup and recovery of data by using
|
|||
7. **Directory:** Enter the full path of the directory in which you want to store backup files.
|
||||
8. **Backup file max size:** Enter the maximum size of a single backup file. If the total size of your backup exceeds this number, the backup is split into multiple files.
|
||||
9. **Compression level:** Select **fastest** for the fastest performance but lowest compression ratio, **best** for the highest compression ratio but slowest performance, or **balanced** for a combination of performance and compression.
|
||||
|
||||
4. Click **Confirm** to create the backup plan.
|
||||
4. Users can enable S3 dumping to upload backup files to the S3 storage service. To enable S3 dumping, the following information needs to be provided:
|
||||
1. **Endpoint**: The address of the S3 endpoint.
|
||||
2. **Access Key ID**: The access key ID for authentication.
|
||||
3. **Secret Access Key**: The secret access key for authentication.
|
||||
4. **Bucket**: The name of the target bucket.
|
||||
5. **Region**: The region where the bucket is located.
|
||||
6. **Object Prefix**: A prefix for backup file objects, similar to a directory path on S3.
|
||||
7. **Backup Retention Period**: The retention duration for local backups. All files older than `current time - backup_retention_period` are uploaded to S3.
|
||||
8. **Backup Retention Count**: The number of local backups to retain. Only the latest `backup_retention_size` backup files are kept locally.
|
||||
5. Click **Confirm** to create the backup plan.
|
||||
|
||||
You can view your backup plans and modify, clone, or delete them using the buttons in the **Operation** columns. Click **Refresh** to update the status of your plans. Note that you must stop a backup plan before you can delete it. You can also click **View** in the **Backup File** column to view the backup record points and files created by each plan.
|
||||
|
||||
|
|
|
@ -55,7 +55,7 @@ When network I/O and other processing resources are not bottlenecks, by optimizi
|
|||
|
||||
Generally, when TDengine needs to select a mount point from the same level to create a new data file, it uses a round-robin strategy for selection. However, in reality, each disk may have different capacities, or the same capacity but different amounts of data written, leading to an imbalance in available space on each disk. In practice, this may result in selecting a disk with very little remaining space.
|
||||
|
||||
To address this issue, starting from 3.1.1.0, a new configuration minDiskFreeSize was introduced. When the available space on a disk is less than or equal to this threshold, that disk will no longer be selected for generating new data files. The unit of this configuration item is bytes, and its value should be greater than 2GB, i.e., mount points with less than 2GB of available space will be skipped.
|
||||
To address this issue, starting from 3.1.1.0, a new configuration minDiskFreeSize was introduced. When the available space on a disk is less than or equal to this threshold, that disk will no longer be selected for generating new data files. The unit of this configuration item is bytes. For example, if its value is set to 2GB, mount points with less than 2GB of available space will be skipped.
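As a purely illustrative sketch (the value here is hypothetical), the threshold could be set in `taos.cfg` as follows:

```conf
# Skip mount points whose available space is at or below roughly 3 GB
# (the unit is bytes and the value must be greater than 2 GB)
minDiskFreeSize 3221225472
```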
|
||||
|
||||
Starting from version 3.3.2.0, a new configuration `disable_create_new_file` has been introduced to control the prohibition of generating new files on a certain mount point. The default value is `false`, which means new files can be generated on each mount point by default.
|
||||
|
||||
|
|
|
@ -0,0 +1,278 @@
|
|||
---
|
||||
sidebar_label: Security Configuration
|
||||
title: Security Configuration
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
import Image from '@theme/IdealImage';
|
||||
import imgEcosys from '../assets/tdengine-components-01.png';
|
||||
|
||||
## Background
|
||||
|
||||
The distributed and multi-component nature of TDengine makes its security configuration a concern in production systems. This document aims to explain the security issues of various TDengine components and different deployment methods, and provide deployment and configuration suggestions to support the security of user data.
|
||||
|
||||
## Components Involved in Security Configuration
|
||||
|
||||
TDengine includes multiple components:
|
||||
|
||||
- `taosd`: Core component.
|
||||
- `taosc`: Client library.
|
||||
- `taosAdapter`: REST API and WebSocket service.
|
||||
- `taosKeeper`: Monitoring service component.
|
||||
- `taosX`: Data pipeline and backup recovery component.
|
||||
- `taosxAgent`: Auxiliary component for external data source access.
|
||||
- `taosExplorer`: Web visualization management interface.
|
||||
|
||||
In addition to TDengine deployment and applications, there are also the following components:
|
||||
|
||||
- Applications that access and use the TDengine database through various connectors.
|
||||
- External data sources: Other data sources that access TDengine, such as MQTT, OPC, Kafka, etc.
|
||||
|
||||
The relationship between the components is as follows:
|
||||
|
||||
<figure>
|
||||
<Image img={imgEcosys} alt="TDengine ecosystem"/>
|
||||
<figcaption>TDengine ecosystem</figcaption>
|
||||
</figure>
|
||||
|
||||
## TDengine Security Settings
|
||||
|
||||
### `taosd`
|
||||
|
||||
The `taosd` cluster uses TCP connections based on its own protocol for data exchange, which has low risk, but the transmission process is not encrypted, so there is still some security risk.
|
||||
|
||||
Enabling compression may help with TCP data obfuscation.
|
||||
|
||||
- **compressMsgSize**: Whether to compress RPC messages. Integer, optional: -1: Do not compress any messages; 0: Compress all messages; N (N>0): Only compress messages larger than N bytes.
|
||||
|
||||
To ensure the traceability of database operations, it is recommended to enable the audit function.
|
||||
|
||||
- **audit**: Audit function switch, 0 is off, 1 is on. Default is on.
|
||||
- **auditInterval**: Reporting interval, in milliseconds. Default is 5000.
|
||||
- **auditCreateTable**: Whether to enable the audit function for creating sub-tables. 0 is off, 1 is on. Default is on.
|
||||
|
||||
To ensure the security of data files, database encryption can be enabled.
|
||||
|
||||
- **encryptAlgorithm**: Data encryption algorithm.
|
||||
- **encryptScope**: Data encryption scope.
|
||||
|
||||
Enabling the whitelist can restrict access addresses and further enhance privacy.
|
||||
|
||||
- **enableWhiteList**: Whitelist function switch, 0 is off, 1 is on; default is off.
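Putting the parameters above together, a minimal `taos.cfg` sketch might look like the following (the values are illustrative, the encryption-related parameters are omitted, and the supported options may vary by version):

```conf
# Compress all RPC messages
compressMsgSize 0
# Enable the audit function, report every 5000 ms, and audit sub-table creation
audit 1
auditInterval 5000
auditCreateTable 1
# Only accept connections from whitelisted addresses
enableWhiteList 1
```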
|
||||
|
||||
### `taosc`
|
||||
|
||||
Users and other components use the native client library (`taosc`) and its own protocol to connect to `taosd`, which has low data security risk, but the transmission process is still not encrypted, so there is some security risk.
|
||||
|
||||
### `taosAdapter`
|
||||
|
||||
`taosAdapter` uses the native client library (`taosc`) and its own protocol to connect to `taosd`, and also supports RPC message compression, so there is no data security issue.
|
||||
|
||||
Applications and other components connect to `taosAdapter` through various language connectors. By default, the connection is based on HTTP 1.1 and is not encrypted. To ensure the security of data transmission between `taosAdapter` and other components, SSL encrypted connections need to be configured. Modify the following configuration in the `/etc/taos/taosadapter.toml` configuration file:
|
||||
|
||||
```toml
|
||||
[ssl]
|
||||
enable = true
|
||||
certFile = "/path/to/certificate-file"
|
||||
keyFile = "/path/to/private-key"
|
||||
```
|
||||
|
||||
Configure HTTPS/SSL access in the connector to complete encrypted access.
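As an illustration, after enabling SSL you can also verify encrypted access with a plain HTTPS request to the REST endpoint (the default `root`/`taosdata` account and port 6041 are assumed; adjust the CA path to your certificate):

```shell
# Verify that taosAdapter now answers over HTTPS
curl --cacert /path/to/ca.pem -u root:taosdata \
  -d "show databases" https://localhost:6041/rest/sql
```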
|
||||
|
||||
To further enhance security, the whitelist function can be enabled, and configured in `taosd`, which also applies to the `taosAdapter` component.
|
||||
|
||||
### `taosX`
|
||||
|
||||
`taosX` includes REST API and gRPC interfaces, where the gRPC interface is used for `taos-agent` connections.
|
||||
|
||||
- The REST API interface is based on HTTP 1.1 and is not encrypted, posing a security risk.
|
||||
- The gRPC interface is based on HTTP 2 and is not encrypted, posing a security risk.
|
||||
|
||||
To ensure data security, it is recommended that the `taosX` API interface is limited to internal access only. Modify the following configuration in the `/etc/taos/taosx.toml` configuration file:
|
||||
|
||||
```toml
|
||||
[serve]
|
||||
listen = "127.0.0.1:6050"
|
||||
grpc = "127.0.0.1:6055"
|
||||
```
|
||||
|
||||
Starting from TDengine 3.3.6.0, `taosX` supports HTTPS connections. Add the following configuration in the `/etc/taos/taosx.toml` file:
|
||||
|
||||
```toml
|
||||
[serve]
|
||||
ssl_cert = "/path/to/server.pem"
|
||||
ssl_key = "/path/to/server.key"
|
||||
ssl_ca = "/path/to/ca.pem"
|
||||
```
|
||||
|
||||
And modify the API address to HTTPS connection in Explorer:
|
||||
|
||||
```toml
|
||||
# Local connection to taosX API
|
||||
x_api = "https://127.0.0.1:6050"
|
||||
# Public IP or domain address
|
||||
grpc = "https://public.domain.name:6055"
|
||||
```
|
||||
|
||||
### `taosExplorer`
|
||||
|
||||
Similar to the `taosAdapter` component, the `taosExplorer` component provides HTTP services for external access. Modify the following configuration in the `/etc/taos/explorer.toml` configuration file:
|
||||
|
||||
```toml
|
||||
[ssl]
|
||||
# SSL certificate file
|
||||
certificate = "/path/to/ca.file"
|
||||
|
||||
# SSL certificate private key
|
||||
certificate_key = "/path/to/key.file"
|
||||
```
|
||||
|
||||
Then, use HTTPS to access Explorer, such as [https://192.168.12.34:6060](https://192.168.12.34:6060).
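To quickly verify the HTTPS endpoint from the command line (the address is illustrative, and `-k` skips certificate verification, so use it only with self-signed test certificates):

```shell
curl -k https://192.168.12.34:6060
```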
|
||||
|
||||
### `taosxAgent`
|
||||
|
||||
After `taosX` enables HTTPS, the `Agent` component and `taosX` use HTTP 2 encrypted connections, using Arrow-Flight RPC for data exchange. The transmission content is in binary format, and only registered `Agent` connections are valid, ensuring data security.
|
||||
|
||||
It is recommended to always enable HTTPS connections for `Agent` services in insecure or public network environments.
|
||||
|
||||
### `taosKeeper`
|
||||
|
||||
`taosKeeper` uses WebSocket connections to communicate with `taosAdapter`, writing monitoring information reported by other components into TDengine.
|
||||
|
||||
The current version of `taosKeeper` has security risks:
|
||||
|
||||
- The listening address cannot be restricted to the local machine. By default, it listens on all addresses on port 6043, posing a risk of network attacks. This risk can be ignored when deploying with Docker or Kubernetes without exposing the `taosKeeper` port.
|
||||
- The configuration file contains plaintext passwords, so the visibility of the configuration file needs to be reduced. In `/etc/taos/taoskeeper.toml`:
|
||||
|
||||
```toml
|
||||
[tdengine]
|
||||
host = "localhost"
|
||||
port = 6041
|
||||
username = "root"
|
||||
password = "taosdata"
|
||||
usessl = false
|
||||
```
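One way to reduce the configuration file's visibility is to restrict it to the service account (the `taos` user and group are an assumption; adjust them to your installation):

```shell
chown taos:taos /etc/taos/taoskeeper.toml
chmod 600 /etc/taos/taoskeeper.toml
```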
|
||||
|
||||
## Security Enhancements
|
||||
|
||||
We recommend using TDengine within a local area network.
|
||||
|
||||
If you must provide access outside the local area network, consider adding the following configurations:
|
||||
|
||||
### Load Balancing
|
||||
|
||||
Use load balancing to provide `taosAdapter` services externally.
|
||||
|
||||
Take Nginx as an example to configure multi-node load balancing:
|
||||
|
||||
```nginx
|
||||
http {
|
||||
server {
|
||||
listen 6041;
|
||||
|
||||
location / {
|
||||
proxy_pass http://websocket;
|
||||
# Headers for websocket compatible
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
# Forwarded headers
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
proxy_set_header X-Forwarded-Port $server_port;
|
||||
proxy_set_header X-Forwarded-Server $hostname;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
}
|
||||
}
|
||||
|
||||
upstream websocket {
|
||||
server 192.168.11.61:6041;
|
||||
server 192.168.11.62:6041;
|
||||
server 192.168.11.63:6041;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
If the `taosAdapter` component is not configured with SSL secure connections, SSL needs to be configured to ensure secure access. SSL can be configured at a higher-level API Gateway or in Nginx; if you have stronger security requirements for the connections between components, you can configure SSL in all components. The Nginx configuration is as follows:
|
||||
|
||||
```nginx
|
||||
http {
|
||||
server {
|
||||
listen 443 ssl;
|
||||
|
||||
ssl_certificate /path/to/your/certificate.crt;
|
||||
ssl_certificate_key /path/to/your/private.key;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Security Gateway
|
||||
|
||||
In modern internet production systems, the use of security gateways is also very common. [traefik](https://traefik.io/) is a good open-source choice. We take traefik as an example to explain the security configuration in the API gateway.
|
||||
|
||||
Traefik provides various security configurations through middleware, including:
|
||||
|
||||
1. Authentication: Traefik provides multiple authentication methods such as BasicAuth, DigestAuth, custom authentication middleware, and OAuth 2.0.
|
||||
2. IP Whitelist: Restrict the allowed client IPs.
|
||||
3. Rate Limit: Control the number of requests sent to the service.
|
||||
4. Custom Headers: Add configurations such as `allowedHosts` through custom headers to improve security.
|
||||
|
||||
A common middleware example is as follows:
|
||||
|
||||
```yaml
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.tdengine.rule=Host(`api.tdengine.example.com`)"
|
||||
- "traefik.http.routers.tdengine.entrypoints=https"
|
||||
- "traefik.http.routers.tdengine.tls.certresolver=default"
|
||||
- "traefik.http.routers.tdengine.service=tdengine"
|
||||
- "traefik.http.services.tdengine.loadbalancer.server.port=6041"
|
||||
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
|
||||
- "traefik.http.middlewares.check-header.headers.customrequestheaders.X-Secret-Header=SecretValue"
|
||||
- "traefik.http.middlewares.check-header.headers.customresponseheaders.X-Header-Check=true"
|
||||
- "traefik.http.middlewares.tdengine-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.7"
|
||||
- "traefik.http.routers.tdengine.middlewares=redirect-to-https,check-header,tdengine-ipwhitelist"
|
||||
```
|
||||
|
||||
The above example completes the following configurations:
|
||||
|
||||
- TLS authentication uses the `default` configuration, which can be configured in the configuration file or traefik startup parameters, as follows:
|
||||
|
||||
```yaml
|
||||
traefik:
|
||||
image: "traefik:v2.3.2"
|
||||
hostname: "traefik"
|
||||
networks:
|
||||
- traefik
|
||||
command:
|
||||
- "--log.level=INFO"
|
||||
- "--api.insecure=true"
|
||||
- "--providers.docker=true"
|
||||
- "--providers.docker.exposedbydefault=false"
|
||||
- "--providers.docker.swarmmode=true"
|
||||
- "--providers.docker.network=traefik"
|
||||
- "--providers.docker.watch=true"
|
||||
- "--entrypoints.http.address=:80"
|
||||
- "--entrypoints.https.address=:443"
|
||||
- "--certificatesresolvers.default.acme.dnschallenge=true"
|
||||
- "--certificatesresolvers.default.acme.dnschallenge.provider=alidns"
|
||||
- "--certificatesresolvers.default.acme.dnschallenge.resolvers=ns1.alidns.com"
|
||||
- "--certificatesresolvers.default.acme.email=linhehuo@gmail.com"
|
||||
- "--certificatesresolvers.default.acme.storage=/letsencrypt/acme.json"
|
||||
```
|
||||
|
||||
The above startup parameters configure the `default` TLS certificate resolver and automatic ACME authentication (automatic certificate application and renewal).
|
||||
|
||||
- Middleware `redirect-to-https`: Configure redirection from HTTP to HTTPS, forcing the use of secure connections.
|
||||
|
||||
```yaml
|
||||
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
|
||||
```
|
||||
|
||||
- Middleware `check-header`: Configure custom header checks. External access must add custom headers and match header values to prevent unauthorized access. This is a very simple and effective security mechanism when providing API access.
|
||||
- Middleware `tdengine-ipwhitelist`: Configure IP whitelist. Only allow specified IPs to access, using CIDR routing rules for matching, and can set internal and external IP addresses.
|
||||
|
||||
## Summary
|
||||
|
||||
Data security is a key indicator of the TDengine product. These measures are designed to protect TDengine deployments from unauthorized access and data breaches while maintaining performance and functionality. However, the security configuration of TDengine itself is not the only guarantee in production. It is more important to develop solutions that better match customer needs in combination with the user's business system.
|
|
@ -0,0 +1,81 @@
|
|||
---
|
||||
sidebar_label: Perspective
|
||||
title: Integration With Perspective
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
Perspective is an open-source and powerful data visualization library developed by [Prospective.co](https://www.perspective.co/). Leveraging the technologies of WebAssembly and Web Workers, it enables interactive real-time data analysis in web applications and provides high-performance visualization capabilities on the browser side. With its help, developers can build dashboards, charts, etc. that update in real time, and users can easily interact with the data, filtering, sorting, and exploring it as needed. It boasts high flexibility, adapting to various data formats and business scenarios. It is also fast, ensuring smooth interaction even when dealing with large-scale data. Moreover, it has excellent usability, allowing both beginners and professional developers to quickly build visualization interfaces.
|
||||
|
||||
In terms of data connection, Perspective, through the Python connector of TDengine, perfectly supports TDengine data sources. It can efficiently retrieve various types of data, such as massive time-series data, from TDengine. Additionally, it offers real-time functions including the display of complex charts, in-depth statistical analysis, and trend prediction, helping users gain insights into the value of the data and providing strong support for decision-making. It is an ideal choice for building applications with high requirements for real-time data visualization and analysis.
|
||||
|
||||

|
||||
|
||||
## Prerequisites
|
||||
|
||||
Perform the following installation operations in the Linux system:
|
||||
|
||||
- TDengine is installed and running normally (both Enterprise and Community versions are available).
|
||||
- taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/).
|
||||
- Python version 3.10 or higher has been installed (if not installed, please refer to [Python Installation](https://docs.python.org/)).
|
||||
- Download or clone the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project. After entering the root directory of the project, run the "install.sh" script to download and install the TDengine client library and related dependencies locally.
|
||||
|
||||
## Visualize data
|
||||
|
||||
**Step 1**, Run the "run.sh" script in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project to start the Perspective service. This service will retrieve data from the TDengine database every 300 milliseconds and transmit the data in a streaming form to the web-based `Perspective Viewer`.
|
||||
|
||||
```shell
|
||||
sh run.sh
|
||||
```
|
||||
|
||||
**Step 2**, Start a static web service. Then, access the prsp-viewer.html resource in the browser, and the visualized data can be displayed.
|
||||
|
||||
```shell
|
||||
python -m http.server 8081
|
||||
```
|
||||
|
||||
The effect presented after accessing the web page through the browser is shown in the following figure:
|
||||
|
||||

|
||||
|
||||
## Instructions for use
|
||||
|
||||
### Write Data to TDengine
|
||||
|
||||
The `producer.py` script in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project can periodically insert data into the TDengine database with the help of the TDengine Python connector. This script will generate random data and insert it into the database, thus simulating the process of writing real-time data. The specific execution steps are as follows:
|
||||
|
||||
1. Establish a connection to TDengine.
|
||||
2. Create the `power` database and the `meters` table.
|
||||
3. Generate random data every 300 milliseconds and write it into the TDengine database.
|
||||
|
||||
For detailed instructions on writing using the Python connector, please refer to [Python Parameter Binding](../../../tdengine-reference/client-libraries/python/#parameter-binding).
|
||||
|
||||
### Load Data from TDengine
|
||||
|
||||
The `perspective_server.py` script in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project will start a Perspective server. This server will read data from TDengine and stream the data to a Perspective table via the Tornado WebSocket.
|
||||
|
||||
1. Start a Perspective server.
|
||||
2. Establish a connection to TDengine.
|
||||
3. Create a Perspective table (the table structure needs to match the type of the table in the TDengine database).
|
||||
4. Call the `Tornado.PeriodicCallback` function to start a scheduled task, thereby achieving the update of the data in the Perspective table. The sample code is as follows:
|
||||
|
||||
```python
|
||||
{{#include docs/examples/perspective/perspective_server.py:perspective_server}}
|
||||
```
|
||||
|
||||
### HTML Page Configuration
|
||||
|
||||
The `prsp-viewer.html` file in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project embeds the `Perspective Viewer` into the HTML page. It connects to the Perspective server via a WebSocket and displays real-time data according to the chart configuration.
|
||||
|
||||
- Configure the displayed charts and the rules for data analysis.
|
||||
- Establish a Websocket connection with the Perspective server.
|
||||
- Import the Perspective library, connect to the Perspective server via a WebSocket, and load the `meters_values` table to display dynamic data.
|
||||
|
||||
```html
|
||||
{{#include docs/examples/perspective/prsp-viewer.html:perspective_viewer}}
|
||||
```
|
||||
|
||||
## Reference Materials
|
||||
|
||||
- [Perspective Docs](https://perspective.finos.org/)
|
||||
- [TDengine Python Connector](../../../tdengine-reference/client-libraries/python/)
|
||||
- [TDengine Stream Processing](../../../advanced-features/stream-processing/)
|
After Width: | Height: | Size: 50 KiB |
After Width: | Height: | Size: 62 KiB |
|
@ -14,8 +14,8 @@ Power BI is a business analytics tool provided by Microsoft. By configuring the
|
|||
|
||||
## Configure Data Source
|
||||
|
||||
**Step 1**, Search and open the [ODBC Data Source (64 bit)] management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#Installation).
|
||||
|
||||
**Step 1**, Search and open the [ODBC Data Source (64 bit)] management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#installation).
|
||||
|
||||
**Step 2**, Open Power BI and log in, click [Home] -> [Get Data] -> [Other] -> [ODBC] -> [Connect], add data source.
|
||||
|
||||
**Step 3**, Select the data source name just created, such as [MyTDengine], if you need to enter SQL, you can click the [Advanced options] tab, in the expanded dialog box enter the SQL statement. Click the [OK] button to connect to the configured data source.
|
||||
|
|
|
@ -13,11 +13,11 @@ Prepare the following environment:
|
|||
- TDengine 3.3.5.8 and above version is installed and running normally (both Enterprise and Community versions are available).
|
||||
- taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/).
|
||||
- Install and run Tableau Desktop (if not installed, please download and install the 64-bit Windows version from [Download Tableau Desktop](https://www.tableau.com/products/desktop/download)). For installation instructions, please refer to [Tableau Desktop](https://www.tableau.com).
|
||||
- Download the latest Windows operating system X64 client driver from the TDengine official website and install it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#installation).
|
||||
|
||||
## Configure Data Source
|
||||
|
||||
**Step 1**, Search and open the "ODBC Data Source (64 bit)" management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#Installation).
|
||||
**Step 1**, Search and open the "ODBC Data Source (64 bit)" management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#installation).
|
||||
|
||||
:::tip
|
||||
It should be noted that when configuring the ODBC data source for Tableau, the [Database] configuration item on the TDengine ODBC data source configuration page is required. You need to select a database that can be successfully connected.
|
||||
|
@ -27,19 +27,19 @@ It should be noted that when configuring the ODBC data source for Tableau, the [
|
|||
|
||||
**Step 3**, Click the `DSN` radio button, then select the configured data source (MyTDengine), and click the `Connect` button. After the connection is successful, delete the content of the string attachment, and finally click the `Sign In` button.
|
||||
|
||||

|
||||

|
||||
|
||||
## Data Analysis
|
||||
|
||||
**Step 1**, In the workbook page, the connected data sources will be displayed. Clicking on the dropdown list of databases will display the databases that require data analysis. On this basis, click the search button in the table options to display all tables in the database. Then, drag the table to be analyzed to the right area to display the table structure.
|
||||
|
||||

|
||||

|
||||
|
||||
**Step 2**, Click the `Update Now` button below to display the data in the table.
|
||||
|
||||

|
||||

|
||||
|
||||
**Step 3**, Click on the "Worksheet" at the bottom of the window to pop up the data analysis window, which displays all the fields of the analysis table. Drag the fields to the rows and columns to display the chart.
|
||||
|
||||

|
||||

|
||||
|
||||
|
|
|
@ -13,30 +13,30 @@ Prepare the following environment:
|
|||
- TDengine 3.3.5.8 and above version is installed and running normally (both Enterprise and Community versions are available).
|
||||
- taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/).
|
||||
- Install and run Excel. If not installed, please download and install it. For specific instructions, please refer to Microsoft's official documentation.
|
||||
- Download the latest Windows operating system X64 client driver from the TDengine official website and install it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#installation).
|
||||
|
||||
## Configure Data Source
|
||||
|
||||
**Step 1**, Search and open the [ODBC Data Source (64 bit)] management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#installation).
|
||||
|
||||
**Step 2**, Start Excel in the Windows system environment, then select [Data] -> [Get Data] -> [From Other Sources] -> [From ODBC].
|
||||
|
||||

|
||||

|
||||
|
||||
**Step 3**, In the pop-up window, select the data source you need to connect to from the drop-down list of [Data source name (DSN)], and then click the [OK] button.
|
||||
|
||||

|
||||

|
||||
|
||||
**Step 4**, Enter the username and password for TDengine.
|
||||
|
||||

|
||||

|
||||
|
||||
**Step 5**, In the pop-up [Navigator] dialog box, select the database tables you want to load, and then click [Load] to complete the data loading.
|
||||
|
||||

|
||||

|
||||
|
||||
## Data Analysis
|
||||
|
||||
Select the imported data. On the [Insert] tab, choose the column chart, and then configure the data fields in the [PivotChart Fields] pane on the right.
|
||||
|
||||

|
||||

|
||||
|
|
|
@ -0,0 +1,82 @@
|
|||
---
|
||||
sidebar_label: FineBI
|
||||
title: Integration With FineBI
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
Fanruan is a technology company specializing in the field of business intelligence and data analytics. With its self-developed core products, FineBI and FineReport, the company has established a leading position in the industry. Fanruan's BI tools are widely adopted by enterprises across various sectors, empowering users to achieve data visualization analysis, report generation, and data-driven decision support.
|
||||
|
||||
By using the TDengine Java connector, FineBI can quickly access the data in TDengine. Users can directly connect to the TDengine database in FineBI, obtain time-series data for analysis, and create visual reports, and the entire process does not require any code writing.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- TDengine 3.3.4.0 and above version is installed and running normally (both Enterprise and Community versions are available).
|
||||
- taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/).
|
||||
- Install FineBI (if not installed, please download and install [Download FineBI](https://intl.finebi.com/download)).
|
||||
- Download the fine_conf_entity plugin to support the addition of JDBC drivers, [Download link](https://market.fanruan.com/plugin/1052a471-0239-4cd8-b832-045d53182c5d).
|
||||
- Install the JDBC driver. Download the `TDengine JDBC connector` file `taos-jdbcdriver-3.4.0-dist.jar` or a higher version from `maven.org`.
|
||||
|
||||
## Configure Data Source
|
||||
|
||||
**Step 1**, In the `db.script` configuration file of the FineBI server, find the `SystemConfig.driverUpload` configuration item and change its value to true.
|
||||
|
||||
- Windows system: The path of the configuration file is webapps/webroot/WEB-INF/embed/finedb/db.script under the installation directory.
|
||||
- Linux/Mac system: The path of the configuration file is /usr/local/FineBI6.1/webapps/webroot/WEB-INF/embed/finedb/db.script.
|
||||
|
||||
**Step 2**, Start the FineBI service. Enter `http://ip:37799/webroot/decision` in the browser, where "ip" is the IP address of the FineBI server.
|
||||
|
||||
**Step 3**, After logging in to the FineBI Web page, click [System Management] -> [Plugin Management]. In the [Store App] on the right side, click [Install From Local] and select the downloaded `fine_conf_entity` plugin for installation.
|
||||
|
||||

|
||||
|
||||
**Step 4**, Click [System Management] -> [Data Connection] -> [Data Connection Management]. On the right-hand page, click the [Driver Management] button to open the configuration page. Then click the [New Driver] button, and in the pop-up window, enter a name (for example, `tdengine-websocket`) to configure the JDBC driver.
|
||||
|
||||

|
||||
|
||||
**Step 5**, On the driver configuration page, click the [Upload File] button. Select the downloaded TDengine Java Connector (e.g., `taos-jdbcdriver-3.4.0-dist.jar`) for uploading. After the upload is complete, select `com.taosdata.jdbc.ws.WebSocketDriver` from the drop-down list of [Driver], and then click [Save].
|
||||
|
||||

|
||||
|
||||
**Step 6**, On the "Data Connection Management" page, click the [New Data Connection] button. Subsequently, click "Others", and then on the right-side page, click "Other JDBC" to perform the connection configuration.
|
||||
|
||||

|
||||
|
||||
**Step 7**, On the configuration page, first enter the name of the data connection. Then, select "Custom" in the [Driver] option and choose the configured driver from the drop-down list (e.g., `com.taosdata.jdbc.ws.WebSocketDriver (tdengine-websocket)`). After that, configure the "Data Connection URL" (e.g., `jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata&fineBIDialect=mysql`). Once the settings are completed, click [Test Connection] in the top-right corner to test the connection. After the verification is successful, click [Save] to finish the configuration.
|
||||
|
||||
:::tip
|
||||
`fineBIDialect=mysql` means that FineBI adopts the SQL dialect rules of the MySQL database. In other words, it tells FineBI to parse and execute the relevant queries and operations in the specific way that the MySQL database handles SQL statements.
|
||||
:::
|
||||
|
||||

|
||||
|
||||
## Data Analysis
|
||||
|
||||
### Data preparation
|
||||
|
||||
**Step 1**, Click [Public Data]. On the right-hand page, click [New Folder] to create a folder (e.g., TDengine). Then, click the [+] button on the right side of the folder to create a "Database Table" dataset or an "SQL Dataset".
|
||||
|
||||

|
||||
|
||||
**Step 2**, Click "Database Table" to open the database table selection page. In the "Data Connection" section on the left, select the previously created connection. Then, all the tables in the database of the current connection will be displayed on the right. Select the table you need to load (e.g., meters), and click [OK]. The data in the meters table will then be displayed.
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
**Step 3**, Click "SQL Dataset" to open the configuration page for the SQL dataset. First, enter the table name (used for display on the FineBI page). Then, select the previously created connection from the drop-down list of "Data from Data Connection". After that, enter the SQL statement and click "Preview" to view the query results. Finally, click [OK] to successfully create the SQL dataset.
|
||||
|
||||

|
||||
|
||||
### Smart Meter Example
|
||||
|
||||
**Step 1**, Click [My Analysis]. On the right-hand page, click [New Folder] to create a folder (for example, `TDengine`). Then, click the [+] button on the right side of the folder to create an "Analysis Subject".
|
||||
|
||||

|
||||
|
||||
**Step 2**, On the analysis subject page, select the dataset (for example, `meters`) and then click the [OK] button to complete the association of the dataset.
|
||||
|
||||

|
||||
|
||||
**Step 3**, Click the [Component] tab at the bottom of the analysis subject page to open the chart configuration page. Drag the fields to the horizontal axis or the vertical axis, and then the chart will be displayed.
|
||||
|
||||

|
|
@ -170,7 +170,7 @@ The effective value of charset is UTF-8.
|
|||
|tempDir | |Not supported |Specifies the directory for generating temporary files during system operation, default value /tmp|
|
||||
|minimalDataDirGB | |Not supported |Minimum space to be reserved in the time-series data storage directory specified by dataDir, in GB, default value 2|
|
||||
|minimalTmpDirGB | |Not supported |Minimum space to be reserved in the temporary file directory specified by tempDir, in GB, default value 1|
|
||||
|minDiskFreeSize |After 3.1.1.0|Supported, effective immediately |When the available space on a disk is less than or equal to this threshold, the disk will no longer be selected for generating new data files, unit is bytes, range 52428800-2199023255552, default value 52428800; Enterprise parameter|
|
||||
|s3MigrateIntervalSec|After 3.3.4.3|Supported, effective immediately |Trigger cycle for automatic upload of local data files to S3, in seconds. Minimum: 600; Maximum: 100000. Default value 3600; Enterprise parameter|
|
||||
|s3MigrateEnabled |After 3.3.4.3|Supported, effective immediately |Whether to automatically perform S3 migration, default value is 0, which means auto S3 migration is off, can be set to 1; Enterprise parameter|
|
||||
|s3Accesskey |After 3.3.4.3|Supported, effective after restart|Colon-separated user SecretId:SecretKey, for example AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E; Enterprise parameter|
|
||||
|
@ -246,6 +246,9 @@ The effective value of charset is UTF-8.
|
|||
| streamSinkDataRate | |Supported, effective after restart| Internal parameter, used to control the write speed of stream computing results |
|
||||
| streamNotifyMessageSize | After 3.3.6.0 | Not supported | Internal parameter, controls the message size for event notifications, default value is 8192 |
|
||||
| streamNotifyFrameSize | After 3.3.6.0 | Not supported | Internal parameter, controls the underlying frame size when sending event notification messages, default value is 256 |
|
||||
| adapterFqdn | After 3.3.6.0 | Not supported | Internal parameter, The address of the taosadapter services, default value is localhost |
|
||||
| adapterPort | After 3.3.6.0 | Not supported | Internal parameter, The port of the taosadapter services, default value is 6041 |
|
||||
| adapterToken | After 3.3.6.0 | Not supported | Internal parameter, The string obtained by Base64-encoding `{username}:{password}`, default value is `cm9vdDp0YW9zZGF0YQ==` |
|
||||
|
||||
### Log Related
|
||||
|
||||
|
|
|
@ -371,10 +371,15 @@ Specify the configuration parameters for tag and data columns in `super_tables`
|
|||
|
||||
### Query Parameters
|
||||
|
||||
`filetype` must be set to `query`.
|
||||
|
||||
`query_mode` : connection method:
|
||||
- "taosc": Native.
|
||||
- "rest" : RESTful.
|
||||
|
||||
`query_times` specifies the number of times to run the query, numeric type.
|
||||
|
||||
In query scenarios, slow query statements can be controlled by setting the `kill_slow_query_threshold` and `kill_slow_query_interval` parameters: queries whose execution time exceeds the threshold (in seconds) are killed by taosBenchmark, and the interval (in seconds) is the sleep time used to avoid continuous CPU consumption by slow queries.

**Note: from version 3.3.5.6 onward, configuring both `specified_table_query` and `super_table_query` in the same JSON file is no longer supported.**
|
||||
|
||||
For other common parameters, see [General Configuration Parameters](#general-configuration-parameters)
|
||||
|
||||
|
@ -382,13 +387,26 @@ For other common parameters, see [General Configuration Parameters](#general-con
|
|||
|
||||
Configuration parameters for querying specified tables (can specify supertables, subtables, or regular tables) are set in `specified_table_query`.
|
||||
- **mixed_query** : Query mode. "yes" means `Mixed Query`, "no" means `General Query`; default is "no".
  `General Query`:
  Each SQL statement in `sqls` starts `threads` threads that query this SQL statement. Each thread exits after executing `query_times` queries, and the next SQL statement is executed only after all threads running the current one have finished.
  The total number of queries (`General Query`) = the number of `sqls` * `query_times` * `threads`.
  `Mixed Query`:
  All SQL statements in `sqls` are divided into `threads` groups, with each thread executing one group. Each SQL statement executes `query_times` queries.
  The total number of queries (`Mixed Query`) = the number of `sqls` * `query_times`.
|
||||
|
||||
- **batch_query** : Batch query switch.
  "yes": enabled.
  "no": disabled; any other value reports an error.
  Batch query divides all SQL statements in `sqls` into `threads` groups, with each thread executing one group.
  Each SQL statement is queried only once before the thread exits, and the main thread waits for all threads to complete; it then checks whether the `query_interval` parameter is set. If a sleep of the specified duration is required, each thread group is restarted and the previous process is repeated until the number of queries is exhausted.
  Functional limitations:
  - Only supported when `mixed_query` is set to "yes".
  - RESTful queries are not supported, meaning `query_mode` cannot be "rest".

- **query_interval** : Query interval, in milliseconds, default is 0.
  When the `batch_query` switch is enabled, it is the interval after each batch query completes; when disabled, it is the interval between the completion of individual SQL queries.
  If the execution time of a query exceeds the interval, no additional wait occurs; if it is shorter than the interval, the thread waits to make up the difference.
|
||||
|
||||
- **threads** : Number of threads executing the SQL query, default is 1.
|
||||
- **sqls**:
|
||||
- **sql**: The SQL command to execute, required.
|
||||
|
@ -491,6 +509,15 @@ Note: Data types in the taosBenchmark configuration file must be in lowercase to
|
|||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>queryStb.json</summary>
|
||||
|
||||
```json
|
||||
{{#include /TDengine/tools/taos-tools/example/queryStb.json}}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
#### Subscription Example
|
||||
|
||||
<details>
|
||||
|
|
|
@ -43,6 +43,7 @@ In TDengine, the following data types can be used in the data model of basic tab
|
|||
| 16 | VARCHAR | Custom | Alias for BINARY type |
|
||||
| 17 | GEOMETRY | Custom | Geometry type, supported starting from version 3.1.0.0 |
|
||||
| 18 | VARBINARY | Custom | Variable-length binary data, supported starting from version 3.1.1.0 |
|
||||
| 19 | DECIMAL | 8 or 16 | High-precision numeric type. The range of values depends on the precision and scale specified in the type. Supported starting from version 3.3.6. See the description below. |
|
||||
|
||||
:::note
|
||||
|
||||
|
@ -61,6 +62,18 @@ In TDengine, the following data types can be used in the data model of basic tab
|
|||
- VARBINARY is a data type for storing binary data, with a maximum length of 65,517 bytes for data columns and 16,382 bytes for label columns. Binary data can be written via SQL or schemaless methods (needs to be converted to a string starting with \x), or through stmt methods (can use binary directly). Displayed as hexadecimal starting with \x.
|
||||
|
||||
:::
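A short illustration of the VARBINARY behavior in the note above; the table name is illustrative and the example is a sketch, not an exhaustive reference.

```sql
CREATE TABLE vb (ts TIMESTAMP, b VARBINARY(16));
-- In SQL, binary values are written as strings starting with \x.
INSERT INTO vb VALUES (NOW, '\x7f8290');
-- The value is displayed back as hexadecimal starting with \x.
SELECT b FROM vb;
```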
|
||||
### DECIMAL Data Type
|
||||
|
||||
The `DECIMAL` data type is used for high-precision numeric storage and is supported starting from version 3.3.6. The definition syntax is: `DECIMAL(18, 2)`, `DECIMAL(38, 10)`, where two parameters must be specified: `precision` and `scale`. `Precision` refers to the maximum number of significant digits supported, and `scale` refers to the maximum number of decimal places. For example, `DECIMAL(8, 4)` represents a range of `[-9999.9999, 9999.9999]`. When defining the `DECIMAL` data type, the range of `precision` is `[1, 38]`, and the range of `scale` is `[0, precision]`. If `scale` is 0, it represents integers only. You can also omit `scale`, in which case it defaults to 0. For example, `DECIMAL(18)` is equivalent to `DECIMAL(18, 0)`.
|
||||
|
||||
When the `precision` value is less than or equal to 18, 8 bytes of storage (DECIMAL64) are used internally. When the `precision` is in the range `(18, 38]`, 16 bytes of storage (DECIMAL) are used. When writing `DECIMAL` type data in SQL, numeric values can be written directly. If the value exceeds the maximum representable value for the type, a `DECIMAL_OVERFLOW` error will be reported. If the value does not exceed the maximum representable value but the number of decimal places exceeds the `scale`, it will be automatically rounded. For example, if the type is defined as `DECIMAL(10, 2)` and the value `10.987` is written, the actual stored value will be `10.99`.
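A minimal sketch of the definition and rounding behavior described above; the table and column names are illustrative.

```sql
CREATE TABLE readings (ts TIMESTAMP, val DECIMAL(10, 2));
-- 10.987 has more decimal places than the scale of 2, so it is stored as 10.99.
INSERT INTO readings VALUES (NOW, 10.987);
SELECT val FROM readings;
```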
|
||||
|
||||
The `DECIMAL` type only supports regular columns and does not currently support tag columns. The `DECIMAL` type supports SQL-based writes only and does not currently support `stmt` or schemaless writes.
|
||||
|
||||
When performing operations between integer types and the `DECIMAL` type, the integer type is converted to the `DECIMAL` type before the calculation. When the `DECIMAL` type is involved in calculations with `DOUBLE`, `FLOAT`, `VARCHAR`, or `NCHAR` types, it is converted to `DOUBLE` type for computation.
|
||||
|
||||
When querying `DECIMAL` type expressions, if the intermediate result of the calculation exceeds the maximum value that the current type can represent, a `DECIMAL_OVERFLOW` error is reported.
|
||||
|
||||
|
||||
## Constants
|
||||
|
||||
|
|
|
@ -20,6 +20,7 @@ table_options:
|
|||
table_option: {
|
||||
COMMENT 'string_value'
|
||||
| SMA(col_name [, col_name] ...)
|
||||
| KEEP value
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -34,6 +35,7 @@ table_option: {
|
|||
- TAGS can have up to 128 columns, at least 1, with a total length not exceeding 16 KB.
|
||||
4. For the use of `ENCODE` and `COMPRESS`, please refer to [Column Compression](../manage-data-compression/)
|
||||
5. For explanations of parameters in table_option, please refer to [Table SQL Description](../manage-tables/)
|
||||
6. Regarding the keep parameter in table_option, it only takes effect for super tables. For detailed explanation of the keep parameter, please refer to [Database Description](02-database.md). The only difference is that the super table's keep parameter does not immediately affect query results, but only takes effect after compaction.
|
||||
|
||||
## View Supertables
|
||||
|
||||
|
@ -144,6 +146,7 @@ alter_table_options:
|
|||
|
||||
alter_table_option: {
|
||||
COMMENT 'string_value'
|
||||
| KEEP value
|
||||
}
|
||||
|
||||
```
|
||||
|
|
|
@ -276,6 +276,15 @@ TDengine supports INNER JOIN based on the timestamp primary key, with the follow
|
|||
5. Both sides of JOIN support subqueries.
|
||||
6. Does not support mixing with the FILL clause.
|
||||
|
||||
## INTERP
|
||||
The INTERP clause is a dedicated syntax for the [INTERP function](../function/#interp). When an SQL statement contains an INTERP clause, it can only query the INTERP function and cannot be used with other functions. Additionally, the INTERP clause cannot be used together with window clauses (window_clause) or group by clauses (group_by_clause). The INTERP function must be used with the RANGE, EVERY, and FILL clauses; stream computing does not support RANGE but requires the EVERY and FILL keywords. A usage sketch follows the list below.
|
||||
- The output time range for INTERP is specified by the RANGE(timestamp1, timestamp2) field, which must satisfy timestamp1 \<= timestamp2. Here, timestamp1 is the start value of the output time range, i.e., if the conditions for interpolation are met at timestamp1, then timestamp1 is the first record output, and timestamp2 is the end value of the output time range, i.e., the timestamp of the last record output cannot be greater than timestamp2.
|
||||
- INTERP determines the number of results within the output time range based on the EVERY(time_unit) field, starting from timestamp1 and interpolating at fixed intervals of time (time_unit value), where time_unit can be time units: 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), 1w (weeks). For example, EVERY(500a) will interpolate the specified data every 500 milliseconds.
|
||||
- INTERP determines how to interpolate at each time point that meets the output conditions based on the FILL field. For how to use the FILL clause, refer to [FILL Clause](../time-series-extensions/)
|
||||
- INTERP can interpolate at a single time point specified in the RANGE field, in which case the EVERY field can be omitted. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
|
||||
- INTERP query supports NEAR FILL mode, i.e., when FILL is needed, it uses the data closest to the current time point for interpolation. When the timestamps before and after are equally close to the current time slice, FILL the previous row's value. This mode is not supported in stream computing and window queries. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', '2023-01-01 00:10:00') FILL(NEAR).(Supported from version 3.3.4.9).
|
||||
- INTERP `RANGE` clause supports the expansion of the time range (supported from version 3.3.4.9), such as `RANGE('2023-01-01 00:00:00', 10s)` means to find data 10s before and after the time point '2023-01-01 00:00:00' for interpolation, FILL PREV/NEXT/NEAR respectively means to look for data forward/backward/around the time point, if there is no data around the time point, then use the value specified by FILL for interpolation, therefore the FILL clause must specify a value at this time. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', 10s) FILL(PREV, 1). Currently, only the combination of time point and time range is supported, not the combination of time interval and time range, i.e., RANGE('2023-01-01 00:00:00', '2023-02-01 00:00:00', 1h) is not supported. The specified time range rules are similar to EVERY, the unit cannot be year or month, the value cannot be 0, and cannot have quotes. When using this extension, other FILL modes except FILL PREV/NEXT/NEAR are not supported, and the EVERY clause cannot be specified.
|
||||
|
||||
## GROUP BY
|
||||
|
||||
If a GROUP BY clause is specified in the statement, the SELECT list can only contain the following expressions:
|
||||
|
|
|
@ -1186,6 +1186,7 @@ CAST(expr AS type_name)
|
|||
1) Invalid character situations when converting string types to numeric types, e.g., "a" might convert to 0, but will not throw an error.
|
||||
2) When converting to numeric types, if the value exceeds the range that `type_name` can represent, it will overflow, but will not throw an error.
|
||||
3) When converting to string types, if the converted length exceeds the length specified in `type_name`, it will be truncated, but will not throw an error.
|
||||
- The DECIMAL type does not support conversion to or from JSON, VARBINARY, or GEOMETRY types.
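A few hedged examples of the conversion rules listed above (constant expressions are used here purely for illustration):

```sql
SELECT CAST('a' AS INT);                  -- invalid characters yield 0, no error
SELECT CAST(100000 AS TINYINT);           -- out-of-range values overflow silently
SELECT CAST('hello world' AS VARCHAR(5)); -- result is truncated to 'hello'
```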
|
||||
|
||||
#### TO_ISO8601
|
||||
|
||||
|
@ -1691,12 +1692,14 @@ AVG(expr)
|
|||
|
||||
**Function Description**: Calculates the average value of the specified field.
|
||||
|
||||
**Return Data Type**: DOUBLE, DECIMAL.
|
||||
|
||||
**Applicable Data Types**: Numeric types.
|
||||
|
||||
**Applicable to**: Tables and supertables.
|
||||
|
||||
**Description**: When the input type is DECIMAL, the output type is also DECIMAL. The precision and scale of the output conform to the rules described in the data type section. The result type is obtained by dividing the SUM type by UINT64. If the SUM result causes a DECIMAL type overflow, a DECIMAL OVERFLOW error is reported.
|
||||
|
||||
### COUNT
|
||||
|
||||
```sql
|
||||
|
@ -1847,12 +1850,14 @@ SUM(expr)
|
|||
|
||||
**Function Description**: Calculates the sum of a column in a table/supertable.
|
||||
|
||||
**Return Data Type**: DOUBLE, BIGINT, DECIMAL.
|
||||
|
||||
**Applicable Data Types**: Numeric types.
|
||||
|
||||
**Applicable to**: Tables and supertables.
|
||||
|
||||
**Description**: When the input type is DECIMAL, the output type is DECIMAL(38, scale), where precision is the maximum value currently supported, and scale is the scale of the input type. If the SUM result overflows, a DECIMAL OVERFLOW error is reported.
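A brief sketch, assuming a hypothetical table `bills` with a column `amount DECIMAL(10, 2)`:

```sql
-- SUM keeps the input scale with the maximum precision: DECIMAL(38, 2).
SELECT SUM(amount) FROM bills;
-- AVG also returns a DECIMAL result, following the division rules described above.
SELECT AVG(amount) FROM bills;
```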
|
||||
|
||||
### HYPERLOGLOG
|
||||
|
||||
```sql
|
||||
|
@ -1965,42 +1970,6 @@ FIRST(expr)
|
|||
- If all columns in the result set are NULL, no results are returned.
|
||||
- For tables with composite primary keys, if there are multiple entries with the smallest timestamp, only the data with the smallest composite primary key is returned.
|
||||
|
||||
### INTERP
|
||||
|
||||
```sql
|
||||
INTERP(expr [, ignore_null_values])
|
||||
|
||||
ignore_null_values: {
|
||||
0
|
||||
| 1
|
||||
}
|
||||
```
|
||||
|
||||
**Function Description**: Returns the record value or interpolated value of a specified column at a specified time slice. The ignore_null_values parameter can be 0 or 1, where 1 means to ignore NULL values, default is 0.
|
||||
|
||||
**Return Data Type**: Same as the field type.
|
||||
|
||||
**Applicable Data Types**: Numeric types.
|
||||
|
||||
**Applicable to**: Tables and supertables.
|
||||
|
||||
**Usage Instructions**
|
||||
|
||||
- INTERP is used to obtain the record value of a specified column at a specified time slice. If there is no row data that meets the conditions at that time slice, interpolation will be performed according to the settings of the FILL parameter.
|
||||
- The input data for INTERP is the data of the specified column, which can be filtered through conditional statements (where clause). If no filtering condition is specified, the input is all data.
|
||||
- INTERP SQL queries need to be used together with the RANGE, EVERY, and FILL keywords; stream computing cannot use RANGE, needs EVERY and FILL keywords together.
|
||||
- The output time range for INTERP is specified by the RANGE(timestamp1, timestamp2) field, which must satisfy timestamp1 \<= timestamp2. Here, timestamp1 is the start value of the output time range, i.e., if the conditions for interpolation are met at timestamp1, then timestamp1 is the first record output, and timestamp2 is the end value of the output time range, i.e., the timestamp of the last record output cannot be greater than timestamp2.
|
||||
- INTERP determines the number of results within the output time range based on the EVERY(time_unit) field, starting from timestamp1 and interpolating at fixed intervals of time (time_unit value), where time_unit can be time units: 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), 1w (weeks). For example, EVERY(500a) will interpolate the specified data every 500 milliseconds.
|
||||
- INTERP determines how to interpolate at each time point that meets the output conditions based on the FILL field. For how to use the FILL clause, refer to [FILL Clause](../time-series-extensions/)
|
||||
- INTERP can interpolate at a single time point specified in the RANGE field, in which case the EVERY field can be omitted. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
|
||||
- When INTERP is applied to a supertable, it will sort all the subtable data under that supertable by primary key column and perform interpolation calculations, and can also be used with PARTITION BY tbname to force the results to a single timeline.
|
||||
- INTERP can be used with the pseudocolumn _irowts to return the timestamp corresponding to the interpolation point (supported from version 3.0.2.0).
|
||||
- INTERP can be used with the pseudocolumn _isfilled to display whether the return result is from the original record or generated by the interpolation algorithm (supported from version 3.0.3.0).
|
||||
- For queries on tables with composite primary keys, if there are data with the same timestamp, only the data with the smallest composite primary key participates in the calculation.
|
||||
- INTERP query supports NEAR FILL mode, i.e., when FILL is needed, it uses the data closest to the current time point for interpolation. When the timestamps before and after are equally close to the current time slice, FILL the previous row's value. This mode is not supported in stream computing and window queries. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', '2023-01-01 00:10:00') FILL(NEAR).(Supported from version 3.3.4.9).
|
||||
- INTERP can only use the pseudocolumn `_irowts_origin` when using FILL PREV/NEXT/NEAR modes. `_irowts_origin` is supported from version 3.3.4.9.
|
||||
- INTERP `RANGE` clause supports the expansion of the time range (supported from version 3.3.4.9), For example, `RANGE('2023-01-01 00:00:00', 10s)` means that only data within 10s around the time point '2023-01-01 00:00:00' can be used for interpolation. `FILL PREV/NEXT/NEAR` respectively means to look for data forward/backward/around the time point. If there is no data around the time point, the default value specified by `FILL` is used for interpolation. Therefore the `FILL` clause must specify the default value at the same time. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', 10s) FILL(PREV, 1). Starting from the 3.3.6.0 version, the combination of time period and time range is supported. When interpolating for each point within the time period, the time range requirement must be met. Prior versions only supported single time point and its time range. The available values for time range are similar to `EVERY`, the unit cannot be year or month, the value must be greater than 0, and cannot be in quotes. When using this extension, `FILL` modes other than `PREV/NEXT/NEAR` are not supported.
|
||||
|
||||
### LAST
|
||||
|
||||
```sql
|
||||
|
@ -2266,6 +2235,36 @@ ignore_option: {
|
|||
- When there is no composite primary key, if different subtables have data with the same timestamp, a "Duplicate timestamps not allowed" message will be displayed
|
||||
- When using composite primary keys, the timestamp and primary key combinations of different subtables may be the same, which row is used depends on which one is found first, meaning that the results of running diff() multiple times in this situation may vary.
|
||||
|
||||
### INTERP
|
||||
|
||||
```sql
|
||||
INTERP(expr [, ignore_null_values])
|
||||
|
||||
ignore_null_values: {
|
||||
0
|
||||
| 1
|
||||
}
|
||||
```
|
||||
|
||||
**Function Description**: Returns the record value or interpolated value of a specified column at a specified time slice. The ignore_null_values parameter can be 0 or 1, where 1 means to ignore NULL values, default is 0.
|
||||
|
||||
**Return Data Type**: Same as the field type.
|
||||
|
||||
**Applicable Data Types**: Numeric types.
|
||||
|
||||
**Applicable to**: Tables and supertables.
|
||||
|
||||
**Usage Instructions**
|
||||
|
||||
- INTERP is used to obtain the record value of a specified column at the specified time slice. It has a dedicated syntax (interp_clause) when used. For syntax introduction, see [reference link](../query-data/#interp).
|
||||
- When there is no row data that meets the conditions at the specified time slice, the INTERP function will interpolate according to the settings of the [FILL](../time-series-extensions/#fill-clause) parameter.
|
||||
- When INTERP is applied to a supertable, it will sort all the subtable data under that supertable by primary key column and perform interpolation calculations, and can also be used with PARTITION BY tbname to force the results to a single timeline.
|
||||
- When using INTERP with FILL PREV/NEXT/NEAR modes, its behavior differs from window queries. If data exists at the slice, no FILL operation will be performed, even if the current value is NULL.
|
||||
- INTERP can be used with the pseudocolumn _irowts to return the timestamp corresponding to the interpolation point (supported from version 3.0.2.0).
|
||||
- INTERP can be used with the pseudocolumn _isfilled to display whether the return result is from the original record or generated by the interpolation algorithm (supported from version 3.0.3.0).
|
||||
- INTERP can only use the pseudocolumn `_irowts_origin` when using FILL PREV/NEXT/NEAR modes. `_irowts_origin` is supported from version 3.3.4.9.
|
||||
- For queries on tables with composite primary keys, if there are data with the same timestamp, only the data with the smallest composite primary key participates in the calculation.
|
||||
|
||||
### IRATE
|
||||
|
||||
```sql
|
||||
|
|
|
@ -84,10 +84,10 @@ The FILL statement specifies the filling mode when data is missing in a window i
|
|||
|
||||
1. No filling: NONE (default filling mode).
|
||||
2. VALUE filling: Fixed value filling, where the fill value must be specified. For example: FILL(VALUE, 1.23). Note that the final fill value is determined by the type of the corresponding column, such as FILL(VALUE, 1.23), if the corresponding column is of INT type, then the fill value is 1. If multiple columns in the query list need FILL, then each FILL column must specify a VALUE, such as `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`. Note, only ordinary columns in the SELECT expression need to specify FILL VALUE, such as `_wstart`, `_wstart+1a`, `now`, `1+1` and the partition key (like tbname) used with partition by do not need to specify VALUE, like `timediff(last(ts), _wstart)` needs to specify VALUE.
|
||||
3. PREV filling: Fill data using the previous value. For example: FILL(PREV).
|
||||
4. NULL filling: Fill data with NULL. For example: FILL(NULL).
|
||||
5. LINEAR filling: Perform linear interpolation filling based on the nearest non-NULL values before and after. For example: FILL(LINEAR).
|
||||
6. NEXT filling: Fill data using the next value. For example: FILL(NEXT).
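For illustration, a hedged sketch applying two of the modes above to the smart-meter example schema:

```sql
-- VALUE filling: each aggregate column in the select list needs its own fill value.
SELECT _wstart, MIN(current), MAX(current)
FROM meters
WHERE ts >= '2025-01-01 00:00:00' AND ts < '2025-01-02 00:00:00'
INTERVAL(1h)
FILL(VALUE, 0, 0);

-- PREV filling: empty windows reuse the previous window's value.
SELECT _wstart, AVG(voltage)
FROM meters
WHERE ts >= '2025-01-01 00:00:00' AND ts < '2025-01-02 00:00:00'
INTERVAL(1h)
FILL(PREV);
```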
|
||||
|
||||
Among these filling modes, except for the NONE mode which does not fill by default, other modes will be ignored if there is no data in the entire query time range, resulting in no fill data and an empty query result. This behavior is reasonable under some modes (PREV, NEXT, LINEAR) because no data means no fill value can be generated. For other modes (NULL, VALUE), theoretically, fill values can be generated, and whether to output fill values depends on the application's needs. To meet the needs of applications that require forced filling of data or NULL, without breaking the compatibility of existing filling modes, two new filling modes have been added starting from version 3.0.3.0:
|
||||
|
||||
|
@ -112,7 +112,7 @@ The differences between NULL, NULL_F, VALUE, VALUE_F filling modes for different
|
|||
|
||||
Time windows can be divided into sliding time windows and tumbling time windows.
|
||||
|
||||
The INTERVAL clause is used to generate windows of equal time periods, and SLIDING is used to specify the time the window slides forward. Each executed query is a time window, and the time window slides forward as time flows. When defining continuous queries, it is necessary to specify the size of the time window (time window) and the forward sliding times for each execution. As shown, [t0s, t0e], [t1s, t1e], [t2s, t2e] are the time window ranges for three continuous queries, and the sliding time range is indicated by sliding time. Query filtering, aggregation, and other operations are performed independently for each time window. When SLIDING is equal to INTERVAL, the sliding window becomes a tumbling window. By default, windows begin at Unix time 0 (1970-01-01 00:00:00 UTC). If interval_offset is specified, the windows start from "Unix time 0 + interval_offset".
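For example, a sliding window versus a tumbling window; this is a hedged sketch on the smart-meter schema:

```sql
-- A 10-minute window that slides forward every 5 minutes (overlapping windows).
SELECT _wstart, _wend, COUNT(*) FROM meters INTERVAL(10m) SLIDING(5m);

-- When SLIDING equals INTERVAL (or is omitted), the windows tumble without overlap.
SELECT _wstart, _wend, COUNT(*) FROM meters INTERVAL(10m);
```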
|
||||
|
||||
<figure>
|
||||
<Image img={imgStep01} alt=""/>
|
||||
|
|
|
@ -58,11 +58,11 @@ Note: Subscriptions to supertables and databases are advanced subscription modes
|
|||
|
||||
## Delete topic
|
||||
|
||||
If you no longer need to subscribe to the data, you can delete the topic. If the current topic is subscribed to by a consumer, it can be forcibly deleted using the FORCE syntax. After the forced deletion, the subscribed consumer will consume data with errors (FORCE syntax supported from version 3.3.6.0)
|
||||
|
||||
```sql
|
||||
/* Delete topic */
|
||||
DROP TOPIC [IF EXISTS] [FORCE] topic_name;
|
||||
```
|
||||
|
||||
At this point, if there are consumers on this subscription topic, they will receive an error.
|
||||
|
@ -81,8 +81,10 @@ Consumer groups can only be created through the TDengine client driver or APIs p
|
|||
|
||||
## Delete consumer group
|
||||
|
||||
When creating a consumer, a consumer group is assigned to the consumer. Consumers cannot be explicitly deleted, but the consumer group can be deleted. If there are consumers in the current consumer group who are consuming, the FORCE syntax can be used to force deletion. After forced deletion, subscribed consumers will consume data with errors (FORCE syntax supported from version 3.3.6.0).
|
||||
|
||||
```sql
|
||||
DROP CONSUMER GROUP [IF EXISTS] [FORCE] cgroup_name ON topic_name;
|
||||
```
|
||||
|
||||
Deletes the consumer group `cgroup_name` on the topic `topic_name`.
|
||||
|
|
|
@ -11,11 +11,11 @@ import imgStream from './assets/stream-processing-01.png';
|
|||
```sql
|
||||
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name[(field1_name, field2_name [PRIMARY KEY], ...)] [TAGS (create_definition [, create_definition] ...)] SUBTABLE(expression) AS subquery [notification_definition]
|
||||
stream_options: {
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE | CONTINUOUS_WINDOW_CLOSE [recalculate rec_time_val] ]
|
||||
WATERMARK time
|
||||
IGNORE EXPIRED [0|1]
|
||||
DELETE_MARK time
|
||||
FILL_HISTORY [0|1] [ASYNC]
|
||||
IGNORE UPDATE [0|1]
|
||||
}
|
||||
|
||||
|
@ -127,6 +127,13 @@ create stream if not exists s1 fill_history 1 into st1 as select count(*) from
|
|||
|
||||
If the stream task is completely outdated and you no longer want it to monitor or process data, you can manually delete it. The computed data will still be retained.
|
||||
|
||||
Tips:
|
||||
- When fill_history is enabled, creating a stream requires finding the boundary point of historical data. If there is a lot of historical data, this may cause the stream creation task to take a long time. In this case, you can use `fill_history 1 async` (supported since version 3.3.6.0) so that the creation task is processed in the background; the CREATE STREAM statement then returns immediately without blocking subsequent operations (see the sketch after this list). ASYNC only takes effect with `fill_history 1`; creating a stream with `fill_history 0` is very fast and does not require asynchronous processing.
|
||||
|
||||
- SHOW STREAMS can be used to view the progress of background stream creation (ready status indicates success; init status indicates the stream is still being created; failed status indicates that creation has failed, and the message column shows the reason. A failed stream can be deleted and recreated).
- Besides, do not create multiple streams asynchronously at the same time, as transaction conflicts may cause subsequent streams to fail.
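A minimal sketch of the asynchronous variant (3.3.6.0+), reusing the example stream shown earlier in this section:

```sql
-- The statement returns immediately; history is back-filled in the background.
CREATE STREAM IF NOT EXISTS s1 FILL_HISTORY 1 ASYNC
  INTO st1 AS
  SELECT COUNT(*) FROM t1 INTERVAL(10s);

-- Check progress: 'ready' means the stream is fully built, 'init' means still building.
SHOW STREAMS;
```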
|
||||
|
||||
## Deleting Stream Computing
|
||||
|
||||
```sql
|
||||
|
@ -157,6 +164,7 @@ For non-window computations, the trigger of stream computing is real-time; for w
|
|||
2. WINDOW_CLOSE: Triggered when the window closes (window closure is determined by event time, can be used in conjunction with watermark)
|
||||
3. MAX_DELAY time: Trigger computation if the window closes. If the window does not close, and the time since it has not closed exceeds the time specified by max delay, then trigger computation.
|
||||
4. FORCE_WINDOW_CLOSE: Based on the current time of the operating system, only compute and push the results of the currently closed window. The window is only computed once at the moment of closure and will not be recalculated subsequently. This mode currently only supports INTERVAL windows (does not support sliding); FILL_HISTORY must be 0, IGNORE EXPIRED must be 1, IGNORE UPDATE must be 1; FILL only supports PREV, NULL, NONE, VALUE.
|
||||
5. CONTINUOUS_WINDOW_CLOSE: Results are output when the window is closed. Modifying or deleting data does not immediately trigger a recalculation. Instead, periodic recalculations are performed every rec_time_val duration. If rec_time_val is not specified, the recalculation period is 60 minutes. If the recalculation time exceeds rec_time_val, the next recalculation will be automatically initiated after the current one is completed. Currently, this mode only supports INTERVAL windows. If the FILL clause is used, relevant information of the adapter needs to be configured, including adapterFqdn, adapterPort, and adapterToken. The adapterToken is a string obtained by Base64-encoding `{username}:{password}`. For example, after encoding `root:taosdata`, the result is `cm9vdDp0YW9zZGF0YQ==`.
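A hedged sketch of the CONTINUOUS_WINDOW_CLOSE mode described above; the stream, target table, and recalculation period are illustrative.

```sql
-- Results are emitted on window close and recalculated every 10 minutes.
CREATE STREAM s_cwc TRIGGER CONTINUOUS_WINDOW_CLOSE RECALCULATE 10m
  INTO st_cwc AS
  SELECT _wstart, AVG(current) AS avg_current FROM meters INTERVAL(1m);
```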
|
||||
|
||||
Since the closure of the window is determined by event time, if the event stream is interrupted or continuously delayed, the event time cannot be updated, which may result in not obtaining the latest computation results.
|
||||
|
||||
|
@ -524,6 +532,24 @@ These fields are present only when "windowType" is "Count".
|
|||
#### Fields for Window Invalidation
|
||||
|
||||
Due to scenarios such as data disorder, updates, or deletions during stream computing, windows that have already been generated might be removed or their results need to be recalculated. In such cases, a notification with the eventType "WINDOW_INVALIDATION" is sent to inform which windows have been invalidated.
|
||||
|
||||
For events with "eventType" as "WINDOW_INVALIDATION", the following fields are included:
|
||||
1. "windowStart": A long integer timestamp representing the start time of the window.
|
||||
1. "windowEnd": A long integer timestamp representing the end time of the window.
|
||||
|
||||
## Support for Virtual Tables in Stream Computing
|
||||
|
||||
Starting with v3.3.6.0, stream computing can use virtual tables—including virtual regular tables, virtual sub-tables, and virtual super tables—as data sources for computation. The syntax is identical to that for non‑virtual tables.
|
||||
|
||||
However, because the behavior of virtual tables differs from that of non‑virtual tables, the following restrictions apply when using stream computing:
|
||||
|
||||
1. The schema of virtual regular tables/virtual sub-tables involved in stream computing cannot be modified.
|
||||
1. During stream computing, if the data source corresponding to a column in a virtual table is changed, the stream computation will not pick up the change; it will still read from the old data source.
|
||||
1. During stream computing, if the original table corresponding to a column in a virtual table is deleted and later a new table with the same name and a column with the same name is created, the stream computation will not read data from the new table.
|
||||
1. The watermark for stream computing must be 0; otherwise, an error will occur during creation.
|
||||
1. If the data source for stream computing is a virtual super table, sub-tables that are added after the stream computing task starts will not participate in the computation.
|
||||
1. The timestamps of different underlying tables in a virtual table may not be completely consistent; merging the data might produce null values, and interpolation is currently not supported.
|
||||
1. Out-of-order data, updates, or deletions are not handled. In other words, when creating a stream, you cannot specify `ignore update 0` or `ignore expired 0`; otherwise, an error will be reported.
|
||||
1. Historical data computation is not supported. That is, when creating a stream, you cannot specify `fill_history 1`; otherwise, an error will be reported.
|
||||
1. The trigger modes MAX_DELAY, CONTINUOUS_WINDOW_CLOSE and FORCE_WINDOW_CLOSE are not supported.
|
||||
1. The COUNT_WINDOW type is not supported.
|
||||
|
|
|
@ -35,6 +35,7 @@ The list of keywords is as follows:
|
|||
| AS | |
|
||||
| ASC | |
|
||||
| ASOF | |
|
||||
| ASYNC | 3.3.6.0+ |
|
||||
| AT_ONCE | |
|
||||
| ATTACH | |
|
||||
| AUTO | 3.3.5.0+ |
|
||||
|
|
|
@ -29,6 +29,17 @@ SELECT a.* FROM meters a LEFT ASOF JOIN meters b ON timetruncate(a.ts, 1s) < tim
|
|||
### Main Join Condition
|
||||
|
||||
As a time-series database, all join queries in TDengine revolve around the primary key timestamp column. Therefore, all join queries (except ASOF/Window Join) must include an equality condition on the primary key column, and the first primary key column equality condition that appears in the join conditions will be considered the main join condition. ASOF Join's main join condition can include non-equality conditions, while Window Join's main join condition is specified through `WINDOW_OFFSET`.
|
||||
Starting from version 3.3.6.0, TDengine supports constant timestamps in subqueries (including functions that return a constant timestamp, such as today() and now(), constant timestamp literals, and their addition and subtraction operations) being treated as equivalent primary key columns that can appear in the main join condition. For example:
|
||||
|
||||
```sql
|
||||
SELECT * from d1001 a JOIN (SELECT today() as ts1, * from d1002 WHERE ts = '2025-03-19 10:00:00.000') b ON timetruncate(a.ts, 1d) = b.ts1;
|
||||
```
|
||||
|
||||
The above example SQL joins all of today's records in table d1001 with the record at a specific time in table d1002. Note that a constant time string appearing in SQL is not treated as a timestamp by default; for example, "2025-03-19 10:00:00.000" is treated only as a string, not as a timestamp. Therefore, when it needs to be treated as a constant timestamp, you can mark the constant string as a timestamp type by using the type prefix `timestamp`. For example:
|
||||
|
||||
```sql
|
||||
SELECT * from d1001 a JOIN (SELECT timestamp '2025-03-19 10:00:00.000' as ts1, * from d1002 WHERE ts = '2025-03-19 10:00:00.000') b ON timetruncate(a.ts, 1d) = b.ts1;
|
||||
```
|
||||
|
||||
Apart from Window Join, TDengine supports the `timetruncate` function operation in the main join condition, such as `ON timetruncate(a.ts, 1s) = timetruncate(b.ts, 1s)`, but does not support other functions and scalar operations.
|
||||
|
||||
|
@ -38,7 +49,7 @@ The characteristic ASOF/Window Join of time-series databases supports grouping t
|
|||
|
||||
### Primary Key Timeline
|
||||
|
||||
As a time-series database, TDengine requires each table (subtable) to have a primary key timestamp column, which will serve as the primary key timeline for many time-related operations. The result of a subquery or the result of a Join operation also needs to clearly identify which column will be considered the primary key timeline for subsequent time-related operations. In subqueries, the first appearing ordered primary key column (or its operation) or a pseudocolumn equivalent to the primary key column (`_wstart`/`_wend`) will be considered the primary key timeline of the output table. In addition, starting with version 3.3.6.0, TDengine also supports constant timestamp columns in subquery results as the primary key timeline for the output table. The selection of the primary key timeline in Join output results follows these rules:
|
||||
|
||||
- In the Left/Right Join series, the primary key column of the driving table (subquery) will be used as the primary key timeline for subsequent queries; additionally, within the Window Join window, since both tables are ordered, any table's primary key column can be used as the primary key timeline, with a preference for the primary key column of the same table.
|
||||
- Inner Join can use the primary key column of any table as the primary key timeline, but when there are grouping conditions similar to tag column equality conditions related by `AND` with the main join condition, it will not produce a primary key timeline.
|
||||
|
|
|
@ -36,6 +36,7 @@ In this document, it specifically refers to the internal levels of the second-le
|
|||
| float/double | disabled/delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium |
|
||||
| binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
|
||||
| bool | disabled/bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium |
|
||||
| decimal | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
|
||||
|
||||
## SQL Syntax
|
||||
|
||||
|
|
|
@ -682,7 +682,7 @@ The basic API is used to establish database connections and provide a runtime en
|
|||
- **Interface Description**: Cleans up the runtime environment, should be called before the application exits.
|
||||
|
||||
- `int taos_options(TSDB_OPTION option, const void * arg, ...)`
|
||||
- **Interface Description**: Sets client options, currently supports locale (`TSDB_OPTION_LOCALE`), character set (`TSDB_OPTION_CHARSET`), timezone (`TSDB_OPTION_TIMEZONE`), configuration file path (`TSDB_OPTION_CONFIGDIR`), and driver type (`TSDB_OPTION_DRIVER`). Locale, character set, and timezone default to the current settings of the operating system. The driver type can be either the native interface (`native`) or the WebSocket interface (`websocket`), with the default being `websocket`. A usage sketch follows the parameter description.
|
||||
- **Parameter Description**:
|
||||
- `option`: [Input] Setting item type.
|
||||
- `arg`: [Input] Setting item value.
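A minimal usage sketch, assuming the client library is installed locally; the option values shown are illustrative assumptions, and options should be set before the first connection is established.

```c
#include <stdio.h>
#include "taos.h"

int main(void) {
  // Client options should be set before the first call to taos_connect().
  taos_options(TSDB_OPTION_LOCALE, "en_US.UTF-8");
  taos_options(TSDB_OPTION_CHARSET, "UTF-8");
  taos_options(TSDB_OPTION_TIMEZONE, "UTC-8");
  taos_options(TSDB_OPTION_DRIVER, "websocket");  // or "native"

  // Port 0 selects the default port for the configured connection type.
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (taos == NULL) {
    fprintf(stderr, "Failed to connect\n");
    return 1;
  }
  taos_close(taos);
  taos_cleanup();
  return 0;
}
```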
|
||||
|
@ -830,6 +830,12 @@ This section introduces APIs that are all synchronous interfaces. After being ca
|
|||
- res: [Input] Result set.
|
||||
- **Return Value**: Non-`NULL`: successful, returns a pointer to a TAOS_FIELD structure, each element representing the metadata of a column. `NULL`: failure.
|
||||
|
||||
- `TAOS_FIELD_E *taos_fetch_fields_e(TAOS_RES *res)`
|
||||
- **Interface Description**: Retrieves the attributes of each column in the query result set (column name, data type, column length). Used in conjunction with `taos_num_fields()`, it can be used to parse the data of a tuple (a row) returned by `taos_fetch_row()`. In addition to the basic information provided by TAOS_FIELD, TAOS_FIELD_E also includes `precision` and `scale` information for the data type. A usage sketch follows this entry.
|
||||
- **Parameter Description**:
|
||||
- res: [Input] Result set.
|
||||
- **Return Value**: Non-`NULL`: Success, returns a pointer to a TAOS_FIELD_E structure, where each element represents the metadata of a column. `NULL`: Failure.
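For illustration, the sketch below prints per-column metadata from a result set. The TAOS_FIELD_E member names used here (`name`, `type`, `bytes`, `precision`, `scale`) are assumptions based on this description; check them against the `taos.h` shipped with your client.

```c
#include <stdio.h>
#include "taos.h"

// Print the name, type, length, precision, and scale of every result column.
static void print_result_fields(TAOS_RES *res) {
  int num_fields = taos_num_fields(res);
  TAOS_FIELD_E *fields = taos_fetch_fields_e(res);
  if (fields == NULL) {
    fprintf(stderr, "taos_fetch_fields_e failed: %s\n", taos_errstr(res));
    return;
  }
  for (int i = 0; i < num_fields; i++) {
    printf("column %d: name=%s type=%d bytes=%d precision=%u scale=%u\n",
           i, fields[i].name, (int)fields[i].type, (int)fields[i].bytes,
           (unsigned)fields[i].precision, (unsigned)fields[i].scale);
  }
}
```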
|
||||
|
||||
- `void taos_stop_query(TAOS_RES *res)`
|
||||
- **Interface Description**: Stops the execution of the current query.
|
||||
- **Parameter Description**:
|
||||
|
@ -1121,10 +1127,14 @@ In addition to using SQL or parameter binding APIs to insert data, you can also
|
|||
- conf: [Input] Pointer to a valid tmq_conf_t structure, representing a TMQ configuration object.
|
||||
- key: [Input] Configuration item key name.
|
||||
- value: [Input] Configuration item value.
|
||||
- **Return Value**: Returns a `tmq_conf_res_t` enum value, indicating the result of the configuration setting. `tmq_conf_res_t` is defined as follows, and a usage sketch follows the definition:
|
||||
```c
|
||||
typedef enum tmq_conf_res_t {
|
||||
TMQ_CONF_UNKNOWN = -2, // invalid key
|
||||
TMQ_CONF_INVALID = -1, // invalid value
|
||||
TMQ_CONF_OK = 0, // success
|
||||
} tmq_conf_res_t;
|
||||
```
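A minimal sketch of checking this return value while building a consumer configuration; the keys `group.id` and `auto.offset.reset` are common examples and are used here only for illustration.

```c
#include <stdio.h>
#include "taos.h"

// Set one configuration item and report the tmq_conf_res_t result.
static int set_conf(tmq_conf_t *conf, const char *key, const char *value) {
  switch (tmq_conf_set(conf, key, value)) {
    case TMQ_CONF_OK:
      return 0;
    case TMQ_CONF_INVALID:
      fprintf(stderr, "Invalid value '%s' for key '%s'\n", value, key);
      return -1;
    case TMQ_CONF_UNKNOWN:
    default:
      fprintf(stderr, "Unknown configuration key '%s'\n", key);
      return -1;
  }
}

// Usage sketch:
//   tmq_conf_t *conf = tmq_conf_new();
//   set_conf(conf, "group.id", "demo_group");
//   set_conf(conf, "auto.offset.reset", "earliest");
//   ... build the consumer, then call tmq_conf_destroy(conf);
```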
|
||||
|
||||
- `void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param)`
|
||||
- **Interface Description**: Sets the auto-commit callback function in the TMQ configuration object.
|
||||
|
|
|
@ -121,6 +121,7 @@ Please refer to the specific error codes:
|
|||
| 0x2378 | consumer create error | Data subscription creation failed, check the error information and taos log for troubleshooting. |
|
||||
| 0x2379 | seek offset must not be a negative number | The seek interface parameter must not be negative, use the correct parameters. |
|
||||
| 0x237a | vGroup not found in result set | VGroup not assigned to the current consumer, due to the Rebalance mechanism causing the Consumer and VGroup to be unbound. |
|
||||
| 0x2390 | background thread write error in Efficient Writing | If a background thread write error occurs in Efficient Writing mode, stop writing and rebuild the connection. |
|
||||
|
||||
- [TDengine Java Connector Error Code](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||
|
@ -148,6 +149,7 @@ TDengine currently supports timestamp, numeric, character, boolean types, and th
|
|||
| JSON | java.lang.String | only supported in tags |
|
||||
| VARBINARY | byte[] | |
|
||||
| GEOMETRY | byte[] | |
|
||||
| DECIMAL | java.math.BigDecimal | |
|
||||
|
||||
**Note**: Due to historical reasons, the BINARY type in TDengine is not truly binary data and is no longer recommended. Please use VARBINARY type instead.
|
||||
GEOMETRY type is binary data in little endian byte order, complying with the WKB standard. For more details, please refer to [Data Types](../../sql-manual/data-types/)
|
||||
|
@ -319,7 +321,15 @@ The configuration parameters in properties are as follows:
|
|||
- TSDBDriver.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: Disable SSL certificate validation. Effective only when using WebSocket connections. true: enabled, false: not enabled. Default is false.
|
||||
- TSDBDriver.PROPERTY_KEY_APP_NAME: App name, can be used for display in the `show connections` query result. Effective only when using WebSocket connections. Default value is java.
|
||||
- TSDBDriver.PROPERTY_KEY_APP_IP: App IP, can be used for display in the `show connections` query result. Effective only when using WebSocket connections. Default value is empty.
|
||||
|
||||
|
||||
- TSDBDriver.PROPERTY_KEY_ASYNC_WRITE: Efficient Writing mode. Currently, only the `stmt` method is supported. Effective only when using WebSocket connections. Default value is empty, meaning Efficient Writing mode is not enabled.
|
||||
- TSDBDriver.PROPERTY_KEY_BACKEND_WRITE_THREAD_NUM: In Efficient Writing mode, this refers to the number of background write threads. Effective only when using WebSocket connections. Default value is 10.
|
||||
- TSDBDriver.PROPERTY_KEY_BATCH_SIZE_BY_ROW: In Efficient Writing mode, this is the batch size for writing data, measured in rows. Effective only when using WebSocket connections. Default value is 1000.
|
||||
- TSDBDriver.PROPERTY_KEY_CACHE_SIZE_BY_ROW: In Efficient Writing mode, this is the cache size, measured in rows. Effective only when using WebSocket connections. Default value is 10000.
|
||||
- TSDBDriver.PROPERTY_KEY_COPY_DATA: In Efficient Writing mode, this determines whether to copy the binary data passed by the application through the `addBatch` method. Effective only when using WebSocket connections. Default value is false.
|
||||
- TSDBDriver.PROPERTY_KEY_STRICT_CHECK: In Efficient Writing mode, this determines whether to validate the length of table names and variable-length data types. Effective only when using WebSocket connections. Default value is false.
|
||||
- TSDBDriver.PROPERTY_KEY_RETRY_TIMES: In Efficient Writing mode, this is the number of retry attempts for failed write operations. Effective only when using WebSocket connections. Default value is 3.
|
||||
|
||||
Additionally, for native JDBC connections, other parameters such as the log level and SQL length can be set through the URL and Properties.
|
||||
|
||||
**Priority of Configuration Parameters**
|
||||
|
|
|
@ -25,6 +25,7 @@ Support all platforms that can run Node.js.
|
|||
|
||||
| Node.js Connector Version | Major Changes | TDengine Version |
|
||||
| ------------------------- | ------------------------------------------------------------------------ | --------------------------- |
|
||||
| 3.1.5 | Password supports special characters. | - |
|
||||
| 3.1.4 | Modified the readme. | - |
|
||||
| 3.1.3 | Upgraded the es5-ext version to address vulnerabilities in the lower version. | - |
|
||||
| 3.1.2 | Optimized data protocol and parsing, significantly improved performance. | - |
|
||||
|
|
|
@ -41,6 +41,8 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x80000107 | Ref ID is removed | The referenced ref resource has been released | Preserve the scene and logs, report issue on github |
|
||||
| 0x80000108 | Invalid Ref ID | Invalid ref ID | Preserve the scene and logs, report issue on github |
|
||||
| 0x8000010A | Ref is not there | ref information does not exist | Preserve the scene and logs, report issue on github |
|
||||
| 0x8000010B | Driver was not loaded | libtaosnative.so or libtaosws.so was not found in the system path | Reinstall the client driver |
|
||||
| 0x8000010C | Function was not loaded from the driver | Some functions defined in libtaos.so are not implemented in libtaosnative.so or libtaosws.so | Reinstall the client driver |
|
||||
| 0x80000110 | Unexpected generic error | System internal error | Preserve the scene and logs, report issue on github |
|
||||
| 0x80000111 | Action in progress | Operation in progress | 1. Wait for the operation to complete 2. Cancel the operation if necessary 3. If it exceeds a reasonable time and still not completed, preserve the scene and logs, or contact customer support |
|
||||
| 0x80000112 | Out of range | Configuration parameter exceeds allowed value range | Change the parameter |
|
||||
|
@ -73,6 +75,8 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x80000134 | Invalid value | Invalid value | Preserve the scene and logs, report issue on github |
|
||||
| 0x80000135 | Invalid fqdn | Invalid FQDN | Check if the configured or input FQDN value is correct |
|
||||
| 0x8000013C | Invalid disk id | Invalid disk id | Check whether the mounted disk is invalid, or use the parameter diskIDCheckEnabled to skip the disk check. |
|
||||
| 0x8000013D | Decimal value overflow | Decimal value overflow | Check query expression and decimal values |
|
||||
| 0x8000013E | Division by zero error | Division by zero | Check division expression |
|
||||
|
||||
|
||||
## tsc
|
||||
|
@ -109,6 +113,7 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x8000030C | Invalid query id | Internal error | Report issue |
|
||||
| 0x8000030E | Invalid connection id | Internal error | Report issue |
|
||||
| 0x80000315 | User is disabled | User is unavailable | Grant permissions |
|
||||
| 0x80000318 | Mnode internal error | Internal error | Report issue |
|
||||
| 0x80000320 | Object already there | Internal error | Report issue |
|
||||
| 0x80000322 | Invalid table type | Internal error | Report issue |
|
||||
| 0x80000323 | Object not there | Internal error | Report issue |
|
||||
|
@ -165,6 +170,7 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x8000038B | Index not exist | Does not exist | Confirm if the operation is correct |
|
||||
| 0x80000396 | Database in creating status | Database is being created | Retry |
|
||||
| 0x8000039A | Invalid system table name | Internal error | Report issue |
|
||||
| 0x8000039F | No VGroup's leader need to be balanced | A balance leader operation was performed on the VGroup | No VGroup leader needs to be balanced; no action is required |
|
||||
| 0x800003A0 | Mnode already exists | Already exists | Confirm if the operation is correct |
|
||||
| 0x800003A1 | Mnode not there | Does not exist | Confirm if the operation is correct |
|
||||
| 0x800003A2 | Qnode already exists | Already exists | Confirm if the operation is correct |
|
||||
|
@ -370,90 +376,90 @@ This document details the server error codes that may be encountered when using
|
|||
|
||||
## parser
|
||||
|
||||
| Error Code | Description | Possible Error Scenarios or Reasons | Suggested Actions for Users |
|
||||
|------------| ------------------------------------------------------------ |----------------------------------------------------------------------------| ------------------------------------------------------------ |
|
||||
| 0x80002600 | syntax error near | SQL syntax error | Check and correct the SQL statement |
|
||||
| 0x80002601 | Incomplete SQL statement | Incomplete SQL statement | Check and correct the SQL statement |
|
||||
| 0x80002602 | Invalid column name | Illegal or non-existent column name | Check and correct the SQL statement |
|
||||
| 0x80002603 | Table does not exist | Table does not exist | Check and confirm the existence of the table in the SQL statement |
|
||||
| 0x80002604 | Column ambiguously defined | Column (alias) redefined | Check and correct the SQL statement |
|
||||
| 0x80002605 | Invalid value type | Illegal constant value | Check and correct the SQL statement |
|
||||
| 0x80002608 | There mustn't be aggregation | Aggregation function used in illegal clause | Check and correct the SQL statement |
|
||||
| 0x80002609 | ORDER BY item must be the number of a SELECT-list expression | Illegal position specified in Order by | Check and correct the SQL statement |
|
||||
| 0x8000260A | Not a GROUP BY expression | Illegal group by statement | Check and correct the SQL statement |
|
||||
| 0x8000260B | Not SELECTed expression | Illegal expression | Check and correct the SQL statement |
|
||||
| 0x8000260C | Not a single-group group function | Illegal use of column and function | Check and correct the SQL statement |
|
||||
| 0x8000260D | Tags number not matched | Mismatched number of tag columns | Check and correct the SQL statement |
|
||||
| 0x8000260E | Invalid tag name | Invalid or non-existent tag name | Check and correct the SQL statement |
|
||||
| 0x80002610 | Value is too long | Value length exceeds limit | Check and correct the SQL statement or API parameters |
|
||||
| 0x80002611 | Password too short or empty | Password is empty or less than 8 chars | Use a valid password |
|
||||
| 0x80002612 | Port should be an integer that is less than 65535 and greater than 0 | Illegal port number | Check and correct the port number |
|
||||
| 0x80002613 | Endpoint should be in the format of 'fqdn:port' | Incorrect address format | Check and correct the address information |
|
||||
| 0x80002614 | This statement is no longer supported | Feature has been deprecated | Refer to the feature documentation |
|
||||
| 0x80002615 | Interval too small | Interval value exceeds the allowed minimum | Change the INTERVAL value |
|
||||
| 0x80002616 | Database not specified | Database not specified | Specify the database for the current operation |
|
||||
| 0x80002617 | Invalid identifier name | Illegal or invalid length ID | Check the names of related libraries, tables, columns, TAGs, etc. in the statement |
|
||||
| 0x80002618 | Corresponding supertable not in this db | Supertable does not exist | Check if the corresponding supertable exists in the database |
|
||||
| 0x80002619 | Invalid database option | Illegal database option value | Check and correct the database option values |
|
||||
| 0x8000261A | Invalid table option | Illegal table option value | Check and correct the table option values |
|
||||
| 0x80002624 | GROUP BY and WINDOW-clause can't be used together | Group by and window cannot be used together | Check and correct the SQL statement |
|
||||
| 0x80002627 | Aggregate functions do not support nesting | Functions do not support nested use | Check and correct the SQL statement |
|
||||
| 0x80002628 | Only support STATE_WINDOW on integer/bool/varchar column | Unsupported STATE_WINDOW data type | Check and correct the SQL statement |
|
||||
| 0x80002629 | Not support STATE_WINDOW on tag column | STATE_WINDOW not supported on tag column | Check and correct the SQL statement |
|
||||
| 0x8000262A | STATE_WINDOW not support for supertable query | STATE_WINDOW not supported for supertable | Check and correct the SQL statement |
|
||||
| 0x8000262B | SESSION gap should be fixed time window, and greater than 0 | Illegal SESSION window value | Check and correct the SQL statement |
|
||||
| 0x8000262C | Only support SESSION on primary timestamp column | Illegal SESSION window column | Check and correct the SQL statement |
|
||||
| 0x8000262D | Interval offset cannot be negative | Illegal INTERVAL offset value | Check and correct the SQL statement |
|
||||
| 0x8000262E | Cannot use 'year' as offset when interval is 'month' | Illegal INTERVAL offset unit | Check and correct the SQL statement |
|
||||
| 0x8000262F | Interval offset should be shorter than interval | Illegal INTERVAL offset value | Check and correct the SQL statement |
|
||||
| 0x80002630 | Does not support sliding when interval is natural month/year | Illegal sliding unit | Check and correct the SQL statement |
|
||||
| 0x80002631 | sliding value no larger than the interval value | Illegal sliding value | Check and correct the SQL statement |
|
||||
| 0x80002632 | sliding value can not less than 1%% of interval value | Illegal sliding value | Check and correct the SQL statement |
|
||||
| 0x80002633 | Only one tag if there is a json tag | Only single JSON tag column supported | Check and correct the SQL statement |
|
||||
| 0x80002634 | Query block has incorrect number of result columns | Mismatched number of columns | Check and correct the SQL statement |
|
||||
| 0x80002635 | Incorrect TIMESTAMP value | Illegal primary timestamp column value | Check and correct the SQL statement |
|
||||
| 0x80002637 | soffset/offset can not be less than 0 | Illegal soffset/offset value | Check and correct the SQL statement |
|
||||
| 0x80002638 | slimit/soffset only available for PARTITION/GROUP BY query | slimit/soffset only supported for PARTITION BY/GROUP BY statements | Check and correct the SQL statement |
|
||||
| 0x80002639 | Invalid topic query | Unsupported TOPIC query | |
|
||||
| 0x8000263A | Cannot drop supertable in batch | Batch deletion of supertables not supported | Check and correct the SQL statement |
|
||||
| 0x8000263B | Start(end) time of query range required or time range too large | Window count exceeds limit | Check and correct the SQL statement |
|
||||
| 0x8000263C | Duplicated column names | Duplicate column names | Check and correct the SQL statement |
|
||||
| 0x8000263D | Tags length exceeds max length | tag value length exceeds maximum supported range | Check and correct the SQL statement |
|
||||
| 0x8000263E | Row length exceeds max length | Row length check and correct SQL statement | Check and correct the SQL statement |
|
||||
| 0x8000263F | Illegal number of columns | Incorrect number of columns | Check and correct the SQL statement |
|
||||
| 0x80002640 | Too many columns | Number of columns exceeds limit | Check and correct the SQL statement |
|
||||
| 0x80002641 | First column must be timestamp | The first column must be the primary timestamp column | Check and correct the SQL statement |
|
||||
| 0x80002642 | Invalid binary/nchar column/tag length | Incorrect length for binary/nchar | Check and correct the SQL statement |
|
||||
| 0x80002643 | Invalid number of tag columns | Incorrect number of tag columns | Check and correct the SQL statement |
|
||||
| 0x80002644 | Permission denied | Permission error | Check and confirm user permissions |
|
||||
| 0x80002645 | Invalid stream query | Illegal stream statement | Check and correct the SQL statement |
|
||||
| 0x80002646 | Invalid _c0 or_rowts expression | Illegal use of _c0 or_rowts | Check and correct the SQL statement |
|
||||
| 0x80002647 | Invalid timeline function | Function depends on non-existent primary timestamp | Check and correct the SQL statement |
|
||||
| 0x80002648 | Invalid password | Password does not meet standards | Check and change the password |
|
||||
| 0x80002649 | Invalid alter table statement | Illegal modify table statement | Check and correct the SQL statement |
|
||||
| 0x8000264A | Primary timestamp column cannot be dropped | Primary timestamp column cannot be deleted | Check and correct the SQL statement |
|
||||
| 0x8000264B | Only binary/nchar column length could be modified, and the length can only be increased, not decreased | Illegal column modification | Check and correct the SQL statement |
|
||||
| 0x8000264C | Invalid tbname pseudocolumn | Illegal use of tbname column | Check and correct the SQL statement |
|
||||
| 0x8000264D | Invalid function name | Illegal function name | Check and correct the function name |
|
||||
| 0x8000264E | Comment too long | Comment length exceeds limit | Check and correct the SQL statement |
|
||||
| 0x8000264F | Function(s) only allowed in SELECT list, cannot mixed with non scalar functions or columns | Illegal mixing of functions | Check and correct the SQL statement |
|
||||
| 0x80002650 | Window query not supported, since no valid timestamp column included in the result of subquery | Window query depends on non-existent primary timestamp column | Check and correct the SQL statement |
|
||||
| 0x80002651 | No columns can be dropped | Essential columns cannot be deleted | Check and correct the SQL statement |
|
||||
| 0x80002652 | Only tag can be json type | Normal columns do not support JSON type | Check and correct the SQL statement |
|
||||
| 0x80002655 | The DELETE statement must have a definite time window range | Illegal WHERE condition in DELETE statement | Check and correct the SQL statement |
|
||||
| 0x80002656 | The REDISTRIBUTE VGROUP statement only support 1 to 3 dnodes | Illegal number of DNODEs specified in REDISTRIBUTE VGROUP | Check and correct the SQL statement |
|
||||
| 0x80002657 | Fill not allowed | Function does not allow FILL feature | Check and correct the SQL statement |
|
||||
| 0x80002658 | Invalid windows pc | Illegal use of window pseudocolumn | Check and correct the SQL statement |
|
||||
| 0x80002659 | Window not allowed | Function cannot be used in window | Check and correct the SQL statement |
|
||||
| 0x8000265A | Stream not allowed | Function cannot be used in stream computation | Check and correct the SQL statement |
|
||||
| 0x8000265B | Group by not allowed | Function cannot be used in grouping | Check and correct the SQL statement |
|
||||
| 0x8000265D | Invalid interp clause | Illegal INTERP or related statement | Check and correct the SQL statement |
|
||||
| 0x8000265E | Not valid function in window | Illegal window statement | Check and correct the SQL statement |
|
||||
| 0x8000265F | Only support single table | Function only supported in single table queries | Check and correct the SQL statement |
|
||||
| 0x80002660 | Invalid sma index | Illegal creation of SMA statement | Check and correct the SQL statement |
|
||||
| 0x80002661 | Invalid SELECTed expression | Invalid query statement | Check and correct the SQL statement |
|
||||
| 0x80002662 | Fail to get table info | Failed to retrieve table metadata information | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002663 | Not unique table/alias | Table name (alias) conflict | Check and correct the SQL statement |
|
||||
| 0x80002664 | Join requires valid time-series input | Unsupported JOIN query without primary timestamp column output in subquery | Check and correct the SQL statement |
|
||||
| 0x80002665 | The _TAGS pseudocolumn can only be used for subtable and supertable queries | Illegal tag column query | Check and correct the SQL statement |
|
||||
| 0x80002666 | Subquery does not output primary timestamp column | The subquery does not output the primary timestamp column | Check and correct the SQL statement |
|
||||
|
@ -462,12 +468,19 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x80002688 | Cannot use 'year' or 'month' as true_for duration | Use year or month as true_for_duration | Check and correct the SQL statement |
|
||||
| 0x80002689 | Invalid using cols function | Illegal using cols function | Check and correct the SQL statement |
|
||||
| 0x8000268A | Cols function's first param must be a select function that output a single row | The first parameter of the cols function should be a selection function | Check and correct the SQL statement |
|
||||
| 0x8000268B | Invalid using cols function with multiple output columns | Illegal using the cols function for multiple column output | Check and correct the SQL statement |
|
||||
| 0x8000268C | Invalid using alias for cols function | Illegal cols function alias | Check and correct the SQL statement |
|
||||
| 0x8000268B | Invalid using alias for cols function | Illegal cols function alias | Check and correct the SQL statement |
|
||||
| 0x8000268C | Join primary key col must be timestamp type | Join primary key data type error | Check and correct the SQL statement |
|
||||
| 0x8000268D | Invalid virtual table's ref column | Create/Update Virtual table using incorrect data source column | Check and correct the SQL statement |
|
||||
| 0x8000268E | Invalid table type | Incorrect Table type | Check and correct the SQL statement |
|
||||
| 0x8000268F | Invalid ref column type | Virtual table's column type and data source column's type are different | Check and correct the SQL statement |
|
||||
| 0x80002690 | Create child table using virtual super table | Create non-virtual child table using virtual super table | Check and correct the SQL statement |
|
||||
| 0x800026FF | Parser internal error | Internal error in parser | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002700 | Planner internal error | Internal error in planner | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002701 | Expect ts equal | JOIN condition validation failed | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002702 | Cross join not support | CROSS JOIN not supported | Check and correct the SQL statement |
|
||||
| 0x80002704 | Planner slot key not found | Planner cannot find slotId during making physic plan | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002705 | Planner invalid table type | Planner get invalid table type | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002706 | Planner invalid query control plan type | Planner get invalid query control plan type during making physic plan | Preserve the scene and logs, report issue on GitHub |
|
||||
|
||||
## function
|
||||
|
||||
|
@ -545,3 +558,30 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x80004017 | Invalid status, please subscribe topic first | tmq status invalidate | Without calling subscribe, directly poll data |
|
||||
| 0x80004100 | Stream task not exist | The stream computing task does not exist | Check the server-side error logs |
|
||||
|
||||
## TDgpt
|
||||
|
||||
| Error Code | Description | Possible Error Scenarios or Reasons | Recommended Actions for Users |
|
||||
| ---------- | --------------------- | -------------------------------------------------------------------------------- | ------------------------------ |
|
||||
| 0x80000440 | Analysis service response is NULL | The response content is empty | Check the taosanode.app.log for detailed response information |
|
||||
| 0x80000441 | Analysis service can't access | The service is not working correctly, or the network is broken | Check the status of taosanode and the network |
|
||||
| 0x80000442 | Analysis algorithm is missing | Algorithm used in analysis is not specified | Add the "algo" parameter in forecast function or anomaly_window clause |
|
||||
| 0x80000443 | Analysis algorithm not loaded | The specified algorithm is not available | Check for the specified algorithm |
|
||||
| 0x80000444 | Analysis invalid buffer type | The buffered data type is invalid | Check the taosanode.app.log for more details |
|
||||
| 0x80000445 | Analysis failed since anode return error | The anode responded with an error message | Check the taosanode.app.log for more details |
|
||||
| 0x80000446 | Analysis failed since too many input rows for anode | Too many rows of input data | Reduce the number of input rows to below the threshold |
|
||||
| 0x80000447 | white-noise data not processed | White-noise data is not processed | Ignore the white-noise check or use different input data |
|
||||
| 0x80000448 | Analysis internal error, not processed | Internal error occurs | Check the taosanode.app.log for more details |
|
||||
|
||||
|
||||
## virtual table
|
||||
|
||||
| Error Code | Description | Possible Error Scenarios or Reasons | Recommended Actions for Users |
|
||||
|------------|---------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------|
|
||||
| 0x80006200 | Virtual table scan internal error | virtual table scan operator internal error, generally does not occur | Check error logs, contact development for handling |
|
||||
| 0x80006201 | Virtual table scan invalid downstream operator type | The incorrect execution plan generated causes the downstream operator type of the virtual table scan operator to be incorrect. | Check error logs, contact development for handling |
|
||||
| 0x80006202 | Virtual table prim timestamp column should not has ref | The timestamp primary key column of a virtual table should not have a data source. If it does, this error will occur during subsequent queries on the virtual table. | Check error logs, contact development for handling |
|
||||
| 0x80006203 | Create virtual child table must use virtual super table | Create virtual child table using non-virtual super table | create virtual child table using virtual super table |
|
||||
| 0x80006204 | Virtual table not support decimal type | Create virtual table using decimal type | create virtual table without using decimal type |
|
||||
| 0x80006205 | Virtual table not support in STMT query and STMT insert | Use virtual table in stmt query and stmt insert | do not use virtual table in stmt query and insert |
|
||||
| 0x80006206 | Virtual table not support in Topic | Use virtual table in topic | do not use virtual table in topic |
|
||||
| 0x80006207 | Virtual super table query not support origin table from different databases | The origin tables of the virtual super table's child tables come from different databases | Make sure all origin tables of the virtual super table's child tables come from the same database |
|
||||
|
|
|
@ -0,0 +1,297 @@
|
|||
---
|
||||
title: Usage of Special Characters in Passwords
|
||||
description: Usage of special characters in user passwords in TDengine
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
import TabItem from "@theme/TabItem";
|
||||
|
||||
TDengine user passwords must meet the following rules:
|
||||
|
||||
1. The username must not exceed 23 bytes.
|
||||
2. The password length must be between 8 and 255 characters.
|
||||
3. The range of password characters:
|
||||
1. Uppercase letters: `A-Z`
|
||||
2. Lowercase letters: `a-z`
|
||||
3. Numbers: `0-9`
|
||||
4. Special characters: `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`
|
||||
4. When the strong password option is enabled (`EnableStrongPassword` set to 1, the default), the password must contain characters from at least three of the following categories: uppercase letters, lowercase letters, numbers, and special characters. When it is not enabled, there are no restrictions on character types.
|
||||
|
||||
## Usage Guide for Special Characters in Different Components
|
||||
|
||||
Take the username `user1` and password `Ab1!@#$%^&*()-_+=[]{}` as an example.
|
||||
|
||||
```sql
|
||||
CREATE USER user1 PASS 'Ab1!@#$%^&*()-_+=[]{}';
|
||||
```
|
||||
|
||||
<Tabs defaultValue="shell" groupId="component">
|
||||
<TabItem label="CLI" value="shell">
|
||||
|
||||
In the [TDengine Command Line Interface (CLI)](../../tdengine-reference/tools/tdengine-cli/), note the following:
|
||||
|
||||
- If the `-p` parameter is used without a password, you will be prompted to enter a password, and any acceptable characters can be entered.
|
||||
- If the `-p` parameter is used with a password and the password contains special characters, single quotes or shell escaping must be used.
|
||||
|
||||
Login with user `user1`:
|
||||
|
||||
```shell
|
||||
taos -u user1 -p'Ab1!@#$%^&*()-_+=[]{}'
|
||||
taos -u user1 -pAb1\!\@\#\$\%\^\&\*\(\)\-\_\+\=\[\]\{\}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="taosdump" value="taosdump">
|
||||
|
||||
In [taosdump](../../tdengine-reference/tools/taosdump/), note the following:
|
||||
|
||||
- If the `-p` parameter is used without a password, you will be prompted to enter a password, and any acceptable characters can be entered.
|
||||
- If the `-p` parameter is used with a password, and the password contains special characters, single quotes or escaping must be used.
|
||||
|
||||
Backup database `test` with user `user1`:
|
||||
|
||||
```shell
|
||||
taosdump -u user1 -p'Ab1!@#$%^&*()-_+=[]{}' -D test
|
||||
taosdump -u user1 -pAb1\!\@\#\$\%\^\&\*\(\)\-\_\+\=\[\]\{\} -D test
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="Benchmark" value="benchmark">
|
||||
|
||||
In [taosBenchmark](../../tdengine-reference/tools/taosbenchmark/), note the following:
|
||||
|
||||
- If the `-p` parameter is used without a password, you will be prompted to enter a password, and any acceptable characters can be entered.
|
||||
- If the `-p` parameter is used with a password, and the password contains special characters, single quotes or escaping must be used.
|
||||
|
||||
Example of data write test with user `user1`:
|
||||
|
||||
```shell
|
||||
taosBenchmark -u user1 -p'Ab1!@#$%^&*()-_+=[]{}' -d test -y
|
||||
```
|
||||
|
||||
When using `taosBenchmark -f <JSON>`, there are no restrictions on the password in the JSON file.
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="taosX" value="taosx">
|
||||
|
||||
[taosX](../../tdengine-reference/components/taosx/) uses DSN to represent TDengine connections, in the format: `(taos|tmq)[+ws]://<user>:<pass>@<ip>:<port>`, where `<pass>` can contain special characters, such as: `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@192.168.10.10:6041`.
|
||||
|
||||
Example of exporting data with user `user1`:
|
||||
|
||||
```shell
|
||||
taosx -f 'taos://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6030?query=select * from test.t1' \
|
||||
-t 'csv:./test.csv'
|
||||
```
|
||||
|
||||
Note that if the password can be URL decoded, the URL decoded result will be used as the password. For example: `taos+ws://user1:Ab1%21%40%23%24%25%5E%26%2A%28%29-_%2B%3D%5B%5D%7B%7D@localhost:6041` is equivalent to `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6041`.
|
||||
|
||||
No special handling is required in [Explorer](../../tdengine-reference/components/taosexplorer/), just use it directly.
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="Java" value="java">
|
||||
|
||||
When using special character passwords in JDBC, the password needs to be URL encoded, as shown below:
|
||||
|
||||
```java
|
||||
package com.taosdata.example;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
import java.sql.SQLException;
|
||||
import java.util.Properties;
|
||||
import java.net.URLEncoder;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
|
||||
import com.taosdata.jdbc.TSDBDriver;
|
||||
|
||||
public class JdbcPassDemo {
|
||||
public static void main(String[] args) throws Exception {
|
||||
String password = "Ab1!@#$%^&*()-_+=[]{}";
|
||||
String encodedPassword = URLEncoder.encode(password, StandardCharsets.UTF_8.toString());
|
||||
String jdbcUrl = "jdbc:TAOS-WS://localhost:6041";
|
||||
Properties connProps = new Properties();
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "user1");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, encodedPassword);
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
|
||||
try (Connection conn = DriverManager.getConnection(jdbcUrl, connProps)) {
|
||||
System.out.println("Connected to " + jdbcUrl + " successfully.");
|
||||
|
||||
// you can use the connection for execute SQL here
|
||||
|
||||
} catch (Exception ex) {
|
||||
// please refer to the JDBC specifications for detailed exceptions info
|
||||
System.out.printf("Failed to connect to %s, %sErrMessage: %s%n",
|
||||
jdbcUrl,
|
||||
ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "",
|
||||
ex.getMessage());
|
||||
// Print stack trace for context in examples. Use logging in production.
|
||||
ex.printStackTrace();
|
||||
throw ex;
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="Python" value="python">
|
||||
|
||||
No special handling is required for special character passwords in Python, as shown below:
|
||||
|
||||
```python
|
||||
import taos
|
||||
import taosws
|
||||
|
||||
|
||||
def create_connection():
|
||||
host = "localhost"
|
||||
port = 6030
|
||||
return taos.connect(
|
||||
user="user1",
|
||||
password="Ab1!@#$%^&*()-_+=[]{}",
|
||||
host=host,
|
||||
port=port,
|
||||
)
|
||||
|
||||
def create_ws_connection():
|
||||
host = "localhost"
|
||||
port = 6041
|
||||
return taosws.connect(
|
||||
user="user1",
|
||||
password="Ab1!@#$%^&*()-_+=[]{}",
|
||||
host=host,
|
||||
port=port,
|
||||
)
|
||||
|
||||
|
||||
def show_databases(conn):
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("show databases")
|
||||
print(cursor.fetchall())
|
||||
cursor.close()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
print("Connect with native protocol")
|
||||
conn = create_connection()
|
||||
show_databases(conn)
|
||||
print("Connect with websocket protocol")
|
||||
conn = create_ws_connection()
|
||||
show_databases(conn)
|
||||
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="Go" value="go">
|
||||
|
||||
Starting from version 3.6.0, the Go connector supports passwords containing special characters, which need to be URL-encoded (for example, with `url.QueryEscape`, as shown below).
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
|
||||
_ "github.com/taosdata/driver-go/v3/taosWS"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var user = "user1"
|
||||
var password = "Ab1!@#$%^&*()-_+=[]{}"
|
||||
var encodedPassword = url.QueryEscape(password)
|
||||
var taosDSN = user + ":" + encodedPassword + "@ws(localhost:6041)/"
|
||||
taos, err := sql.Open("taosWS", taosDSN)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to connect to " + taosDSN + "; ErrMessage: " + err.Error())
|
||||
}
|
||||
fmt.Println("Connected to " + taosDSN + " successfully.")
|
||||
defer taos.Close()
|
||||
}
|
||||
```
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="Rust" value="rust">
|
||||
|
||||
In Rust, DSN is used to represent TDengine connections, in the format: `(taos|tmq)[+ws]://<user>:<pass>@<ip>:<port>`, where `<pass>` can contain special characters, such as: `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@192.168.10.10:6041`.
|
||||
|
||||
```rust
|
||||
let dsn = "taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6041";
|
||||
let connection = TaosBuilder::from_dsn(&dsn)?.build().await?;
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="Node.js" value="node">
|
||||
|
||||
Starting from version 3.1.5, the Node.js connector supports passwords containing all valid characters.
|
||||
|
||||
```js
|
||||
const taos = require("@tdengine/websocket");
|
||||
|
||||
let dsn = 'ws://localhost:6041';
|
||||
async function createConnect() {
|
||||
try {
|
||||
let conf = new taos.WSConfig(dsn);
|
||||
conf.setUser('user1');
|
||||
conf.setPwd('Ab1!@#$%^&*()-_+=[]{}');
|
||||
conf.setDb('test');
|
||||
conn = await taos.sqlConnect(conf);
|
||||
console.log("Connected to " + dsn + " successfully.");
|
||||
return conn;
|
||||
} catch (err) {
|
||||
console.log("Connection failed with code: " + err.code + ", message: " + err.message);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
createConnect()
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="C#" value="csharp">
|
||||
|
||||
When using passwords in C#, note that connection strings do not support semicolons (as semicolons are delimiters). In this case, you can construct the `ConnectionStringBuilder` without a password, and then set the username and password.
|
||||
|
||||
As shown below:
|
||||
|
||||
```csharp
|
||||
var builder = new ConnectionStringBuilder("host=localhost;port=6030");
|
||||
builder.Username = "user1";
|
||||
builder.Password = "Ab1!@#$%^&*()-_+=[]{}";
|
||||
using (var client = DbDriver.Open(builder)){}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="C" value="c">
|
||||
|
||||
There are no restrictions on passwords in C.
|
||||
|
||||
```c
|
||||
TAOS *taos = taos_connect("localhost", "user1", "Ab1!@#$%^&*()-_+=[]{}", NULL, 6030);
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="REST" value="rest">
|
||||
|
||||
When using passwords in REST API, note the following:
|
||||
|
||||
- Passwords use Basic Auth, in the format `Authorization: Basic base64(<user>:<pass>)`.
|
||||
- Passwords containing colons `:` are not supported.
|
||||
|
||||
The following two methods are equivalent:
|
||||
|
||||
```shell
|
||||
curl -u'user1:Ab1!@#$%^&*()-_+=[]{}' \
|
||||
-d 'show databases' http://localhost:6041/rest/sql
|
||||
curl -H 'Authorization: Basic dXNlcjE6QWIxIUAjJCVeJiooKS1fKz1bXXt9' \
|
||||
-d 'show databases' http://localhost:6041/rest/sql
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
|
@ -9,6 +9,7 @@ TARGETS = connect_example \
|
|||
with_reqid_demo \
|
||||
sml_insert_demo \
|
||||
stmt_insert_demo \
|
||||
stmt2_insert_demo \
|
||||
tmq_demo
|
||||
|
||||
SOURCES = connect_example.c \
|
||||
|
@ -18,6 +19,7 @@ SOURCES = connect_example.c \
|
|||
with_reqid_demo.c \
|
||||
sml_insert_demo.c \
|
||||
stmt_insert_demo.c \
|
||||
stmt2_insert_demo.c \
|
||||
tmq_demo.c
|
||||
|
||||
LIBS = -ltaos -lpthread
|
||||
|
@ -31,4 +33,4 @@ $(TARGETS):
|
|||
$(CC) $(CFLAGS) -o $@ $(wildcard $(@F).c) $(LIBS)
|
||||
|
||||
clean:
|
||||
rm -f $(TARGETS)
|
||||
rm -f $(TARGETS)
|
||||
|
|
|
@ -0,0 +1,204 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
// TAOS standard API example. The same syntax as MySQL, but only a subset
|
||||
// to compile: gcc -o stmt2_insert_demo stmt2_insert_demo.c -ltaos
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/time.h>
|
||||
#include "taos.h"
|
||||
|
||||
#define NUM_OF_SUB_TABLES 10
|
||||
#define NUM_OF_ROWS 10
|
||||
|
||||
/**
|
||||
* @brief Executes an SQL query and checks for errors.
|
||||
*
|
||||
* @param taos Pointer to TAOS connection.
|
||||
* @param sql SQL query string.
|
||||
*/
|
||||
void executeSQL(TAOS *taos, const char *sql) {
|
||||
TAOS_RES *res = taos_query(taos, sql);
|
||||
int code = taos_errno(res);
|
||||
if (code != 0) {
|
||||
fprintf(stderr, "Error: %s\n", taos_errstr(res));
|
||||
taos_free_result(res);
|
||||
taos_close(taos);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
taos_free_result(res);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Checks return status and exits if an error occurs.
|
||||
*
|
||||
* @param stmt2 Pointer to TAOS_STMT2.
|
||||
* @param code Error code.
|
||||
* @param msg Error message prefix.
|
||||
*/
|
||||
void checkErrorCode(TAOS_STMT2 *stmt2, int code, const char *msg) {
|
||||
if (code != 0) {
|
||||
fprintf(stderr, "%s. Code: %d, Error: %s\n", msg, code, taos_stmt2_error(stmt2));
|
||||
taos_stmt2_close(stmt2);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Prepares data bindings for batch insertion.
|
||||
*
|
||||
* @param table_name Pointer to store allocated table names.
|
||||
* @param tags Pointer to store allocated tag bindings.
|
||||
* @param params Pointer to store allocated parameter bindings.
|
||||
*/
|
||||
void prepareBindData(char ***table_name, TAOS_STMT2_BIND ***tags, TAOS_STMT2_BIND ***params) {
|
||||
*table_name = (char **)malloc(NUM_OF_SUB_TABLES * sizeof(char *));
|
||||
*tags = (TAOS_STMT2_BIND **)malloc(NUM_OF_SUB_TABLES * sizeof(TAOS_STMT2_BIND *));
|
||||
*params = (TAOS_STMT2_BIND **)malloc(NUM_OF_SUB_TABLES * sizeof(TAOS_STMT2_BIND *));
|
||||
|
||||
for (int i = 0; i < NUM_OF_SUB_TABLES; i++) {
|
||||
// Allocate and assign table name
|
||||
(*table_name)[i] = (char *)malloc(20 * sizeof(char));
|
||||
sprintf((*table_name)[i], "d_bind_%d", i);
|
||||
|
||||
// Allocate memory for tags data
|
||||
int *gid = (int *)malloc(sizeof(int));
|
||||
int *gid_len = (int *)malloc(sizeof(int));
|
||||
*gid = i;
|
||||
*gid_len = sizeof(int);
|
||||
|
||||
char *location = (char *)malloc(20 * sizeof(char));
|
||||
int *location_len = (int *)malloc(sizeof(int));
|
||||
*location_len = sprintf(location, "location_%d", i);
|
||||
|
||||
(*tags)[i] = (TAOS_STMT2_BIND *)malloc(2 * sizeof(TAOS_STMT2_BIND));
|
||||
(*tags)[i][0] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_INT, gid, gid_len, NULL, 1};
|
||||
(*tags)[i][1] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_BINARY, location, location_len, NULL, 1};
|
||||
|
||||
// Allocate memory for columns data
|
||||
(*params)[i] = (TAOS_STMT2_BIND *)malloc(4 * sizeof(TAOS_STMT2_BIND));
|
||||
|
||||
int64_t *ts = (int64_t *)malloc(NUM_OF_ROWS * sizeof(int64_t));
|
||||
float *current = (float *)malloc(NUM_OF_ROWS * sizeof(float));
|
||||
int *voltage = (int *)malloc(NUM_OF_ROWS * sizeof(int));
|
||||
float *phase = (float *)malloc(NUM_OF_ROWS * sizeof(float));
|
||||
int32_t *ts_len = (int32_t *)malloc(NUM_OF_ROWS * sizeof(int32_t));
|
||||
int32_t *current_len = (int32_t *)malloc(NUM_OF_ROWS * sizeof(int32_t));
|
||||
int32_t *voltage_len = (int32_t *)malloc(NUM_OF_ROWS * sizeof(int32_t));
|
||||
int32_t *phase_len = (int32_t *)malloc(NUM_OF_ROWS * sizeof(int32_t));
|
||||
|
||||
(*params)[i][0] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_TIMESTAMP, ts, ts_len, NULL, NUM_OF_ROWS};
|
||||
(*params)[i][1] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_FLOAT, current, current_len, NULL, NUM_OF_ROWS};
|
||||
(*params)[i][2] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_INT, voltage, voltage_len, NULL, NUM_OF_ROWS};
|
||||
(*params)[i][3] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_FLOAT, phase, phase_len, NULL, NUM_OF_ROWS};
|
||||
|
||||
for (int j = 0; j < NUM_OF_ROWS; j++) {
|
||||
struct timeval tv;
|
||||
gettimeofday(&tv, NULL);
|
||||
ts[j] = tv.tv_sec * 1000LL + tv.tv_usec / 1000 + j;
|
||||
current[j] = (float)rand() / RAND_MAX * 30;
|
||||
voltage[j] = rand() % 300;
|
||||
phase[j] = (float)rand() / RAND_MAX;
|
||||
|
||||
ts_len[j] = sizeof(int64_t);
|
||||
current_len[j] = sizeof(float);
|
||||
voltage_len[j] = sizeof(int);
|
||||
phase_len[j] = sizeof(float);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Frees allocated memory for binding data.
|
||||
*
|
||||
* @param table_name Pointer to allocated table names.
|
||||
* @param tags Pointer to allocated tag bindings.
|
||||
* @param params Pointer to allocated parameter bindings.
|
||||
*/
|
||||
void freeBindData(char ***table_name, TAOS_STMT2_BIND ***tags, TAOS_STMT2_BIND ***params) {
|
||||
for (int i = 0; i < NUM_OF_SUB_TABLES; i++) {
|
||||
free((*table_name)[i]);
|
||||
for (int j = 0; j < 2; j++) {
|
||||
free((*tags)[i][j].buffer);
|
||||
free((*tags)[i][j].length);
|
||||
}
|
||||
free((*tags)[i]);
|
||||
|
||||
for (int j = 0; j < 4; j++) {
|
||||
free((*params)[i][j].buffer);
|
||||
free((*params)[i][j].length);
|
||||
}
|
||||
free((*params)[i]);
|
||||
}
|
||||
free(*table_name);
|
||||
free(*tags);
|
||||
free(*params);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Inserts data using the TAOS stmt2 API.
|
||||
*
|
||||
* @param taos Pointer to TAOS connection.
|
||||
*/
|
||||
void insertData(TAOS *taos) {
|
||||
TAOS_STMT2_OPTION option = {0, false, false, NULL, NULL};
|
||||
TAOS_STMT2 *stmt2 = taos_stmt2_init(taos, &option);
|
||||
if (!stmt2) {
|
||||
fprintf(stderr, "Failed to initialize TAOS statement.\n");
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
// stmt2 prepare sql
|
||||
checkErrorCode(stmt2, taos_stmt2_prepare(stmt2, "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)", 0),
|
||||
"Statement preparation failed");
|
||||
|
||||
char **table_name;
|
||||
TAOS_STMT2_BIND **tags, **params;
|
||||
prepareBindData(&table_name, &tags, ¶ms);
|
||||
// stmt2 bind batch
|
||||
TAOS_STMT2_BINDV bindv = {NUM_OF_SUB_TABLES, table_name, tags, params};
|
||||
checkErrorCode(stmt2, taos_stmt2_bind_param(stmt2, &bindv, -1), "Parameter binding failed");
|
||||
// stmt2 exec batch
|
||||
int affected;
|
||||
checkErrorCode(stmt2, taos_stmt2_exec(stmt2, &affected), "Execution failed");
|
||||
printf("Successfully inserted %d rows.\n", affected);
|
||||
// free and close
|
||||
freeBindData(&table_name, &tags, ¶ms);
|
||||
taos_stmt2_close(stmt2);
|
||||
}
|
||||
|
||||
int main() {
|
||||
const char *host = "localhost";
|
||||
const char *user = "root";
|
||||
const char *password = "taosdata";
|
||||
uint16_t port = 6030;
|
||||
TAOS *taos = taos_connect(host, user, password, NULL, port);
|
||||
if (taos == NULL) {
|
||||
fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL),
|
||||
taos_errstr(NULL));
|
||||
taos_cleanup();
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
// create database and table
|
||||
executeSQL(taos, "CREATE DATABASE IF NOT EXISTS power");
|
||||
executeSQL(taos, "USE power");
|
||||
executeSQL(taos,
|
||||
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS "
|
||||
"(groupId INT, location BINARY(24))");
|
||||
insertData(taos);
|
||||
taos_close(taos);
|
||||
taos_cleanup();
|
||||
}
|
|
@ -4,7 +4,7 @@
|
|||
"main": "index.js",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@tdengine/websocket": "^3.1.2"
|
||||
"@tdengine/websocket": "^3.1.5"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "echo \"Error: no test specified\" && exit 1"
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
const { sleep } = require("@tdengine/websocket");
|
||||
const taos = require("@tdengine/websocket");
|
||||
|
||||
// ANCHOR: create_consumer
|
||||
|
@ -52,6 +51,12 @@ async function prepare() {
|
|||
await wsSql.close();
|
||||
}
|
||||
|
||||
const delay = function(ms) {
|
||||
return new Promise(function(resolve) {
|
||||
setTimeout(resolve, ms);
|
||||
});
|
||||
};
|
||||
|
||||
async function insert() {
|
||||
let conf = new taos.WSConfig('ws://localhost:6041');
|
||||
conf.setUser('root');
|
||||
|
@ -60,7 +65,7 @@ async function insert() {
|
|||
let wsSql = await taos.sqlConnect(conf);
|
||||
for (let i = 0; i < 50; i++) {
|
||||
await wsSql.exec(`INSERT INTO d1001 USING ${stable} (location, groupId) TAGS ("California.SanFrancisco", 3) VALUES (NOW, ${10 + i}, ${200 + i}, ${0.32 + i})`);
|
||||
await sleep(100);
|
||||
await delay(100);
|
||||
}
|
||||
await wsSql.close();
|
||||
}
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
const { sleep } = require("@tdengine/websocket");
|
||||
const taos = require("@tdengine/websocket");
|
||||
|
||||
const db = 'power';
|
||||
|
|
|
@ -0,0 +1,207 @@
|
|||
# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
|
||||
# ┃ ██████ ██████ ██████ █ █ █ █ █ █▄ ▀███ █ ┃
|
||||
# ┃ ▄▄▄▄▄█ █▄▄▄▄▄ ▄▄▄▄▄█ ▀▀▀▀▀█▀▀▀▀▀ █ ▀▀▀▀▀█ ████████▌▐███ ███▄ ▀█ █ ▀▀▀▀▀ ┃
|
||||
# ┃ █▀▀▀▀▀ █▀▀▀▀▀ █▀██▀▀ ▄▄▄▄▄ █ ▄▄▄▄▄█ ▄▄▄▄▄█ ████████▌▐███ █████▄ █ ▄▄▄▄▄ ┃
|
||||
# ┃ █ ██████ █ ▀█▄ █ ██████ █ ███▌▐███ ███████▄ █ ┃
|
||||
# ┣━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┫
|
||||
# ┃ Copyright (c) 2017, the Perspective Authors. ┃
|
||||
# ┃ ╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌ ┃
|
||||
# ┃ This file is part of the Perspective library, distributed under the terms ┃
|
||||
# ┃ of the [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0). ┃
|
||||
# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
|
||||
|
||||
import logging
|
||||
import tornado.websocket
|
||||
import tornado.web
|
||||
import tornado.ioloop
|
||||
from datetime import date, datetime
|
||||
import perspective
|
||||
import perspective.handlers.tornado
|
||||
import json
|
||||
import taosws
|
||||
|
||||
|
||||
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
||||
logger = logging.getLogger('main')
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# TDengine connection parameters
|
||||
# =============================================================================
|
||||
TAOS_HOST = "localhost" # TDengine server host
|
||||
TAOS_PORT = 6041 # TDengine server port
|
||||
TAOS_USER = "root" # TDengine username
|
||||
TAOS_PASSWORD = "taosdata" # TDengine password
|
||||
|
||||
TAOS_DATABASE = "power" # TDengine database name
|
||||
TAOS_TABLENAME = "meters" # TDengine table name
|
||||
|
||||
# =============================================================================
|
||||
# Perspective server parameters
|
||||
# =============================================================================
|
||||
PERSPECTIVE_TABLE_NAME = "meters_values" # name of the Perspective table
|
||||
PERSPECTIVE_REFRESH_RATE = 250 # refresh rate in milliseconds
|
||||
|
||||
|
||||
class CustomJSONEncoder(json.JSONEncoder):
|
||||
"""
|
||||
Custom JSON encoder that serializes datetime and date objects
|
||||
"""
|
||||
def default(self, obj):
|
||||
if isinstance(obj, datetime):
|
||||
return obj.isoformat()
|
||||
elif isinstance(obj, date):
|
||||
return obj.isoformat()
|
||||
return super().default(obj)
|
||||
|
||||
|
||||
json.JSONEncoder.default = CustomJSONEncoder().default
|
||||
|
||||
|
||||
def convert_ts(ts) -> datetime:
|
||||
"""
|
||||
Convert a timestamp string to a datetime object
|
||||
"""
|
||||
for fmt in ('%Y-%m-%d %H:%M:%S.%f %z', '%Y-%m-%d %H:%M:%S %z'):
|
||||
try:
|
||||
return datetime.strptime(ts, fmt)
|
||||
except ValueError:
|
||||
continue
|
||||
raise ValueError(f"Time data '{ts}' does not match any format")
|
||||
|
||||
|
||||
def create_tdengine_connection(
|
||||
host: str = TAOS_HOST,
|
||||
port: int = TAOS_PORT,
|
||||
user: str = TAOS_USER,
|
||||
password: str = TAOS_PASSWORD,
|
||||
) -> taosws.Connection:
|
||||
try:
|
||||
# connect to the tdengine server
|
||||
conn = taosws.connect(
|
||||
user=user,
|
||||
password=password,
|
||||
host=host,
|
||||
port=port,
|
||||
)
|
||||
# switch to the right database
|
||||
conn.execute(f"USE {TAOS_DATABASE}")
|
||||
# connection successful
|
||||
logger.info(f"Connected to tdengine successfully: {host}:{port}")
|
||||
return conn
|
||||
except Exception as err:
|
||||
logger.error(f"Failed to connect to tdengine: {host}:{port} -- ErrMessage: {err}")
|
||||
raise err
|
||||
|
||||
|
||||
def read_tdengine(
|
||||
conn: taosws.Connection,
|
||||
) -> list[dict]:
|
||||
try:
|
||||
# query the database
|
||||
sql = f"""
|
||||
SELECT `ts`, location, groupid, current, voltage, phase
|
||||
FROM {TAOS_TABLENAME}
|
||||
WHERE `ts` >= NOW() - 12h
|
||||
ORDER BY `ts` DESC
|
||||
LIMIT 1000
|
||||
"""
|
||||
logger.debug(f"Executing query: {sql}")
|
||||
res = conn.query(sql)
|
||||
data = [
|
||||
{
|
||||
"timestamp": convert_ts(row[0]),
|
||||
"location": row[1],
|
||||
"groupid": row[2],
|
||||
"current": row[3],
|
||||
"voltage": row[4],
|
||||
"phase": row[5],
|
||||
}
|
||||
for row in res
|
||||
]
|
||||
logger.info(f"select result: {data}")
|
||||
return data
|
||||
except Exception as err:
|
||||
logger.error(f"Failed to query tdengine: {err}")
|
||||
raise err
|
||||
|
||||
|
||||
# ANCHOR: perspective_server
|
||||
def perspective_thread(perspective_server: perspective.Server, tdengine_conn: taosws.Connection):
|
||||
"""
|
||||
Create a new Perspective table and update it with new data every PERSPECTIVE_REFRESH_RATE milliseconds
|
||||
"""
|
||||
# create a new Perspective table
|
||||
client = perspective_server.new_local_client()
|
||||
schema = {
|
||||
"timestamp": datetime,
|
||||
"location": str,
|
||||
"groupid": int,
|
||||
"current": float,
|
||||
"voltage": int,
|
||||
"phase": float,
|
||||
}
|
||||
# define the table schema
|
||||
table = client.table(
|
||||
schema,
|
||||
limit=1000, # maximum number of rows in the table
|
||||
name=PERSPECTIVE_TABLE_NAME, # table name. Use this with perspective-viewer on the client side
|
||||
)
|
||||
logger.info("Created new Perspective table")
|
||||
|
||||
# update with new data
|
||||
def updater():
|
||||
data = read_tdengine(tdengine_conn)
|
||||
table.update(data)
|
||||
logger.debug(f"Updated Perspective table: {len(data)} rows")
|
||||
|
||||
logger.info(f"Starting tornado ioloop update loop every {PERSPECTIVE_REFRESH_RATE} milliseconds")
|
||||
# start the periodic callback to update the table data
|
||||
callback = tornado.ioloop.PeriodicCallback(callback=updater, callback_time=PERSPECTIVE_REFRESH_RATE)
|
||||
callback.start()
|
||||
|
||||
# ANCHOR_END: perspective_server
|
||||
|
||||
def make_app(perspective_server):
|
||||
"""
|
||||
Create a new Tornado application with a websocket handler that
|
||||
serves a Perspective table. PerspectiveTornadoHandler handles
|
||||
the websocket connection and streams the Perspective table changes
|
||||
to the client.
|
||||
"""
|
||||
return tornado.web.Application([
|
||||
(
|
||||
r"/websocket", # websocket endpoint. Use this URL to configure the websocket client OR Prospective Server adapter
|
||||
perspective.handlers.tornado.PerspectiveTornadoHandler, # PerspectiveTornadoHandler handles perspective table updates <-> websocket client
|
||||
{"perspective_server": perspective_server}, # pass the perspective server to the handler
|
||||
),
|
||||
])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logger.info("TDEngine <-> Perspective Demo")
|
||||
|
||||
# create a new Perspective server
|
||||
logger.info("Creating new Perspective server")
|
||||
perspective_server = perspective.Server()
|
||||
# create the tdengine connection
|
||||
logger.info("Creating new TDEngine connection")
|
||||
tdengine_conn = create_tdengine_connection()
|
||||
|
||||
# setup and start the Tornado app
|
||||
logger.info("Creating Tornado server")
|
||||
app = make_app(perspective_server)
|
||||
app.listen(8085, address='0.0.0.0')
|
||||
logger.info("Listening on http://localhost:8080")
|
||||
|
||||
try:
|
||||
# start the io loop
|
||||
logger.info("Starting ioloop to update Perspective table data via tornado websocket...")
|
||||
loop = tornado.ioloop.IOLoop.current()
|
||||
loop.call_later(0, perspective_thread, perspective_server, tdengine_conn)
|
||||
loop.start()
|
||||
except KeyboardInterrupt:
|
||||
logger.warning("Keyboard interrupt detected. Shutting down tornado server...")
|
||||
loop.stop()
|
||||
loop.close()
|
||||
logging.info("Shut down")
|
|
@ -0,0 +1,135 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Perspective Viewer Dashboard</title>
|
||||
<link rel="stylesheet" crossorigin="anonymous"
|
||||
href="https://unpkg.com/@finos/perspective-viewer/dist/css/themes.css"/>
|
||||
<style>
|
||||
/* define the layout of the entire dashboard */
|
||||
#dashboard {
|
||||
display: grid;
|
||||
/* define a grid layout with two columns and three rows */
|
||||
grid-template-columns: 1fr 1fr;
|
||||
grid-template-rows: auto auto auto;
|
||||
gap: 20px;
|
||||
padding: 20px;
|
||||
/* limit the maximum height of the Dashboard to the viewport height */
|
||||
max-height: 100vh;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
/* define the style */
|
||||
.viewer-container {
|
||||
/* adjust the height of the container to ensure it can be displayed on one screen */
|
||||
height: calc((100vh - 30px) / 2);
|
||||
width: 100%;
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
background-color: #333;
|
||||
border-radius: 8px;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
perspective-viewer {
|
||||
height: 100%;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
body {
|
||||
background-color: #242526;
|
||||
color: white;
|
||||
font-family: Arial, sans-serif;
|
||||
margin: 0;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<!-- introduce JavaScript files related to Perspective Viewer -->
|
||||
<script type="module" src="https://unpkg.com/@finos/perspective@3.1.3/dist/cdn/perspective.js"></script>
|
||||
<script type="module" src="https://unpkg.com/@finos/perspective-viewer@3.1.3/dist/cdn/perspective-viewer.js"></script>
|
||||
<script type="module"
|
||||
src="https://unpkg.com/@finos/perspective-viewer-datagrid@3.1.3/dist/cdn/perspective-viewer-datagrid.js"></script>
|
||||
<script type="module"
|
||||
src="https://unpkg.com/@finos/perspective-viewer-d3fc@3.1.3/dist/cdn/perspective-viewer-d3fc.js"></script>
|
||||
|
||||
<!-- ANCHOR: perspective_viewer -->
|
||||
<script type="module">
|
||||
// import the Perspective library
|
||||
import perspective from "https://unpkg.com/@finos/perspective@3.1.3/dist/cdn/perspective.js";
|
||||
|
||||
document.addEventListener("DOMContentLoaded", async function () {
|
||||
// an asynchronous function for loading the view
|
||||
async function load_viewer(viewerId, config) {
|
||||
try {
|
||||
const table_name = "meters_values";
|
||||
const viewer = document.getElementById(viewerId);
|
||||
// connect WebSocket server
|
||||
const websocket = await perspective.websocket("ws://localhost:8085/websocket");
|
||||
// open server table
|
||||
const server_table = await websocket.open_table(table_name);
|
||||
// load the table into the view
|
||||
await viewer.load(server_table);
|
||||
// use view configuration
|
||||
await viewer.restore(config);
|
||||
} catch (error) {
|
||||
console.error('An error occurred:', error);
|
||||
}
|
||||
}
|
||||
|
||||
// configuration of the view
|
||||
const config1 = {
|
||||
"version": "3.3.1", // Perspective library version (compatibility identifier)
|
||||
"plugin": "Datagrid", // View mode: Datagrid (table) or D3FC (chart)
|
||||
"plugin_config": { // Plugin-specific configuration
|
||||
"columns": {
|
||||
"current": {
|
||||
"width": 150 // Column width in pixels
|
||||
}
|
||||
},
|
||||
"edit_mode": "READ_ONLY", // Edit mode: READ_ONLY (immutable) or EDIT (editable)
|
||||
"scroll_lock": false // Whether to lock scroll position
|
||||
},
|
||||
"columns_config": {}, // Custom column configurations (colors, formatting, etc.)
|
||||
"settings": true, // Whether to show settings panel (true/false)
|
||||
"theme": "Power Meters", // Custom theme name (must be pre-defined)
|
||||
"title": "Meters list data", // View title
|
||||
"group_by": ["location", "groupid"], // Row grouping fields (equivalent to `row_pivots`)
|
||||
"split_by": [], // Column grouping fields (equivalent to `column_pivots`)
|
||||
"columns": [ // Columns to display (in order)
|
||||
"timestamp",
|
||||
"location",
|
||||
"current",
|
||||
"voltage",
|
||||
"phase"
|
||||
],
|
||||
"filter": [], // Filter conditions (triplet format array)
|
||||
"sort": [], // Sorting rules (format: [field, direction])
|
||||
"expressions": {}, // Custom expressions (e.g., calculated columns)
|
||||
"aggregates": { // Aggregation function configuration
|
||||
"timestamp": "last", // Aggregation: last (takes the latest value)
|
||||
"voltage": "last", // Aggregation: last
|
||||
"phase": "last", // Aggregation: last
|
||||
"current": "last" // Aggregation: last
|
||||
}
|
||||
};
|
||||
|
||||
// load the first view
|
||||
await load_viewer("prsp-viewer-1", config1);
|
||||
});
|
||||
</script>
|
||||
|
||||
<!-- define the HTML Structure of the Dashboard -->
|
||||
<div id="dashboard">
|
||||
<div class="viewer-container">
|
||||
<perspective-viewer id="prsp-viewer-1" theme="Pro Dark"></perspective-viewer>
|
||||
</div>
|
||||
</div>
|
||||
<!-- ANCHOR_END: perspective_viewer -->
|
||||
</body>
|
||||
|
||||
</html>
|
|
@ -182,7 +182,7 @@ def test_json_to_taos(consumer: Consumer):
|
|||
'voltage': 105,
|
||||
'phase': 0.02027, }),
|
||||
partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
|
||||
serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
|
||||
serialized_value_size=None, timestamp=time.time(), timestamp_type=None, leader_epoch=0),
|
||||
ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
|
||||
value=json.dumps({'table_name': 'd1',
|
||||
'ts': '2022-12-06 15:13:39.643',
|
||||
|
@ -190,7 +190,7 @@ def test_json_to_taos(consumer: Consumer):
|
|||
'voltage': 102,
|
||||
'phase': 0.02027, }),
|
||||
partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
|
||||
serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
|
||||
serialized_value_size=None, timestamp=time.time(), timestamp_type=None,leader_epoch=0 ),
|
||||
]
|
||||
]
|
||||
|
||||
|
@ -203,11 +203,11 @@ def test_line_to_taos(consumer: Consumer):
|
|||
ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
|
||||
value="d0 values('2023-01-01 00:00:00.001', 3.49, 109, 0.02737)".encode('utf-8'),
|
||||
partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
|
||||
serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
|
||||
serialized_value_size=None, timestamp=time.time(), timestamp_type=None,leader_epoch=0 ),
|
||||
ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
|
||||
value="d1 values('2023-01-01 00:00:00.002', 6.19, 112, 0.09171)".encode('utf-8'),
|
||||
partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
|
||||
serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
|
||||
serialized_value_size=None, timestamp=time.time(), timestamp_type=None,leader_epoch=0 ),
|
||||
]
|
||||
]
|
||||
consumer._line_to_taos(messages=records)
|
||||
|
|
|
@ -77,6 +77,22 @@ toc_max_heading_level: 4
|
|||
|
||||

|
||||
|
||||
### Virtual Tables
|
||||
|
||||
The "one table per device" design solves most time-series data management and analysis challenges in industrial and IoT scenarios, but it is challenged by device complexity in more sophisticated cases. The root of this complexity is that a single device cannot simply be described or managed by one data collection point or one group of collection points, while business analysis often needs to combine data from multiple collection points or groups. Take a car or a wind turbine as an example: the whole device contains a very large number of sensors (data collection points) whose outputs and sampling frequencies vary widely. A super table can only describe one kind of sensor, so when data from multiple sensors must be combined for analysis, this can only be done through multi-level join queries, which often causes usability and performance problems.
|
||||
|
||||
To solve this problem, TDengine introduces the concept of the virtual table (VTable). A virtual table stores no actual data but can be used for analysis and computation; its data comes from other subtables and basic tables that do store data, and the virtual table is generated by sorting, aligning, and merging the data of different columns by timestamp. Like real tables, virtual tables come in three kinds: virtual super tables, virtual subtables, and virtual basic tables. A virtual super table can be the complete collection of data required by a device or by a group of analysis tasks, and each virtual subtable can reference the same or different columns as needed, so virtual tables can be defined flexibly according to business requirements, giving each table its own view of the data. Virtual tables cannot have data written to or deleted from them; for queries they behave essentially the same as real tables, and any query is supported on virtual super tables, virtual subtables, and virtual basic tables. The only difference is that a virtual table's data is generated dynamically at query time: only the columns referenced in a query are merged into the virtual table, so the same virtual table may present different data in different queries.
|
||||
|
||||
The main capabilities of virtual super tables include:
|
||||
1. Column selection and composition <br />
|
||||
Users can select specific columns from multiple source tables and combine them into one virtual table as needed, forming a unified data view.
|
||||
2. Timestamp-based alignment <br />
|
||||
Data is aligned by timestamp: if multiple tables have data at the same timestamp, the values of the corresponding columns are combined into one row; if some tables have no data at that timestamp, the corresponding columns are filled with NULL.
|
||||
3. Dynamic updates <br />
|
||||
A virtual table is updated automatically as the data in its source tables changes, ensuring data freshness. Virtual tables require no actual storage; the computation is performed dynamically when the data is generated.
|
||||
|
||||
With the concept of virtual tables, TDengine can now manage larger and more complex device data with ease. No matter how each collection point is modeled (single column or multiple columns), and no matter whether the data of these collection points is spread across one or more databases, data sources can be specified arbitrarily across databases and tables by defining virtual subtables, and aggregate computations across devices and across analyses can be performed through virtual super tables. With this, "one table per device" finally becomes a complete reality.
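As a hedged illustration of querying a virtual table like a real one, the sketch below aggregates over the virtual super table meters_v that is defined later in this section (the time range is an arbitrary choice):

```sql
-- Only the columns referenced here are merged into the virtual table at query time.
SELECT location, AVG(current) AS avg_current, MAX(voltage) AS max_voltage
FROM meters_v
WHERE ts >= NOW() - 1h
GROUP BY location;
```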
|
||||
|
||||
### Databases
|
||||
|
||||
A database in TDengine is a collection used to manage a group of tables. A single TDengine instance can contain multiple databases, and each database can be configured with its own storage policy. Different types of data collection points usually have different data characteristics, such as collection frequency, retention period, number of replicas, and data block size. To ensure that TDengine works at maximum efficiency in every scenario, it is recommended to create super tables with different data characteristics in different databases.
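For instance, a minimal sketch of creating two databases with different storage policies; the option values below are illustrative assumptions, not recommendations:

```sql
-- High-frequency meter data: keep 365 days, 10 days of data per file, 3 replicas.
CREATE DATABASE IF NOT EXISTS power KEEP 365 DURATION 10 REPLICA 3;
-- Low-frequency environmental data: keep 10 years with a single replica.
CREATE DATABASE IF NOT EXISTS environment KEEP 3650 DURATION 30 REPLICA 1;
```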
|
||||
|
@ -93,6 +109,7 @@ toc_max_heading_level: 4
|
|||
|
||||
When querying data, the TDengine client automatically converts the stored UTC timestamps into local time for display according to the application's current time zone setting, ensuring that users see correct time information in any time zone.
|
||||
|
||||
|
||||
## Data Modeling
|
||||
|
||||
This section uses smart meters as an example to briefly introduce the basic operations of creating databases, super tables, and tables with SQL in TDengine.
|
||||
|
@ -215,3 +232,177 @@ TDengine 支持灵活的数据模型设计,包括多列模型和单列模型
|
|||
Although TDengine recommends the multi-column model because it is usually better in terms of write efficiency and storage efficiency, the single-column model can be more suitable in certain scenarios. For example, when the set of metrics collected by a data collection point changes frequently, the multi-column model requires frequent changes to the super table's schema, which increases application complexity. In such cases, the single-column model simplifies application design and management because it allows the super table of each metric to be managed and extended independently.
|
||||
|
||||
In short, TDengine offers flexible data model options, and users can choose the model that best fits their actual needs and scenarios to balance performance and management complexity.
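For reference, a sketch of the multi-column model for the same smart-meter data (matching the meters schema used elsewhere in this document), in contrast to the single-column super tables created in the next subsection:

```sql
-- Multi-column model: one super table holds all metrics of a collection point.
CREATE STABLE IF NOT EXISTS meters (
    ts TIMESTAMP,
    current FLOAT,
    voltage INT,
    phase FLOAT
) TAGS (
    location VARCHAR(64),
    group_id INT
);
```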
|
||||
|
||||
### Creating Virtual Tables
|
||||
|
||||
Whether the single-column model or the multi-column model is chosen, TDengine can perform cross-table computation through virtual tables. Taking smart meters as an example, two usage scenarios for virtual tables are introduced here:
|
||||
|
||||
1. Single-source multi-dimensional time-series aggregation
|
||||
2. Cross-source metric comparison analysis
|
||||
|
||||
#### Single-Source Multi-Dimensional Time-Series Aggregation
|
||||
In the single-source multi-dimensional time-series aggregation scenario, "single source" does not mean a single physical table; it refers to multiple single-column time-series tables that belong to the **same data collection point**. Because of business requirements or other constraints, the data is split into several single-column tables, but it remains logically consistent through device tags and a common time base. The role of the virtual table in this scenario is to restore the "vertically" split data of one collection point back into its complete "horizontal" form.
|
||||
For example, suppose the single-column model was used and three super tables were created for the three metrics current, voltage, and phase. In this scenario, a virtual table can aggregate these three different metrics into one table for unified querying and analysis.
|
||||
|
||||
The SQL for creating the single-column-model super tables is as follows:
|
||||
|
||||
```sql
|
||||
|
||||
CREATE STABLE current_stb (
|
||||
ts timestamp,
|
||||
current float
|
||||
) TAGS (
|
||||
device_id varchar(64),
|
||||
location varchar(64),
|
||||
group_id int
|
||||
);
|
||||
|
||||
CREATE STABLE voltage_stb (
|
||||
ts timestamp,
|
||||
voltage int
|
||||
) TAGS (
|
||||
device_id varchar(64),
|
||||
location varchar(64),
|
||||
group_id int
|
||||
);
|
||||
|
||||
CREATE STABLE phase_stb (
|
||||
ts timestamp,
|
||||
phase float
|
||||
) TAGS (
|
||||
device_id varchar(64),
|
||||
location varchar(64),
|
||||
group_id int
|
||||
);
|
||||
```
|
||||
|
||||
Assume there are four devices, d1001, d1002, d1003, and d1004. Create subtables for the current, voltage, and phase metrics of each of the four devices with the following SQL:
|
||||
|
||||
```sql
|
||||
create table current_d1001 using current_stb(device_id, location, group_id) tags("d1001", "California.SanFrancisco", 2);
|
||||
create table current_d1002 using current_stb(device_id, location, group_id) tags("d1002", "California.SanFrancisco", 3);
|
||||
create table current_d1003 using current_stb(device_id, location, group_id) tags("d1003", "California.LosAngeles", 3);
|
||||
create table current_d1004 using current_stb(device_id, location, group_id) tags("d1004", "California.LosAngeles", 2);
|
||||
|
||||
create table voltage_d1001 using voltage_stb(device_id, location, group_id) tags("d1001", "California.SanFrancisco", 2);
|
||||
create table voltage_d1002 using voltage_stb(device_id, location, group_id) tags("d1002", "California.SanFrancisco", 3);
|
||||
create table voltage_d1003 using voltage_stb(device_id, location, group_id) tags("d1003", "California.LosAngeles", 3);
|
||||
create table voltage_d1004 using voltage_stb(device_id, location, group_id) tags("d1004", "California.LosAngeles", 2);
|
||||
|
||||
create table phase_d1001 using phase_stb(device_id, location, group_id) tags("d1001", "California.SanFrancisco", 2);
|
||||
create table phase_d1002 using phase_stb(device_id, location, group_id) tags("d1002", "California.SanFrancisco", 3);
|
||||
create table phase_d1003 using phase_stb(device_id, location, group_id) tags("d1003", "California.LosAngeles", 3);
|
||||
create table phase_d1004 using phase_stb(device_id, location, group_id) tags("d1004", "California.LosAngeles", 2);
|
||||
```
|
||||
|
||||
Now, to aggregate these three metrics into one table through a virtual super table, create the virtual super table with the following SQL:
|
||||
|
||||
```sql
|
||||
CREATE STABLE meters_v (
|
||||
ts timestamp,
|
||||
current float,
|
||||
voltage int,
|
||||
phase float
|
||||
) TAGS (
|
||||
location varchar(64),
|
||||
group_id int
|
||||
) VIRTUAL 1;
|
||||
```
|
||||
|
||||
Then create a virtual subtable for each of the four devices d1001, d1002, d1003, and d1004 with the following SQL:
|
||||
|
||||
```sql
|
||||
CREATE VTABLE d1001_v (
|
||||
current from current_d1001.current,
|
||||
voltage from voltage_d1001.voltage,
|
||||
phase from phase_d1001.phase
|
||||
)
|
||||
USING meters_v
|
||||
TAGS (
|
||||
"California.SanFrancisco",
|
||||
2
|
||||
);
|
||||
|
||||
CREATE VTABLE d1002_v (
|
||||
current from current_d1002.current,
|
||||
voltage from voltage_d1002.voltage,
|
||||
phase from phase_d1002.phase
|
||||
)
|
||||
USING meters_v
|
||||
TAGS (
|
||||
"California.SanFrancisco",
|
||||
3
|
||||
);
|
||||
|
||||
CREATE VTABLE d1003_v (
|
||||
current from current_d1003.current,
|
||||
voltage from voltage_d1003.voltage,
|
||||
phase from phase_d1003.phase
|
||||
)
|
||||
USING meters_v
|
||||
TAGS (
|
||||
"California.LosAngeles",
|
||||
3
|
||||
);
|
||||
|
||||
CREATE VTABLE d1004_v (
|
||||
current from current_d1004.current,
|
||||
voltage from voltage_d1004.voltage,
|
||||
phase from phase_d1004.phase
|
||||
)
|
||||
USING meters_v
|
||||
TAGS (
|
||||
"California.LosAngeles",
|
||||
2
|
||||
);
|
||||
```
|
||||
|
||||
Taking device d1001 as an example, assume the current, voltage, and phase data of device d1001 are as follows:
|
||||
|
||||

|
||||
|
||||
The data in virtual table d1001_v is as follows (a query sketch follows the table):
|
||||
|
||||
| Timestamp | Current | Voltage | Phase |
|
||||
|:--------------:|:-------:|:---------:|:-------:|
|
||||
| 1538548685000 | 10.3 | 219 | 0.31 |
|
||||
| 1538548695000 | 12.6 | 218 | 0.33 |
|
||||
| 1538548696800 | 12.3 | 221 | 0.31 |
|
||||
| 1538548697100 | 12.1 | 220 | NULL |
|
||||
| 1538548697200 | NULL | NULL | 0.32 |
|
||||
| 1538548697700 | 11.8 | NULL | NULL |
|
||||
| 1538548697800 | NULL | 222 | 0.33 |
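As a hedged illustration (not part of the original example), a query such as the following could be run against d1001_v once it is defined; the window length and start timestamp are arbitrary, and `_wstart` is the window pseudo column:

```sql
-- Query the virtual subtable like a regular table; only the columns referenced
-- here are merged into the virtual table when the query runs.
SELECT _wstart, AVG(current) AS avg_current, AVG(voltage) AS avg_voltage
FROM d1001_v
WHERE ts >= 1538548685000
INTERVAL(10s);
```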
|
||||
|
||||
#### Cross-Source Metric Comparison Analysis
|
||||
|
||||
In cross-source metric comparison analysis, "cross-source" means the data comes from **different data collection points**. Metrics with comparable semantics are extracted from different collection points, and a virtual table aligns and merges these metrics by timestamp so they can be compared and analyzed.
|
||||
For example, users can aggregate the current data from different devices into one virtual table to compare and analyze the current readings.
|
||||
|
||||
Taking the analysis of the current data of the four devices d1001, d1002, d1003, and d1004 as an example, create the virtual table with the following SQL:
|
||||
|
||||
```sql
|
||||
CREATE VTABLE current_v (
|
||||
ts timestamp,
|
||||
d1001_current float from current_d1001.current,
|
||||
d1002_current float from current_d1002.current,
|
||||
d1003_current float from current_d1003.current,
|
||||
d1004_current float from current_d1004.current
|
||||
);
|
||||
```
|
||||
|
||||
Assume the current data of the four devices d1001, d1002, d1003, and d1004 are as follows:
|
||||
|
||||

|
||||
|
||||
The data in virtual table current_v is as follows (a comparison query sketch follows the table):
|
||||
|
||||
| Timestamp | d1001_current | d1002_current | d1003_current | d1004_current |
|
||||
|:--------------:|:-------------:|:-------------:|:-------------:|:-------------:|
|
||||
| 1538548685000 | 10.3 | 11.7 | 11.2 | 12.4 |
|
||||
| 1538548695000 | 12.6 | 11.9 | 10.8 | 11.3 |
|
||||
| 1538548696800 | 12.3 | 12.4 | 12.3 | 10.1 |
|
||||
| 1538548697100 | 12.1 | NULL | 11.1 | NULL |
|
||||
| 1538548697200 | NULL | 12.2 | NULL | 11.7 |
|
||||
| 1538548697700 | 11.8 | 11.4 | NULL | NULL |
|
||||
| 1538548697800 | NULL | NULL | 12.1 | 12.6 |
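As a sketch of the comparison analysis mentioned above (an assumed query, not from the original example), the aligned columns can be compared directly; rows where a device has no value at a timestamp show NULL:

```sql
-- Row-by-row comparison of two devices' current readings.
SELECT ts,
       d1001_current,
       d1002_current,
       d1001_current - d1002_current AS current_diff
FROM current_v
WHERE ts >= 1538548685000;
```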
|
||||
|
||||
|
|
|
@ -182,7 +182,7 @@ INTERVAL(interval_val [, interval_offset])
|
|||
```
|
||||
|
||||
The time window clause consists of three sub-clauses (a combined usage sketch follows the list):
|
||||
- INTERVAL clause: produces windows of equal time periods; interval_val specifies the size of each time window, and interval_offset specifies the window offset;
|
||||
- INTERVAL clause: produces windows of equal time periods; interval_val specifies the size of each time window, and interval_offset specifies the window offset. By default, windows are divided starting from Unix time 0 (1970-01-01 00:00:00 UTC); if interval_offset is set, the windows are divided starting from "Unix time 0 + interval_offset";
|
||||
- SLIDING clause: specifies the time by which the window slides forward;
|
||||
- FILL: specifies the fill mode for data when data is missing in a window interval.
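A minimal sketch combining the three sub-clauses on the meters super table; the window sizes and time range are arbitrary choices for illustration:

```sql
-- 10-minute windows offset by 1 minute, sliding forward every 5 minutes,
-- with empty windows filled from the previous window's value.
SELECT _wstart, AVG(current) AS avg_current
FROM meters
WHERE ts >= NOW() - 1d
INTERVAL(10m, 1m) SLIDING(5m)
FILL(PREV);
```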
|
||||
|
||||
|
@ -688,4 +688,4 @@ select a.* from meters a left asof join meters b on timetruncate(a.ts, 1s) < tim
|
|||
|
||||
The restrictions on query result ordering are as follows (a small example follows the list).
|
||||
- For basic tables, subtables, and subqueries with no grouping and no ordering, the query result is output in the order of the primary key column of the driving table.
|
||||
- Because super table queries, Full Joins, and queries with grouping but no ordering have no fixed output order, an explicit sort is required whenever ordering matters and the output has no fixed order. Some functions that depend on a timeline may fail to execute because the output has no valid timeline.
|
||||
- Because super table queries, Full Joins, and queries with grouping but no ordering have no fixed output order, an explicit sort is required whenever ordering matters and the output has no fixed order. Some functions that depend on a timeline may fail to execute because the output has no valid timeline.
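For example (an assumed query for illustration), adding an explicit sort gives a super table query a deterministic output order:

```sql
-- Without ORDER BY, the output order of this super table query is not fixed.
SELECT ts, tbname, current
FROM meters
ORDER BY ts;
```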
|
||||
|
|
After Width: | Height: | Size: 55 KiB |
After Width: | Height: | Size: 53 KiB |
|
@ -64,10 +64,10 @@ CREATE TOPIC [IF NOT EXISTS] topic_name [with meta] AS DATABASE db_name;
|
|||
|
||||
## Deleting a Topic
|
||||
|
||||
If you no longer need to subscribe to the data, you can delete the topic; note that only topics that are not currently being subscribed to can be deleted.
|
||||
If you no longer need to subscribe to the data, you can delete the topic. If the topic is currently being subscribed to by consumers, it can be force-deleted with the FORCE syntax; after a forced deletion, the subscribed consumers will get errors when consuming data (the FORCE syntax is supported since v3.3.6.0).
|
||||
|
||||
```sql
|
||||
DROP TOPIC [IF EXISTS] topic_name;
|
||||
DROP TOPIC [IF EXISTS] [FORCE] topic_name;
|
||||
```
|
||||
|
||||
## Viewing Topics
|
||||
|
@ -94,9 +94,9 @@ SHOW CONSUMERS;
|
|||
|
||||
### Deleting a Consumer Group
|
||||
|
||||
When a consumer is created, it is assigned to a consumer group. Consumers cannot be deleted explicitly, but a consumer group can be deleted with the following statement when it has no consumers in it:
|
||||
When a consumer is created, it is assigned to a consumer group. Consumers cannot be deleted explicitly, but consumer groups can be deleted. If the consumer group currently has active consumers, it can be force-deleted with the FORCE syntax; after a forced deletion, the subscribed consumers will get errors when consuming data (the FORCE syntax is supported since v3.3.6.0).
|
||||
```sql
|
||||
DROP CONSUMER GROUP [IF EXISTS] cgroup_name ON topic_name;
|
||||
DROP CONSUMER GROUP [IF EXISTS] [FORCE] cgroup_name ON topic_name;
|
||||
```
|
||||
|
||||
## Data Subscription
|
||||
|
@ -129,6 +129,7 @@ TDengine 的数据订阅功能支持回放(replay)功能,允许用户按
|
|||
```
|
||||
|
||||
Note the following when using the replay feature of data subscription:
|
||||
- Enable replay by setting the consumer parameter enable.replay to true.
|
||||
- Replay is only supported for query subscriptions; super table and database subscriptions do not support replay.
|
||||
- Replay does not support saving progress.
|
||||
- Because replay itself takes processing time, the replay precision has an error of a few tens of milliseconds.
|
||||
|
|
|
@ -23,11 +23,11 @@ CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name
|
|||
SUBTABLE(expression) AS subquery
|
||||
|
||||
stream_options: {
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE]
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE | CONTINUOUS_WINDOW_CLOSE [recalculate rec_time_val] ]
|
||||
WATERMARK time
|
||||
IGNORE EXPIRED [0|1]
|
||||
DELETE_MARK time
|
||||
FILL_HISTORY [0|1]
|
||||
FILL_HISTORY [0|1] [ASYNC]
|
||||
IGNORE UPDATE [0|1]
|
||||
}
|
||||
|
||||
|
@ -101,6 +101,13 @@ PARTITION 子句中,为 tbname 定义了一个别名 tname, 在 PARTITION
|
|||
|
||||
By enabling the fill_history option, the created stream computing task is able to process data written before, during, and after the stream's creation. This means that data written either before or after the stream is created is included in the scope of the stream computation, ensuring data integrity and consistency. This setting gives users greater flexibility to handle historical and new data according to their actual needs.
|
||||
|
||||
Note:
|
||||
- When fill_history is enabled, creating the stream requires locating the boundary point of the historical data. If there is a lot of historical data, creating the stream task may take a long time. In this case, the fill_history 1 async syntax (supported since v3.3.6.0) can move the stream-creation task to the background so that the CREATE STREAM statement returns immediately without blocking subsequent operations. async only takes effect with fill_history 1; with fill_history 0, stream creation is fast and needs no asynchronous handling.
|
||||
|
||||
- The progress of background stream creation can be checked with show streams (the ready status means success, init means the stream is still being created, and failed means creation failed; when it fails, the message column shows the reason. If stream creation fails, the stream can be dropped and created again).
|
||||
|
||||
- Also, do not create multiple streams asynchronously at the same time; transaction conflicts may cause the later streams to fail.
|
||||
|
||||
For example, create a stream that counts the number of rows produced by all smart meters every 10s and also computes the historical data. The SQL is as follows (an asynchronous variant is sketched inside the code block):
|
||||
```sql
|
||||
create stream if not exists count_history_s fill_history 1 into count_history as select count(*) from power.meters interval(10s)
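-- A hedged asynchronous variant (per the fill_history 1 async note above, assuming v3.3.6.0+):
-- the statement returns immediately while the history boundary is located in the background;
-- the stream and target table names below are hypothetical.
create stream if not exists count_history_async_s fill_history 1 async into count_history_async as select count(*) from power.meters interval(10s)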
|
||||
|
@ -124,7 +131,12 @@ create stream if not exists count_history_s fill_history 1 into count_history as
|
|||
1. AT_ONCE: triggered immediately on write.
|
||||
2. WINDOW_CLOSE: triggered when the window closes (window closing is determined by the event time and can be used together with watermark).
|
||||
3. MAX_DELAY time: if the window closes, the computation is triggered. If the window has not closed and the time it has stayed open exceeds the time specified by max delay, the computation is also triggered.
|
||||
4. FORCE_WINDOW_CLOSE: based on the current time of the operating system, only the result of the currently closing window is computed and pushed out. The window is computed only once at the moment it is closed and is not recomputed later. This mode currently only supports INTERVAL windows (sliding is not supported); FILL_HISTORY must be 0, IGNORE EXPIRED must be 1, and IGNORE UPDATE must be 1; FILL only supports PREV, NULL, NONE, and VALUE.
|
||||
4. FORCE_WINDOW_CLOSE: based on the current time of the operating system, only the result of the currently closing window is computed and pushed out. The window is computed only once at the moment it is closed and is not recomputed later. This mode currently only supports INTERVAL windows (sliding is supported); in this mode, FILL_HISTORY is automatically set to 0, IGNORE EXPIRED is automatically set to 1, and IGNORE UPDATE is automatically set to 1; FILL only supports PREV, NULL, NONE, and VALUE.
|
||||
- This mode can be used to implement continuous queries, for example, creating a stream that queries the number of rows in the past 10s window once every 1s. The SQL is as follows:
|
||||
```sql
|
||||
create stream if not exists continuous_query_s trigger force_window_close into continuous_query as select count(*) from power.meters interval(10s) sliding(1s)
|
||||
```
|
||||
5. CONTINUOUS_WINDOW_CLOSE: results are output when the window closes. Modifying or deleting data does not trigger recalculation immediately; instead, a periodic recalculation is performed every rec_time_val. If rec_time_val is not specified, the recalculation period is 60 minutes. If a recalculation takes longer than rec_time_val, the next recalculation starts automatically after the current one finishes. This mode currently only supports INTERVAL windows. If FILL is used, the adapter information must be configured: adapterFqdn, adapterPort, and adapterToken. adapterToken is the Base64-encoded string of `{username}:{password}`; for example, `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`. A usage sketch appears after the note below.
|
||||
|
||||
Window closing is determined by the event time; if the event stream is interrupted or continuously delayed, the event time cannot advance, which may prevent the latest computation results from being produced.
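A hedged sketch of the CONTINUOUS_WINDOW_CLOSE trigger from item 5 above, following the grammar shown in stream_options; the stream and target table names are hypothetical and the 10-minute recalculation period is arbitrary:

```sql
create stream if not exists continuous_close_s trigger continuous_window_close recalculate 10m into continuous_close as select _wstart, count(*) from power.meters interval(10s)
```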
|
||||
|
||||
|
|