Merge branch '3.0' into fix/mergegpt

@@ -0,0 +1,25 @@
name: Cancel Workflow on Merge

on:
  pull_request:
    types: [closed]

jobs:
  cancel-workflow:
    runs-on: ubuntu-latest
    steps:
      - name: Cancel Workflow if Merged or Closed
        if: ${{ github.event.pull_request.merged || github.event.pull_request.state == 'closed' }}
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          echo "PR has been merged or closed, cancelling workflow..."
          gh auth status
          gh run list \
            --repo ${{ github.repository }} \
            --branch ${{ github.event.pull_request.head.ref }} \
            --workflow "TDengine Test" \
            --status in_progress \
            --status queued \
            --json databaseId --jq '.[].databaseId' | \
            xargs -I {} gh run cancel --repo ${{ github.repository }} {}
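The same cancellation pipeline can be exercised from a local shell before changing the workflow; a minimal sketch, assuming an authenticated gh CLI, with OWNER/REPO and the branch name as placeholders:

```shell
# List runnable "TDengine Test" runs for a branch, then cancel each by id.
# If your gh version treats a repeated --status flag as last-one-wins
# (an assumption, not verified here), run once per status instead.
gh run list \
  --repo OWNER/REPO \
  --branch my-feature-branch \
  --workflow "TDengine Test" \
  --status in_progress \
  --json databaseId --jq '.[].databaseId' |
  xargs -r -I {} gh run cancel --repo OWNER/REPO {}
```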
@@ -1,102 +0,0 @@
name: TDengine CI Test

on:
  pull_request:
    branches:
      - 'main'
      - '3.0'
      - '3.1'
    paths-ignore:
      - 'packaging/**'
      - 'docs/**'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}-TDengine
  cancel-in-progress: true

env:
  WKC: '/var/lib/jenkins/workspace/TDinternal/community'

jobs:
  fetch-parameters:
    runs-on:
      group: CI
      labels: [self-hosted, Linux, X64, testing]
    outputs:
      run_function_test: ${{ steps.parameters.outputs.run_function_test }}
      run_tdgpt_test: ${{ steps.parameters.outputs.run_tdgpt_test }}
    steps:
      - name: Determine trigger source and fetch parameters
        id: parameters
        run: |
          set -euo pipefail
          target_branch=${{ github.event.pull_request.base.ref }}

          # Check whether to run tdgpt test cases
          cd ${{ env.WKC }}
          changed_files_non_doc=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | grep -v "^docs/en/" | grep -v "^docs/zh/" | grep -v ".md$" | tr '\n' ' ' || :)
          echo "changed files exclude doc: ${changed_files_non_doc}"

          if [[ -n "$changed_files_non_doc" && "$changed_files_non_doc" =~ (forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt) ]]; then
            run_tdgpt_test="true"
          else
            run_tdgpt_test="false"
          fi
          echo "run tdgpt test: ${run_tdgpt_test}"

          # Check whether to run function test cases
          changed_files_non_tdgpt=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | \
            grep -v "^docs/en/" | \
            grep -v "^docs/zh/" | \
            grep -v ".md$" | \
            grep -Ev "forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt" | \
            tr '\n' ' ' || :)
          echo "changed files exclude tdgpt: ${changed_files_non_tdgpt}"

          if [ -n "$changed_files_non_tdgpt" ]; then
            run_function_test="true"
          else
            run_function_test="false"
          fi

          echo "run function test: ${run_function_test}"

          # Output the results for GitHub Actions
          echo "run_function_test=$run_function_test" >> $GITHUB_OUTPUT
          echo "run_tdgpt_test=$run_tdgpt_test" >> $GITHUB_OUTPUT

          echo ${{ github.event.pull_request.head.ref }}
          echo ${{ github.event.pull_request.base.ref }}
          echo ${{ github.event.pull_request.number }}

  run-tests-on-linux:
    uses: taosdata/.github/.github/workflows/run-tests-on-linux.yml
    needs: fetch-parameters
    if: ${{ needs.fetch-parameters.outputs.run_tdgpt_test == 'true' || needs.fetch-parameters.outputs.run_function_test == 'true' }}
    with:
      tdinternal: false
      run_function_test: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' || false }}
      run_tdgpt_test: ${{ needs.fetch-parameters.outputs.run_tdgpt_test == 'true' || false }}
      source_branch: ${{ github.event.pull_request.head.ref }}
      target_branch: ${{ github.event.pull_request.base.ref }}
      pr_number: ${{ github.event.pull_request.number }}

  run-tests-on-mac:
    uses: taosdata/.github/.github/workflows/run-tests-on-macos.yml
    needs: fetch-parameters
    if: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
    with:
      tdinternal: false
      source_branch: ${{ github.event.pull_request.head.ref }}
      target_branch: ${{ github.event.pull_request.base.ref }}
      pr_number: ${{ github.event.pull_request.number }}

  run-tests-on-windows:
    uses: taosdata/.github/.github/workflows/run-tests-on-windows.yml
    needs: fetch-parameters
    if: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
    with:
      tdinternal: false
      source_branch: ${{ github.event.pull_request.head.ref }}
      target_branch: ${{ github.event.pull_request.base.ref }}
      pr_number: ${{ github.event.pull_request.number }}
@@ -1,9 +1,12 @@
name: taosKeeper Build

on:
  push:
  pull_request:
    branches:
      - 'main'
      - '3.0'
    paths:
      - tools/keeper/**
      - 'tools/keeper/**'

jobs:
  build:
@@ -6,13 +6,13 @@ on:
      - 'main'
      - '3.0'
      - '3.1'
      - '3.3.6'
      - 'enh/cmake-TD-33848'

    paths-ignore:
      - 'docs/**'
      - 'packaging/**'
      - 'tests/**'
      - '*.md'
      - '**/*.md'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}

@@ -20,9 +20,10 @@ concurrency:

jobs:
  build:
    name: Build and test on ${{ matrix.os }}
    name: Run on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os:
          - ubuntu-20.04

@@ -80,6 +81,8 @@ jobs:
          -DBUILD_KEEPER=true \
          -DBUILD_HTTP=false \
          -DBUILD_TEST=true \
          -DWEBSOCKET=true \
          -DCMAKE_BUILD_TYPE=Release \
          -DBUILD_DEPENDENCY_TESTS=false
        make -j 4
        sudo make install
@@ -7,14 +7,13 @@ on:
      - '3.0'
    paths:
      - 'docs/**'
      - '*.md'

env:
  DOC_WKC: "/root/doc_ci_work"
  ZH_DOC_REPO: "docs.taosdata.com"
  EN_DOC_REPO: "docs.tdengine.com"
  TD_REPO: "TDengine"
  TOOLS_REPO: "taos-tools"
  DOC_WKC: '/root/doc_ci_work'
  ZH_DOC_REPO: 'docs.taosdata.com'
  EN_DOC_REPO: 'docs.tdengine.com'
  TD_REPO: 'TDengine'
  TOOLS_REPO: 'taos-tools'

jobs:
  build-doc:
@@ -0,0 +1,68 @@
name: TDengine Test

on:
  pull_request:
    branches:
      - 'main'
      - '3.0'
      - '3.1'
    paths-ignore:
      - 'packaging/**'
      - 'docs/**'
      - 'tools/tdgpt/**'
      - 'source/libs/executor/src/forecastoperator.c'
      - 'source/libs/executor/src/anomalywindowoperator.c'
      - 'include/common/tanalytics.h'
      - 'source/common/src/tanalytics.c'
      - 'tests/parallel/tdgpt_cases.task'
      - 'tests/script/tsim/analytics'
      - '**/*.md'
  workflow_dispatch:
    inputs:
      specified_source_branch:
        description: 'Enter the source branch name of TDengine'
        required: true
        type: string
      specified_target_branch:
        description: 'Enter the target branch name of TDengine'
        required: true
        type: string
      specified_pr_number:
        description: 'Enter the PR number of TDengine'
        required: true
        type: string

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number }}-${{ github.event.inputs.specified_target_branch }}-${{ github.event.inputs.specified_pr_number }}-TDengine
  cancel-in-progress: true

env:
  WKC: '/var/lib/jenkins/workspace/TDinternal/community'

jobs:
  run-tests-on-linux:
    uses: taosdata/.github/.github/workflows/run-tests-on-linux.yml@main
    if: ${{ github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' }}
    with:
      tdinternal: false
      specified_source_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_source_branch }}
      specified_target_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_target_branch }}
      specified_pr_number: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_pr_number }}

  run-tests-on-mac:
    uses: taosdata/.github/.github/workflows/run-tests-on-macos.yml@main
    if: ${{ github.event_name == 'pull_request' }}
    with:
      tdinternal: false
      specified_source_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_source_branch }}
      specified_target_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_target_branch }}
      specified_pr_number: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_pr_number }}

  run-tests-on-windows:
    uses: taosdata/.github/.github/workflows/run-tests-on-windows.yml@main
    if: ${{ github.event_name == 'pull_request' }}
    with:
      tdinternal: false
      specified_source_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_source_branch }}
      specified_target_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_target_branch }}
      specified_pr_number: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_pr_number }}
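The `condition && 'a' || b` expressions in the `with:` blocks are the usual GitHub Actions stand-in for a ternary operator; it is safe here because 'unavailable' is truthy. A minimal sketch of the idiom in a hypothetical step:

```yaml
# Hypothetical step illustrating the expression-as-ternary idiom used above.
- name: Report trigger
  run: echo "source=${{ github.event_name == 'pull_request' && 'pull request' || 'manual dispatch' }}"
```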
@@ -0,0 +1,200 @@
name: TDgpt Test

on:
  pull_request:
    branches:
      - 'main'
      - '3.0'
      - '3.3.6'
    paths:
      - 'tools/tdgpt/**'
      - 'source/libs/executor/src/forecastoperator.c'
      - 'source/libs/executor/src/anomalywindowoperator.c'
      - 'include/common/tanalytics.h'
      - 'source/common/src/tanalytics.c'
      - 'tests/parallel/tdgpt_cases.task'
      - 'tests/script/tsim/analytics'

jobs:
  unit-test:
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]

    defaults:
      run:
        working-directory: ${{ github.workspace }}/tools/tdgpt

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v3
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install flake8 pytest pylint
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi

      - name: Checking the code with pylint
        run: |
          pylint $(git ls-files '*.py') --exit-zero

      - name: Checking the code with flake8
        run: |
          # stop the build if there are Python syntax errors or undefined names
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics

      - name: Run test cases with pytest
        run: |
          pytest

  function-test:
    runs-on:
      group: CI
      labels: [self-hosted, Linux, X64, testing]

    env:
      CONTAINER_NAME: 'taosd-test'
      WKDIR: '/var/lib/jenkins/workspace'
      WK: '/var/lib/jenkins/workspace/TDinternal'
      WKC: '/var/lib/jenkins/workspace/TDinternal/community'
      SOURCE_BRANCH: ${{ github.event.pull_request.head.ref }}
      TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
      PR_NUMBER: ${{ github.event.pull_request.number }}

    steps:
      - name: Output the environment information
        run: |
          echo "::group::Environment Info"
          date
          hostname
          env
          echo "Runner: ${{ runner.name }}"
          echo "Workspace: ${{ env.WKDIR }}"
          git --version
          echo "${{ env.WKDIR }}/restore.sh -p PR-${{ env.PR_NUMBER }} -n ${{ github.run_number }} -c ${{ env.CONTAINER_NAME }}"
          echo "::endgroup::"

      - name: Prepare repositories
        run: |
          set -euo pipefail
          prepare_environment() {
            cd "$1"
            git reset --hard
            git clean -f
            git remote prune origin
            git fetch
            git checkout "$2"
          }
          prepare_environment "${{ env.WK }}" "${{ env.TARGET_BRANCH }}"
          prepare_environment "${{ env.WKC }}" "${{ env.TARGET_BRANCH }}"

      - name: Get latest codes and logs
        run: |
          cd ${{ env.WKC }}
          git remote prune origin
          git pull >/dev/null
          git log -5
          echo "`date "+%Y%m%d-%H%M%S"` TDengineTest/${{ env.PR_NUMBER }}:${{ github.run_number }}:${{ env.TARGET_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
          echo "CHANGE_BRANCH:${{ env.SOURCE_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
          echo "community log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
          git fetch origin +refs/pull/${{ env.PR_NUMBER }}/merge
          git checkout -qf FETCH_HEAD
          git log -5
          echo "community log merged: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
          cd ${{ env.WK }}
          git pull >/dev/null
          git log -5
          echo "TDinternal log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log

      - name: Update submodule
        run: |
          cd ${{ env.WKC }}
          git submodule update --init --recursive

      - name: Detect non-doc files changed
        run: |
          mkdir -p ${{ env.WKDIR }}/tmp/${{ env.PR_NUMBER }}_${{ github.run_number }}
          cd ${{ env.WKC }}
          changed_files_non_doc=$(git --no-pager diff --name-only \
            FETCH_HEAD \
            $(git merge-base FETCH_HEAD ${{ env.TARGET_BRANCH }}) | \
            grep -v "^docs/en/" | \
            grep -v "^docs/zh/" | \
            grep -v ".md$" | \
            tr '\n' ' ' || : \
          )
          echo $changed_files_non_doc > \
            ${{ env.WKDIR }}/tmp/${{ env.PR_NUMBER }}_${{ github.run_number }}/docs_changed.txt

      - name: Check assert testing
        run: |
          cd ${{ env.WKC }}/tests/parallel_test
          ./run_check_assert_container.sh -d ${{ env.WKDIR }}

      - name: Check void function testing
        run: |
          cd ${{ env.WKC }}/tests/parallel_test
          ./run_check_void_container.sh -d ${{ env.WKDIR }}

      - name: Build docker container
        run: |
          date
          rm -rf ${{ env.WKC }}/debug
          cd ${{ env.WKC }}/tests/parallel_test
          time ./container_build.sh -w ${{ env.WKDIR }} -e

      - name: Get parameters for testing
        id: get_param
        run: |
          log_server_file="/home/log_server.json"
          timeout_cmd=""
          extra_param=""

          if [ -f "$log_server_file" ]; then
            log_server_enabled=$(jq '.enabled' "$log_server_file")
            timeout_param=$(jq '.timeout' "$log_server_file")
            if [ "$timeout_param" != "null" ] && [ "$timeout_param" != "0" ]; then
              timeout_cmd="timeout $timeout_param"
            fi

            if [ "$log_server_enabled" == "1" ]; then
              log_server=$(jq '.server' "$log_server_file" | sed 's/\\\"//g')
              if [ "$log_server" != "null" ] && [ "$log_server" != "" ]; then
                extra_param="-w $log_server"
              fi
            fi
          fi
          echo "timeout_cmd=$timeout_cmd" >> $GITHUB_OUTPUT
          echo "extra_param=$extra_param" >> $GITHUB_OUTPUT

      - name: Run function returns with a null pointer scan testing
        run: |
          cd ${{ env.WKC }}/tests/parallel_test
          ./run_scan_container.sh \
            -d ${{ env.WKDIR }} \
            -b ${{ env.PR_NUMBER }}_${{ github.run_number }} \
            -f ${{ env.WKDIR }}/tmp/${{ env.PR_NUMBER }}_${{ github.run_number }}/docs_changed.txt \
            ${{ steps.get_param.outputs.extra_param }}

      - name: Run tdgpt test cases
        run: |
          cd ${{ env.WKC }}/tests/parallel_test
          export DEFAULT_RETRY_TIME=2
          date
          timeout 600 time ./run.sh -e \
            -m /home/m.json \
            -t tdgpt_cases.task \
            -b "${{ env.PR_NUMBER }}_${{ github.run_number }}" \
            -l ${{ env.WKDIR }}/log \
            -o 300 ${{ steps.get_param.outputs.extra_param }}
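The `Get parameters for testing` step reads its knobs from a JSON file on the self-hosted runner; a sketch of the shape implied by the jq calls (field values are illustrative, not from the repo):

```shell
# Illustrative /home/log_server.json consumed by the step above:
cat <<'EOF' > /tmp/log_server.json
{"enabled": 1, "timeout": 600, "server": "192.168.1.10"}
EOF
jq '.enabled' /tmp/log_server.json   # 1   -> log server reporting is on
jq '.timeout' /tmp/log_server.json   # 600 -> timeout_cmd="timeout 600"
jq '.server'  /tmp/log_server.json   # "192.168.1.10" -> extra_param="-w 192.168.1.10"
```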
@@ -0,0 +1,41 @@
name: TDgpt Update Service

on:
  schedule:
    - cron: '30 00 * * *'

env:
  WKC: "/root/TDengine"

jobs:
  update-service:
    runs-on:
      group: CI
      labels: [self-hosted, Linux, X64, tdgpt-anode-service]
    steps:
      - name: Update TDengine codes
        run: |
          set -euo pipefail
          cd ${{ env.WKC }}
          git checkout 3.0

      - name: Package the TDGpt Anode Service
        run: |
          set -euo pipefail
          cd ${{ env.WKC }}/tools/tdgpt/script && ./release.sh

      - name: Reinstall and restart the TDGpt Anode Service
        run: |
          set -euo pipefail
          cd ${{ env.WKC }}/tools/tdgpt/release
          if [[ -f "TDengine-enterprise-anode-1.0.1.tar.gz" ]]; then
            tar -xzf TDengine-enterprise-anode-1.0.1.tar.gz
            cd TDengine-enterprise-anode-1.0.1
            ./install.sh
          fi
          systemctl restart taosanoded

      - name: Clean up
        if: always()
        run: |
          if [[ -d ${{ env.WKC }}/tools/tdgpt/release/TDengine-enterprise-anode-1.0.1 ]]; then rm -rf ${{ env.WKC }}/tools/tdgpt/release/TDengine-enterprise-anode-1.0.1; fi
@@ -51,7 +51,6 @@ pysim/
tests/script/api/batchprepare
taosadapter
taosadapter-debug
tools/taos-tools/*
tools/taosws-rs/*
tools/taosadapter/*
tools/upx*

@@ -140,7 +139,6 @@ tags
*CMakeCache*
*CMakeFiles*
.history/
*.txt
*.tcl
*.pc
contrib/geos
@@ -19,21 +19,21 @@ include(${TD_SUPPORT_DIR}/cmake.options)
include(${TD_SUPPORT_DIR}/cmake.define)
include(${TD_SUPPORT_DIR}/cmake.version)
include(${TD_SUPPORT_DIR}/cmake.install)
enable_testing()

set_property(GLOBAL PROPERTY GLOBAL_DEPENDS_NO_CYCLES OFF)

add_library(api INTERFACE)
target_include_directories(api INTERFACE "include/client")

add_subdirectory(contrib)
add_subdirectory(source)
add_subdirectory(tools)
add_subdirectory(utils)
add_subdirectory(tests)
add_subdirectory(docs/doxgen)

if(${BUILD_TEST})
  include(CTest)
  enable_testing()
  add_subdirectory(examples/c)
endif(${BUILD_TEST})

add_library(api INTERFACE)
target_include_directories(api INTERFACE "include/client")
add_subdirectory(source)
add_subdirectory(tools)
add_subdirectory(utils)
add_subdirectory(tests)
add_subdirectory(docs/doxgen)
Jenkinsfile2

@@ -112,16 +112,6 @@ def build_pre_docs(){
        git fetch origin +refs/pull/${CHANGE_ID}/merge
        git checkout -qf FETCH_HEAD
        '''

        sh '''
        cd ${DOC_WKC}/${tools_repo}
        git reset --hard
        git clean -f
        git fetch
        git remote prune origin
        git checkout ''' + env.CHANGE_TARGET + '''
        git pull >/dev/null
        '''
    }
}
README.md

@@ -29,9 +29,9 @@ English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine
1. [Introduction](#1-introduction)
1. [Documentation](#2-documentation)
1. [Prerequisites](#3-prerequisites)
   - [3.1 Prerequisites On Linux](#31-on-linux)
   - [3.2 Prerequisites On macOS](#32-on-macos)
   - [3.3 Prerequisites On Windows](#33-on-windows)
   - [3.1 Prerequisites On Linux](#31-prerequisites-on-linux)
   - [3.2 Prerequisites On macOS](#32-prerequisites-on-macos)
   - [3.3 Prerequisites On Windows](#33-prerequisites-on-windows)
   - [3.4 Clone the repo](#34-clone-the-repo)
1. [Building](#4-building)
   - [4.1 Build on Linux](#41-build-on-linux)

@@ -70,6 +70,8 @@ TDengine is an open source, high-performance, cloud native [time-series database

For a full list of TDengine competitive advantages, please [check here](https://tdengine.com/tdengine/). The easiest way to experience TDengine is through [TDengine Cloud](https://cloud.tdengine.com).

For the latest TDengine component TDgpt, please refer to the [TDgpt README](./tools/tdgpt/README.md) for details.

# 2. Documentation

For the user manual, system design, and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) ([TDengine 文档](https://docs.taosdata.com)).

@@ -84,7 +86,7 @@ At the moment, TDengine server supports running on Linux/Windows/MacOS systems.

If you want to compile taosAdapter or taosKeeper, you need to install Go 1.18 or above.

## 3.1 On Linux
## 3.1 Prerequisites on Linux

<details>

@@ -109,7 +111,7 @@ yum install -y zlib-static xz-devel snappy-devel jansson-devel pkgconfig libatom

</details>

## 3.2 On macOS
## 3.2 Prerequisites on macOS

<details>

@@ -123,7 +125,7 @@ brew install argp-standalone gflags pkgconfig

</details>

## 3.3 On Windows
## 3.3 Prerequisites on Windows

<details>
@@ -9,7 +9,7 @@ option(
)

IF(${TD_WINDOWS})

  IF(NOT TD_ASTRA)
    MESSAGE("build pthread Win32")
    option(
      BUILD_PTHREAD

@@ -63,7 +63,7 @@ IF(${TD_WINDOWS})
    "If build crashdump on Windows"
    ON
  )

  ENDIF ()
ELSEIF (TD_DARWIN_64)
  IF(${BUILD_TEST})
    add_definitions(-DCOMPILER_SUPPORTS_CXX13)

@@ -71,58 +71,102 @@ ELSEIF (TD_DARWIN_64)
ENDIF ()

option(
  BUILD_WITH_LEMON
  "If build with lemon"
  ON
)

option(
  BUILD_WITH_UDF
  "If build with UDF"
  ON
)

IF(NOT TD_ASTRA)
  option(
    BUILD_GEOS
    "If build with geos"
    ON
  )

  option(
    BUILD_SHARED_LIBS
    ""
    OFF
  )

  option(
    RUST_BINDINGS
    "If build with rust-bindings"
    ON
  )

  option(
    BUILD_PCRE2
    "If build with pcre2"
    ON
  )

  option(
    JEMALLOC_ENABLED
    "If build with jemalloc"
    OFF
  )

  option(
    BUILD_SANITIZER
    "If build sanitizer"
    OFF
  )

  option(
    BUILD_ADDR2LINE
    "If build addr2line"
    OFF
  )

  option(
    BUILD_WITH_LEVELDB
    "If build with leveldb"
    OFF
  )

  option(
    BUILD_WITH_ROCKSDB
    "If build with rocksdb"
    ON
  )

  option(
    BUILD_WITH_LZ4
    "If build with lz4"
    ON
  )
ELSE ()

  option(
    BUILD_WITH_LZMA2
    "If build with lzma2"
    ON
  )

ENDIF ()

ADD_DEFINITIONS(-DUSE_AUDIT)
ADD_DEFINITIONS(-DUSE_GEOS)
ADD_DEFINITIONS(-DUSE_UDF)
ADD_DEFINITIONS(-DUSE_STREAM)
ADD_DEFINITIONS(-DUSE_PRCE2)
ADD_DEFINITIONS(-DUSE_RSMA)
ADD_DEFINITIONS(-DUSE_TSMA)
ADD_DEFINITIONS(-DUSE_TQ)
ADD_DEFINITIONS(-DUSE_TOPIC)
ADD_DEFINITIONS(-DUSE_MONITOR)
ADD_DEFINITIONS(-DUSE_REPORT)

IF(${TD_ASTRA_RPC})
  ADD_DEFINITIONS(-DTD_ASTRA_RPC)
ENDIF()

IF(${TD_LINUX})

@@ -150,6 +194,12 @@ option(
  ON
)

option(
  BUILD_WITH_LZMA2
  "If build with lzma2"
  ON
)

ENDIF ()

IF(NOT TD_ENTERPRISE)

@@ -191,6 +241,14 @@ option(BUILD_WITH_COS "If build with cos" OFF)

ENDIF ()

IF(${TAOSD_INTEGRATED})
  add_definitions(-DTAOSD_INTEGRATED)
ENDIF()

IF(${TD_AS_LIB})
  add_definitions(-DTD_AS_LIB)
ENDIF()

option(
  BUILD_WITH_SQLITE
  "If build with sqlite"

@@ -209,6 +267,14 @@ option(
  off
)

option(
  BUILD_WITH_NURAFT
  "If build with NuRaft"
  OFF
)

IF(NOT TD_ASTRA)

  option(
    BUILD_WITH_UV
    "If build with libuv"

@@ -242,6 +308,7 @@ option(
  "If use invertedIndex"
  ON
)
ENDIF ()

option(
  BUILD_RELEASE
@@ -1,7 +1,6 @@
# xz

if (${TD_LINUX})
  if (${BUILD_WITH_LZMA2})
    ExternalProject_Add(lzma2
      GIT_REPOSITORY https://github.com/conor42/fast-lzma2.git
      SOURCE_DIR "${TD_CONTRIB_DIR}/lzma2"
@@ -92,7 +92,9 @@ if(${BUILD_TEST})
endif(${BUILD_TEST})

# lz4
cat("${TD_SUPPORT_DIR}/lz4_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
if(${BUILD_WITH_LZ4})
  cat("${TD_SUPPORT_DIR}/lz4_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_LZ4})

# zlib
cat("${TD_SUPPORT_DIR}/zlib_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})

@@ -186,16 +188,22 @@ if(${BUILD_PCRE2})
cat("${TD_SUPPORT_DIR}/pcre2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()

find_program(C_COMPILER_LEMON NAMES gcc)
if(C_COMPILER_LEMON)
# lemon
if(${BUILD_WITH_LEMON})
  if(${TD_ACORE})
    set(C_COMPILER_LEMON ${CMAKE_C_COMPILER})
  else()
    find_program(C_COMPILER_LEMON NAMES gcc)
  endif()
  if(C_COMPILER_LEMON)
    message(STATUS "LEMON C compiler: ${C_COMPILER_LEMON}")
  else()
    set(C_COMPILER_LEMON ${CMAKE_C_COMPILER})
    message(STATUS "LEMON C compiler: ${C_COMPILER_LEMON}")
  endif()
endif()

# lemon
cat("${TD_SUPPORT_DIR}/lemon_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
  cat("${TD_SUPPORT_DIR}/lemon_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()

# Force specify CC=cc on MacOS. Because the default CC setting in the generated Makefile has issues finding standard library headers
IF(${TD_DARWIN})

@@ -273,11 +281,13 @@ unset(CMAKE_PROJECT_INCLUDE_BEFORE)
# endif()

# lz4
add_subdirectory(lz4/build/cmake EXCLUDE_FROM_ALL)
target_include_directories(
if(${BUILD_WITH_LZ4})
  add_subdirectory(lz4/build/cmake EXCLUDE_FROM_ALL)
  target_include_directories(
    lz4_static
    PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/lz4/lib
  )
endif(${BUILD_WITH_LZ4})

# zlib
set(CMAKE_PROJECT_INCLUDE_BEFORE "${TD_SUPPORT_DIR}/EnableCMP0048.txt.in")
@@ -30,7 +30,7 @@ stream_options: {
 WATERMARK time
 IGNORE EXPIRED [0|1]
 DELETE_MARK time
 FILL_HISTORY [0|1]
 FILL_HISTORY [0|1] [ASYNC]
 IGNORE UPDATE [0|1]
}

@@ -108,6 +108,13 @@ Under normal circumstances, stream computation tasks will not process data that

By enabling the fill_history option, the created stream computation task will be capable of processing data written before, during, and after the creation of the stream. This means that data written either before or after the creation of the stream will be included in the scope of stream computation, thus ensuring data integrity and consistency. This setting provides users with greater flexibility, allowing them to handle historical and new data flexibly according to actual needs.

Tips:

- When fill_history is enabled, creating a stream requires finding the boundary point of historical data; if there is a lot of historical data, creating the stream may take a long time. In this case, you can use fill_history 1 async (supported since version 3.3.6.0), so that the creation task is processed in the background and the CREATE STREAM statement returns immediately without blocking subsequent operations. async takes effect only with fill_history 1; creating a stream with fill_history 0 is very fast and does not require asynchronous processing.

- SHOW STREAMS can be used to view the progress of background stream creation (ready status indicates success, init status indicates the stream is still being created, failed status indicates that the creation has failed, and the message column shows the reason for the failure; a failed stream can be deleted and rebuilt).

- Also, do not create multiple streams asynchronously at the same time, as transaction conflicts may cause subsequent streams to fail.

For example, create a stream to count the number of data entries generated by all smart meters every 10s, and also calculate historical data. The SQL is as follows:

```sql
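-- A sketch of such a statement; the stream and target-table names are
-- illustrative, assuming the docs' usual power.meters schema:
CREATE STREAM IF NOT EXISTS count_history_s FILL_HISTORY 1 ASYNC
  INTO count_history AS
    SELECT COUNT(*) FROM power.meters INTERVAL(10s);
```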
@@ -108,7 +108,7 @@ The header is the first line of the CSV file, with the following rules:
(1) The header of the CSV can configure the following columns:

| Number | Column Name | Description | Required | Default Behavior |
|--------|-------------|-------------|----------|------------------|
| 1 | point_id | The id of the data point on the OPC UA server | Yes | None |
| 2 | stable | The corresponding supertable for the data point in TDengine | Yes | None |
| 3 | tbname | The corresponding subtable for the data point in TDengine | Yes | None |

@@ -117,11 +117,13 @@ The header is the first line of the CSV file, with the following rules:
| 6 | value_transform | The transformation function executed in taosX for the collected value of the data point | No | Do not transform the collected value uniformly |
| 7 | type | The data type of the collected value of the data point | No | Use the original type of the collected value as the data type in TDengine |
| 8 | quality_col | The column name in TDengine corresponding to the quality of the collected value | No | Do not add a quality column in TDengine uniformly |
| 9 | ts_col | The original timestamp column of the data point in TDengine | No | If both ts_col and received_ts_col are non-empty, use the former as the timestamp column; if one of ts_col or received_ts_col is non-empty, use the non-empty column as the timestamp column; if both are empty, use the original timestamp of the data point as the timestamp column with the default name `ts`. |
| 10 | received_ts_col | The timestamp column in TDengine when the data point value is received | No | Same as above |
| 11 | ts_transform | The transformation function executed in taosX for the original timestamp of the data point | No | Do not transform the original timestamp of the data point uniformly |
| 12 | received_ts_transform | The transformation function executed in taosX for the received timestamp of the data point | No | Do not transform the received timestamp of the data point uniformly |
| 13 | tag::VARCHAR(200)::name | The Tag column corresponding to the data point in TDengine. Here `tag` is a reserved keyword indicating that this column is a tag; `VARCHAR(200)` indicates the type of tag; `name` is the actual name of the tag. | No | If 1 or more tag columns are configured, use the configured tag columns; if no tag columns are configured and stable exists in TDengine, use the tags of the stable in TDengine; if no tag columns are configured and stable does not exist in TDengine, automatically add the following 2 tag columns: tag::VARCHAR(256)::point_id and tag::VARCHAR(256)::point_name |
| 9 | ts_col | The original timestamp column of the data point in TDengine | No | Among the 3 columns ts_col, request_ts_col, and received_ts_col, when 2 or more are configured, the leftmost one is used as the primary key in TDengine. |
| 10 | request_ts_col | The timestamp column in TDengine when the data point value is requested | No | Same as above |
| 11 | received_ts_col | The timestamp column in TDengine when the data point value is received | No | Same as above |
| 12 | ts_transform | The transformation function executed in taosX for the original timestamp of the data point | No | Do not transform the original timestamp of the data point uniformly |
| 13 | request_ts_transform | The transformation function executed in taosX for the request timestamp of the data point | No | Do not transform the request timestamp of the data point uniformly |
| 14 | received_ts_transform | The transformation function executed in taosX for the received timestamp of the data point | No | Do not transform the received timestamp of the data point uniformly |
| 15 | tag::VARCHAR(200)::name | The Tag column corresponding to the data point in TDengine. Here `tag` is a reserved keyword indicating that this column is a tag; `VARCHAR(200)` indicates the type of tag; `name` is the actual name of the tag. | No | If 1 or more tag columns are configured, use the configured tag columns; if no tag columns are configured and stable exists in TDengine, use the tags of the stable in TDengine; if no tag columns are configured and stable does not exist in TDengine, automatically add the following 2 tag columns: tag::VARCHAR(256)::point_id and tag::VARCHAR(256)::point_name |

(2) In the CSV Header, there cannot be duplicate columns;

@@ -138,7 +140,7 @@ Each Row in the CSV file configures an OPC data point. The rules for Rows are as
(1) Correspondence with columns in the Header

| Number | Column in Header | Type of Value | Value Range | Mandatory | Default Value |
|--------|------------------|---------------|-------------|-----------|---------------|
| 1 | point_id | String | Strings like `ns=3;i=1005`, must meet the OPC UA ID specification, i.e., include ns and id parts | Yes | |
| 2 | enable | int | 0: Do not collect this point, and delete the corresponding subtable in TDengine before the OPC DataIn task starts; 1: Collect this point, do not delete the subtable before the OPC DataIn task starts. | No | 1 |
| 3 | stable | String | Any string that meets the TDengine supertable naming convention; if the special character `.` exists, replace with underscore. If `{type}` exists: if type in the CSV file is not empty, replace with the value of type; if type is empty, replace with the original type of the collected value | Yes | |

@@ -148,10 +150,12 @@ Each Row in the CSV file configures an OPC data point. The rules for Rows are as
| 7 | type | String | Supported types include: b/bool/i8/tinyint/i16/smallint/i32/int/i64/bigint/u8/tinyint unsigned/u16/smallint unsigned/u32/int unsigned/u64/bigint unsigned/f32/float/f64/double/timestamp/timestamp(ms)/timestamp(us)/timestamp(ns)/json | No | Original type of the data point value |
| 8 | quality_col | String | Column name that meets TDengine naming convention | No | None |
| 9 | ts_col | String | Column name that meets TDengine naming convention | No | ts |
| 10 | received_ts_col | String | Column name that meets TDengine naming convention | No | rts |
| 11 | ts_transform | String | Supports +, -, *, /, % operators, for example: ts / 1000 * 1000 sets the last 3 digits of a timestamp in ms to 0; ts + 8 * 3600 * 1000 adds 8 hours to a timestamp in ms; ts - 8 * 3600 * 1000 subtracts 8 hours from a timestamp in ms | No | None |
| 12 | received_ts_transform | String | | No | None |
| 13 | tag::VARCHAR(200)::name | String | The value inside a tag, when the tag type is VARCHAR, can be in Chinese | No | NULL |
| 10 | request_ts_col | String | Column name that meets TDengine naming convention | No | qts |
| 11 | received_ts_col | String | Column name that meets TDengine naming convention | No | rts |
| 12 | ts_transform | String | Supports +, -, *, /, % operators, for example: ts / 1000 * 1000 sets the last 3 digits of a timestamp in ms to 0; ts + 8 * 3600 * 1000 adds 8 hours to a timestamp in ms; ts - 8 * 3600 * 1000 subtracts 8 hours from a timestamp in ms | No | None |
| 13 | request_ts_transform | String | Supports +, -, *, /, % operators, for example: qts / 1000 * 1000 sets the last 3 digits of a timestamp in ms to 0; qts + 8 * 3600 * 1000 adds 8 hours to a timestamp in ms; qts - 8 * 3600 * 1000 subtracts 8 hours from a timestamp in ms | No | None |
| 14 | received_ts_transform | String | Supports +, -, *, /, % operators, for example: qts / 1000 * 1000 sets the last 3 digits of a timestamp in ms to 0; qts + 8 * 3600 * 1000 adds 8 hours to a timestamp in ms; qts - 8 * 3600 * 1000 subtracts 8 hours from a timestamp in ms | No | None |
| 15 | tag::VARCHAR(200)::name | String | The value inside a tag, when the tag type is VARCHAR, can be in Chinese | No | NULL |

(2) `point_id` is unique throughout the DataIn task, meaning: in an OPC DataIn task, a data point can only be written to one subtable in TDengine. If you need to write a data point to multiple subtables, you need to create multiple OPC DataIn tasks;
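To make the header and row rules concrete, here is a minimal sketch of a points CSV; the point id, table names, and tag value are illustrative, not taken from the source:

```csv
point_id,enable,stable,tbname,type,ts_col,request_ts_col,received_ts_col,tag::VARCHAR(200)::location
"ns=3;i=1005",1,opcua_meters,meter_1005,f32,ts,qts,rts,beijing
```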
@@ -171,7 +175,7 @@ Data points can be filtered by configuring **Root Node ID**, **Namespace**, **Re

Configure **Supertable Name**, **Table Name** to specify the supertable and subtable where the data will be written.

Configure **Primary Key Column**: choose `origin_ts` to use the original timestamp of the OPC data point as the primary key in TDengine; choose `received_ts` to use the data's reception timestamp as the primary key in TDengine. Configure **Primary Key Alias** to specify the name of the TDengine timestamp column.
Configure **Primary Key Column**: choose `origin_ts` to use the original timestamp of the OPC data point as the primary key in TDengine; choose `request_ts` to use the data's request timestamp as the primary key in TDengine; choose `received_ts` to use the data's reception timestamp as the primary key in TDengine. Configure **Primary Key Alias** to specify the name of the TDengine timestamp column.

<figure>
<Image img={imgStep5} alt=""/>
@@ -82,7 +82,7 @@ The header is the first line of the CSV file, with the following rules:
(1) The header of the CSV can configure the following columns:

| No. | Column Name | Description | Required | Default Behavior |
|-----|-------------|-------------|----------|------------------|
| 1 | tag_name | The id of the data point on the OPC DA server | Yes | None |
| 2 | stable | The supertable in TDengine corresponding to the data point | Yes | None |
| 3 | tbname | The subtable in TDengine corresponding to the data point | Yes | None |

@@ -91,11 +91,13 @@ The header is the first line of the CSV file, with the following rules:
| 6 | value_transform | The transform function executed in taosX for the collected value of the data point | No | Do not perform a transform on the collected value |
| 7 | type | The data type of the collected value of the data point | No | Use the original type of the collected value as the data type in TDengine |
| 8 | quality_col | The column name in TDengine corresponding to the quality of the collected value | No | Do not add a quality column in TDengine |
| 9 | ts_col | The timestamp column in TDengine corresponding to the original timestamp of the data point | No | If both ts_col and received_ts_col are non-empty, use the former as the timestamp column; if one of ts_col or received_ts_col is non-empty, use the non-empty column as the timestamp column; if both are empty, use the original timestamp of the data point as the timestamp column in TDengine, with the default column name ts. |
| 10 | received_ts_col | The timestamp column in TDengine corresponding to the timestamp when the data point value was received | No | |
| 11 | ts_transform | The transform function executed in taosX for the original timestamp of the data point | No | Do not perform a transform on the original timestamp of the data point |
| 12 | received_ts_transform | The transform function executed in taosX for the received timestamp of the data point | No | Do not perform a transform on the received timestamp of the data point |
| 13 | tag::VARCHAR(200)::name | The Tag column in TDengine corresponding to the data point. Where `tag` is a reserved keyword, indicating that this column is a tag column; `VARCHAR(200)` indicates the type of this tag, which can also be other legal types; `name` is the actual name of this tag. | No | If configuring more than one tag column, use the configured tag columns; if no tag columns are configured, and stable exists in TDengine, use the tags of the stable in TDengine; if no tag columns are configured, and stable does not exist in TDengine, automatically add the following two tag columns by default: tag::VARCHAR(256)::point_id and tag::VARCHAR(256)::point_name |
| 9 | ts_col | The timestamp column in TDengine corresponding to the original timestamp of the data point | No | Among the 3 columns ts_col, request_ts_col, and received_ts_col, when 2 or more are configured, the leftmost one is used as the primary key in TDengine. |
| 10 | request_ts_col | The timestamp column in TDengine corresponding to the timestamp when the data point value was requested | No | Same as above |
| 11 | received_ts_col | The timestamp column in TDengine corresponding to the timestamp when the data point value was received | No | Same as above |
| 12 | ts_transform | The transform function executed in taosX for the original timestamp of the data point | No | Do not perform a transform on the original timestamp of the data point |
| 13 | request_ts_transform | The transform function executed in taosX for the request timestamp of the data point | No | Do not perform a transform on the request timestamp of the data point |
| 14 | received_ts_transform | The transform function executed in taosX for the received timestamp of the data point | No | Do not perform a transform on the received timestamp of the data point |
| 15 | tag::VARCHAR(200)::name | The Tag column in TDengine corresponding to the data point. Where `tag` is a reserved keyword, indicating that this column is a tag column; `VARCHAR(200)` indicates the type of this tag, which can also be other legal types; `name` is the actual name of this tag. | No | If configuring more than one tag column, use the configured tag columns; if no tag columns are configured, and stable exists in TDengine, use the tags of the stable in TDengine; if no tag columns are configured, and stable does not exist in TDengine, automatically add the following two tag columns by default: tag::VARCHAR(256)::point_id and tag::VARCHAR(256)::point_name |

(2) In the CSV Header, there cannot be duplicate columns;

@@ -112,7 +114,7 @@ Each Row in the CSV file configures an OPC data point. The rules for Rows are as
(1) Correspondence with columns in the Header

| Number | Column in Header | Type of Value | Range of Values | Mandatory | Default Value |
|--------|------------------|---------------|-----------------|-----------|---------------|
| 1 | tag_name | String | Strings like `root.parent.temperature`, must meet the OPC DA ID specification | Yes | |
| 2 | enable | int | 0: Do not collect this point, and delete the corresponding subtable in TDengine before the OPC DataIn task starts; 1: Collect this point, do not delete the subtable before the OPC DataIn task starts. | No | 1 |
| 3 | stable | String | Any string that meets the TDengine supertable naming convention; if there are special characters `.`, replace with underscore. If `{type}` exists: if type in the CSV file is not empty, replace with the value of type; if empty, replace with the original type of the collected value | Yes | |

@@ -122,10 +124,12 @@ Each Row in the CSV file configures an OPC data point. The rules for Rows are as
| 7 | type | String | Supported types include: b/bool/i8/tinyint/i16/smallint/i32/int/i64/bigint/u8/tinyint unsigned/u16/smallint unsigned/u32/int unsigned/u64/bigint unsigned/f32/float/f64/double/timestamp/timestamp(ms)/timestamp(us)/timestamp(ns)/json | No | Original type of data point value |
| 8 | quality_col | String | Column name that meets TDengine naming convention | No | None |
| 9 | ts_col | String | Column name that meets TDengine naming convention | No | ts |
| 10 | received_ts_col | String | Column name that meets TDengine naming convention | No | rts |
| 11 | ts_transform | String | Supports +, -, *, /, % operators, for example: ts / 1000 * 1000 sets the last 3 digits of a ms unit timestamp to 0; ts + 8 * 3600 * 1000 adds 8 hours to a ms precision timestamp; ts - 8 * 3600 * 1000 subtracts 8 hours from a ms precision timestamp | No | None |
| 12 | received_ts_transform | String | | No | None |
| 13 | tag::VARCHAR(200)::name | String | The value in tag, when the tag type is VARCHAR, it can be in Chinese | No | NULL |
| 10 | request_ts_col | String | Column name that meets TDengine naming convention | No | rts |
| 11 | received_ts_col | String | Column name that meets TDengine naming convention | No | rts |
| 12 | ts_transform | String | Supports +, -, *, /, % operators, for example: ts / 1000 * 1000 sets the last 3 digits of a ms unit timestamp to 0; ts + 8 * 3600 * 1000 adds 8 hours to a ms precision timestamp; ts - 8 * 3600 * 1000 subtracts 8 hours from a ms precision timestamp | No | None |
| 13 | request_ts_transform | String | | No | None |
| 14 | received_ts_transform | String | | No | None |
| 15 | tag::VARCHAR(200)::name | String | The value in tag, when the tag type is VARCHAR, it can be in Chinese | No | NULL |

(2) `tag_name` is unique throughout the DataIn task, that is: in an OPC DataIn task, a data point can only be written to one subtable in TDengine. If you need to write a data point to multiple subtables, you need to create multiple OPC DataIn tasks;

@@ -145,7 +149,7 @@ Data points can be filtered by configuring the **Root Node ID** and **Regular Ex

Configure **Supertable Name** and **Table Name** to specify the supertable and subtable where the data will be written.

Configure **Primary Key Column**: choosing `origin_ts` uses the original timestamp of the OPC data point as the primary key in TDengine; choosing `received_ts` uses the timestamp when the data is received as the primary key. Configure **Primary Key Alias** to specify the name of the TDengine timestamp column.
Configure **Primary Key Column**: choosing `origin_ts` uses the original timestamp of the OPC data point as the primary key in TDengine; choosing `request_ts` uses the timestamp when the data is requested as the primary key; choosing `received_ts` uses the timestamp when the data is received as the primary key. Configure **Primary Key Alias** to specify the name of the TDengine timestamp column.

<figure>
<Image img={imgStep4} alt=""/>
@ -339,20 +339,272 @@ Helm operates Kubernetes using kubectl and kubeconfig configurations, which can
|
|||
The TDengine Chart has not yet been released to the Helm repository, it can currently be downloaded directly from GitHub:
|
||||
|
||||
```shell
|
||||
wget https://github.com/taosdata/TDengine-Operator/raw/refs/heads/3.0/helm/tdengine-enterprise-3.5.0.tgz
|
||||
wget https://github.com/taosdata/TDengine-Operator/raw/refs/heads/3.0/helm/tdengine-3.5.0.tgz
|
||||
```
|
||||
|
||||
Note that it's for the enterprise edition, and the community edition is not yet available.
|
||||
|
||||
Follow the steps below to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
# Edit the values.yaml file to set the topology of the cluster
|
||||
vim values.yaml
|
||||
helm install tdengine tdengine-enterprise-3.5.0.tgz -f values.yaml
|
||||
helm install tdengine tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
#### Case 1: Simple 1-node Deployment
|
||||
If you are using community images, you can use the following command to install TDengine with Helm Chart:
|
||||
|
||||
<details>
|
||||
<summary>Helm Chart Use Cases for Community</summary>
|
||||
|
||||
#### Community Case 1: Simple 1-node Deployment
|
||||
|
||||
The following is a simple example of deploying a single-node TDengine cluster using Helm.
|
||||
|
||||
```yaml
|
||||
# This example is a simple deployment with one server replica.
|
||||
name: "tdengine"
|
||||
|
||||
image:
|
||||
repository: # Leave a trailing slash for the repository, or "" for no repository
|
||||
server: tdengine/tdengine:latest
|
||||
|
||||
# Set timezone here, not in taoscfg
|
||||
timezone: "Asia/Shanghai"
|
||||
|
||||
labels:
|
||||
app: "tdengine"
|
||||
# Add more labels as needed.
|
||||
|
||||
services:
|
||||
server:
|
||||
type: ClusterIP
|
||||
replica: 1
|
||||
ports:
|
||||
# TCP range required
|
||||
tcp: [6041, 6030, 6060]
|
||||
# UDP range, optional
|
||||
udp:
|
||||
volumes:
|
||||
- name: data
|
||||
mountPath: /var/lib/taos
|
||||
spec:
|
||||
storageClassName: "local-path"
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
resources:
|
||||
requests:
|
||||
storage: "10Gi"
|
||||
- name: log
|
||||
mountPath: /var/log/taos/
|
||||
spec:
|
||||
storageClassName: "local-path"
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
resources:
|
||||
requests:
|
||||
storage: "10Gi"
|
||||
files:
|
||||
- name: cfg # must be lower case.
|
||||
mountPath: /etc/taos/taos.cfg
|
||||
content: |
|
||||
dataDir /var/lib/taos/
|
||||
logDir /var/log/taos/
|
||||
```
|
||||
|
||||
Let's explain the above configuration:
|
||||
|
||||
- name: The name of the deployment, here it is "tdengine".
|
||||
- image:
|
||||
- repository: The image repository address, remember to leave a trailing slash for the repository, or set it to an empty string to use docker.io.
|
||||
- server: The specific name and tag of the server image. You need to ask your business partner for the TDengine Enterprise image.
|
||||
- timezone: Set the timezone, here it is "Asia/Shanghai".
|
||||
- labels: Add labels to the deployment, here is an app label with the value "tdengine", more labels can be added as needed.
|
||||
- services:
|
||||
- server: Configure the server service.
|
||||
- type: The service type, here it is **ClusterIP**.
|
||||
- replica: The number of replicas, here it is 1.
|
||||
- ports: Configure the ports of the service.
|
||||
- tcp: The required TCP port range, here it is [6041, 6030, 6060].
|
||||
- udp: The optional UDP port range, which is not configured here.
|
||||
- volumes: Configure the volumes.
|
||||
- name: The name of the volume, here there are two volumes, data and log.
|
||||
- mountPath: The mount path of the volume.
|
||||
- spec: The specification of the volume.
|
||||
- storageClassName: The storage class name, here it is **local-path**.
|
||||
- accessModes: The access mode, here it is **ReadWriteOnce**.
|
||||
- resources.requests.storage: The requested storage size, here it is **10Gi**.
|
||||
- files: Configure the files to mount in TDengine server.
|
||||
- name: The name of the file, here it is **cfg**.
|
||||
- mountPath: The mount path of the file, which is **taos.cfg**.
|
||||
- content: The content of the file, here the **dataDir** and **logDir** are configured.
|
||||
|
||||
After configuring the values.yaml file, use the following command to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
helm install simple tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
After installation, you can see the instructions to see the status of the TDengine cluster:
|
||||
|
||||
```shell
|
||||
NAME: simple
|
||||
LAST DEPLOYED: Sun Feb 9 13:40:00 2025 default
|
||||
STATUS: deployed
|
||||
REVISION: 1
|
||||
TEST SUITE: None
|
||||
NOTES:
|
||||
1. Get first POD name:
|
||||
|
||||
export POD_NAME=$(kubectl get pods --namespace default \
|
||||
-l "app.kubernetes.io/name=tdengine,app.kubernetes.io/instance=simple" -o jsonpath="{.items[0].metadata.name}")
|
||||
|
||||
2. Show dnodes/mnodes:
|
||||
|
||||
kubectl --namespace default exec $POD_NAME -- taos -s "show dnodes; show mnodes"
|
||||
|
||||
3. Run into TDengine CLI:
|
||||
|
||||
kubectl --namespace default exec -it $POD_NAME -- taos
|
||||
```
|
||||
|
||||
Follow the instructions to check the status of the TDengine cluster:
|
||||
|
||||
```shell
|
||||
root@u1-58:/data1/projects/helm# kubectl --namespace default exec $POD_NAME -- taos -s "show dnodes; show mnodes"
|
||||
Welcome to the TDengine Command Line Interface, Client Version:3.3.5.8
|
||||
Copyright (c) 2023 by TDengine, all rights reserved.
|
||||
|
||||
taos> show dnodes; show mnodes
|
||||
id | endpoint | vnodes | support_vnodes | status | create_time | reboot_time | note |
|
||||
=============================================================================================================================================================================
|
||||
1 | oss-tdengine-0.oss-tdengine... | 0 | 21 | ready | 2025-03-12 19:05:42.224 | 2025-03-12 19:05:42.044 | |
|
||||
Query OK, 1 row(s) in set (0.002545s)
|
||||
|
||||
id | endpoint | role | status | create_time | role_time |
|
||||
==================================================================================================================================
|
||||
1 | oss-tdengine-0.oss-tdengine... | leader | ready | 2025-03-12 19:05:42.239 | 2025-03-12 19:05:42.137 |
|
||||
Query OK, 1 row(s) in set (0.001343s)
|
||||
```
|
||||
|
||||
To clean up the TDengine cluster, use the following command:
|
||||
|
||||
```shell
|
||||
helm uninstall simple
|
||||
kubectl delete pvc -l app.kubernetes.io/instance=simple
|
||||
```
|
||||
|
||||
#### Community Case 2: 3-replica Deployment with Single taosX
|
||||
|
||||
```yaml
|
||||
# This example shows how to deploy a 3-replica TDengine cluster with separate taosx/explorer service.
|
||||
# Users should know that the explorer/taosx service is not cluster-ready, so it is recommended to deploy it separately.
|
||||
name: "tdengine"
|
||||
|
||||
image:
|
||||
repository: # Leave a trailing slash for the repository, or "" for no repository
|
||||
server: tdengine/tdengine:latest
|
||||
|
||||
# Set timezone here, not in taoscfg
|
||||
timezone: "Asia/Shanghai"
|
||||
|
||||
labels:
|
||||
# Add more labels as needed.
|
||||
|
||||
services:
|
||||
server:
|
||||
type: ClusterIP
|
||||
replica: 3
|
||||
ports:
|
||||
# TCP range required
|
||||
tcp: [6041, 6030]
|
||||
# UDP range, optional
|
||||
udp:
|
||||
volumes:
|
||||
- name: data
|
||||
mountPath: /var/lib/taos
|
||||
spec:
|
||||
storageClassName: "local-path"
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: "10Gi"
|
||||
- name: log
|
||||
mountPath: /var/log/taos/
|
||||
spec:
|
||||
storageClassName: "local-path"
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: "10Gi"
|
||||
```
|
||||
|
||||
You can see that the configuration is similar to the first case, with the addition of the taosx configuration. The taosx service uses storage settings similar to the server service, and the server service is configured with 3 replicas. Since the taosx service is not cluster-ready, it is recommended to deploy it separately.
|
||||
|
||||
After configuring the values.yaml file, use the following command to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
helm install replica3 tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
To clean up the TDengine cluster, use the following command:
|
||||
|
||||
```shell
|
||||
helm uninstall replica3
|
||||
kubectl delete pvc -l app.kubernetes.io/instance=replica3
|
||||
```
|
||||
|
||||
You can use the following commands to expose the explorer service externally via an Ingress:
|
||||
|
||||
```shell
|
||||
tee replica3-ingress.yaml <<EOF
|
||||
# Ingress manifest exposing the explorer service of the 3-replica TDengine deployment
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: replica3-ingress
|
||||
namespace: default
|
||||
spec:
|
||||
rules:
|
||||
- host: replica3.local.tdengine.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: replica3-tdengine-taosx
|
||||
port:
|
||||
number: 6060
|
||||
EOF
|
||||
|
||||
kubectl apply -f replica3-ingress.yaml
|
||||
```
|
||||
|
||||
Use `kubectl get ingress` to view the ingress service.
|
||||
|
||||
```shell
|
||||
root@server:/data1/projects/helm# kubectl get ingress
|
||||
NAME CLASS HOSTS ADDRESS PORTS AGE
|
||||
replica3-ingress nginx replica3.local.tdengine.com 192.168.1.58 80 48m
|
||||
```
|
||||
|
||||
You can configure the domain name resolution to point to the ingress service's external IP address. For example, add the following line to the hosts file:
|
||||
|
||||
```conf
|
||||
192.168.1.58 replica3.local.tdengine.com
|
||||
```
|
||||
|
||||
Now you can access the explorer service through the domain name `replica3.local.tdengine.com`.
|
||||
|
||||
```shell
|
||||
curl http://replica3.local.tdengine.com
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
With TDengine Enterprise images, you can install TDengine with the Helm chart as shown in the following use cases:
|
||||
|
||||
<details>
|
||||
<summary>Helm Chart Use Cases for Enterprise</summary>
|
||||
|
||||
#### Enterprise Case 1: Simple 1-node Deployment
|
||||
|
||||
The following is a simple example of deploying a single-node TDengine cluster using Helm.
|
||||
|
||||
|
@ -435,7 +687,7 @@ Let's explain the above configuration:
|
|||
After configuring the values.yaml file, use the following command to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
helm install simple tdengine-enterprise-3.5.0.tgz -f values.yaml
|
||||
helm install simple tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
After installation, the output shows instructions for checking the status of the TDengine cluster:
|
||||
|
@ -487,7 +739,7 @@ helm uninstall simple
|
|||
kubectl delete pvc -l app.kubernetes.io/instance=simple
|
||||
```
|
||||
|
||||
#### Case 2: Tiered-Storage Deployment
|
||||
#### Enterprise Case 2: Tiered-Storage Deployment
|
||||
|
||||
The following is an example of deploying a TDengine cluster with tiered storage using Helm.
|
||||
|
||||
|
@ -563,10 +815,10 @@ You can see that the configuration is similar to the previous one, with the addi
|
|||
After configuring the values.yaml file, use the following command to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
helm install tiered tdengine-enterprise-3.5.0.tgz -f values.yaml
|
||||
helm install tiered tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
#### Case 3: 2-replica Deployment
|
||||
#### Enterprise Case 3: 2-replica Deployment
|
||||
|
||||
TDengine supports 2-replica deployment with an arbitrator, which can be configured as follows:
|
||||
|
||||
|
@ -634,7 +886,7 @@ services:
|
|||
|
||||
You can see that the configuration is similar to the first one, with the addition of the arbitrator configuration. The arbitrator service uses the same storage as the server service, and the server service is configured with 2 replicas (the arbitrator is fixed at 1 replica and cannot be changed).
|
||||
|
||||
#### Case 4: 3-replica Deployment with Single taosX
|
||||
#### Enterprise Case 4: 3-replica Deployment with Single taosX
|
||||
|
||||
```yaml
|
||||
# This example shows how to deploy a 3-replica TDengine cluster with separate taosx/explorer service.
|
||||
|
@ -761,7 +1013,7 @@ You can see that the configuration is similar to the first one, with the additio
|
|||
After configuring the values.yaml file, use the following command to install the TDengine Chart:
|
||||
|
||||
```shell
|
||||
helm install replica3 tdengine-enterprise-3.5.0.tgz -f values.yaml
|
||||
helm install replica3 tdengine-3.5.0.tgz -f values.yaml
|
||||
```
|
||||
|
||||
You can use the following commands to expose the explorer service externally via an Ingress:
|
||||
|
@ -810,3 +1062,5 @@ Now you can access the explorer service through the domain name `replica3.local.
|
|||
```shell
|
||||
curl http://replica3.local.tdengine.com
|
||||
```
|
||||
|
||||
</details>
|
||||
|
|
|
@ -72,8 +72,16 @@ TDengine Enterprise implements incremental backup and recovery of data by using
|
|||
7. **Directory:** Enter the full path of the directory in which you want to store backup files.
|
||||
8. **Backup file max size:** Enter the maximum size of a single backup file. If the total size of your backup exceeds this number, the backup is split into multiple files.
|
||||
9. **Compression level:** Select **fastest** for the fastest performance but lowest compression ratio, **best** for the highest compression ratio but slowest performance, or **balanced** for a combination of performance and compression.
|
||||
|
||||
4. Click **Confirm** to create the backup plan.
|
||||
4. Users can enable S3 dumping to upload backup files to the S3 storage service. To enable S3 dumping, the following information needs to be provided:
|
||||
1. **Endpoint**: The address of the S3 endpoint.
|
||||
2. **Access Key ID**: The access key ID for authentication.
|
||||
3. **Secret Access Key**: The secret access key for authentication.
|
||||
4. **Bucket**: The name of the target bucket.
|
||||
5. **Region**: The region where the bucket is located.
|
||||
6. **Object Prefix**: A prefix for backup file objects, similar to a directory path on S3.
|
||||
7. **Backup Retention Period**: The retention duration for local backups. Files older than `current time - backup_retention_period` are uploaded to S3.
|
||||
8. **Backup Retention Count**: The number of local backups to retain. Only the latest `backup_retention_size` backup files are kept locally.
|
||||
5. Click **Confirm** to create the backup plan.
|
||||
|
||||
You can view your backup plans and modify, clone, or delete them using the buttons in the **Operation** column. Click **Refresh** to update the status of your plans. Note that you must stop a backup plan before you can delete it. You can also click **View** in the **Backup File** column to view the backup record points and files created by each plan.
|
||||
|
||||
|
|
|
@ -14,7 +14,7 @@ Power BI is a business analytics tool provided by Microsoft. By configuring the
|
|||
|
||||
## Configure Data Source
|
||||
|
||||
**Step 1**, Search and open the [ODBC Data Source (64 bit)] management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#Installation).
|
||||
**Step 1**, Search and open the [ODBC Data Source (64 bit)] management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#installation).
|
||||
|
||||
**Step 2**, Open Power BI and log in, then click [Home] -> [Get Data] -> [Other] -> [ODBC] -> [Connect] to add the data source.
|
||||
|
||||
|
|
|
@ -13,11 +13,11 @@ Prepare the following environment:
|
|||
- TDengine 3.3.5.8 or a later version is installed and running normally (both Enterprise and Community versions are available).
|
||||
- taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/).
|
||||
- Install and run Tableau Desktop (if not installed, download the 64-bit Windows version from [Download Tableau Desktop](https://www.tableau.com/products/desktop/download)). For installation instructions, refer to [Tableau Desktop](https://www.tableau.com).
|
||||
- Download the latest Windows operating system X64 client driver from the TDengine official website and install it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#Installation).
|
||||
- Download the latest Windows operating system X64 client driver from the TDengine official website and install it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#installation).
|
||||
|
||||
## Configure Data Source
|
||||
|
||||
**Step 1**, Search and open the "ODBC Data Source (64 bit)" management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#Installation).
|
||||
**Step 1**, Search and open the "ODBC Data Source (64 bit)" management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#installation).
|
||||
|
||||
:::tip
|
||||
Note that when configuring the ODBC data source for Tableau, the [Database] configuration item on the TDengine ODBC data source configuration page is required; select a database that can be connected successfully.
|
||||
|
@ -27,19 +27,19 @@ It should be noted that when configuring the ODBC data source for Tableau, the [
|
|||
|
||||
**Step 3**, Click the `DSN` radio button, then select the configured data source (MyTDengine), and click the `Connect` button. After the connection is successful, clear the contents of the connection string attachment field, and finally click the `Sign In` button.
|
||||
|
||||

|
||||

|
||||
|
||||
## Data Analysis
|
||||
|
||||
**Step 1**, On the workbook page, the connected data sources are displayed. Click the database dropdown list to show the databases available for analysis, then click the search button in the table options to list all tables in the database. Finally, drag the table to be analyzed into the area on the right to display its structure.
|
||||
|
||||

|
||||

|
||||
|
||||
**Step 2**, Click the `Update Now` button below to display the data in the table.
|
||||
|
||||

|
||||

|
||||
|
||||
**Step 3**, Click "Worksheet" at the bottom of the window to open the data analysis view, which displays all fields of the analyzed table. Drag the fields onto the rows and columns to display a chart.
|
||||
|
||||

|
||||

|
||||
|
||||
|
|
|
@ -13,30 +13,30 @@ Prepare the following environment:
|
|||
- TDengine 3.3.5.8 or a later version is installed and running normally (both Enterprise and Community versions are available).
|
||||
- taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/).
|
||||
- Install and run Excel. If not installed, please download and install it. For specific instructions, please refer to Microsoft's official documentation.
|
||||
- Download the latest Windows operating system X64 client driver from the TDengine official website and install it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#Installation).
|
||||
- Download the latest Windows operating system X64 client driver from the TDengine official website and install it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#installation).
|
||||
|
||||
## Configure Data Source
|
||||
|
||||
**Step 1**, Search and open the [ODBC Data Source (64 bit)] management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#Installation).
|
||||
**Step 1**, Search and open the [ODBC Data Source (64 bit)] management tool in the Start menu of the Windows operating system and configure it, refer to [Install ODBC Driver](../../../tdengine-reference/client-libraries/odbc/#installation).
|
||||
|
||||
**Step 2**, Start Excel in the Windows system environment, then select [Data] -> [Get Data] -> [From Other Sources] -> [From ODBC].
|
||||
|
||||

|
||||

|
||||
|
||||
**Step 3**, In the pop-up window, select the data source you need to connect to from the drop-down list of [Data source name (DSN)], and then click the [OK] button.
|
||||
|
||||

|
||||

|
||||
|
||||
**Step 4**, Enter the username and password for TDengine.
|
||||
|
||||

|
||||

|
||||
|
||||
**Step 5**, In the pop-up [Navigator] dialog box, select the database tables you want to load, and then click [Load] to complete the data loading.
|
||||
|
||||

|
||||

|
||||
|
||||
## Data Analysis
|
||||
|
||||
Select the imported data. On the [Insert] tab, choose the column chart, and then configure the data fields in the [PivotChart Fields] pane on the right.
|
||||
|
||||

|
||||

|
||||
|
|
|
@ -0,0 +1,82 @@
|
|||
---
|
||||
sidebar_label: FineBI
|
||||
title: Integration With FineBI
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
Fanruan is a technology company specializing in the field of business intelligence and data analytics. With its self-developed core products, FineBI and FineReport, the company has established a leading position in the industry. Fanruan's BI tools are widely adopted by enterprises across various sectors, empowering users to achieve data visualization analysis, report generation, and data-driven decision support.
|
||||
|
||||
By using the TDengine Java connector, FineBI can quickly access the data in TDengine. Users can directly connect to the TDengine database in FineBI, obtain time-series data for analysis, and create visual reports; the entire process requires no coding.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- TDengine 3.3.4.0 or a later version is installed and running normally (both Enterprise and Community versions are available).
|
||||
- taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/).
|
||||
- Install FineBI (if not installed, download and install it from [Download FineBI](https://intl.finebi.com/download)).
|
||||
- Download the fine_conf_entity plugin to support the addition of JDBC drivers, [Download link](https://market.fanruan.com/plugin/1052a471-0239-4cd8-b832-045d53182c5d).
|
||||
- Install the JDBC driver. Download the `TDengine JDBC connector` file `taos-jdbcdriver-3.4.0-dist.jar` or a higher version from `maven.org`.
|
||||
|
||||
## Configure Data Source
|
||||
|
||||
**Step 1**, In the `db.script` configuration file of the FineBI server, find the `SystemConfig.driverUpload` configuration item and change its value to true, as sketched below.
|
||||
|
||||
- Windows system: The path of the configuration file is webapps/webroot/WEB-INF/embed/finedb/db.script under the installation directory.
|
||||
- Linux/Mac system: The path of the configuration file is /usr/local/FineBI6.1/webapps/webroot/WEB-INF/embed/finedb/db.script.
|
||||
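|
||||
As a rough sketch of what to look for (the FINE_CONF_ENTITY table name and the entry format are assumptions that may vary across FineBI versions, so verify them against your own db.script), the item is typically an HSQLDB-style INSERT statement whose value you flip from false to true:
|
||||
|
||||
```sql
|
||||
-- Hypothetical db.script entry; verify the exact table name in your FineBI version.
|
||||
-- Before:
|
||||
INSERT INTO FINE_CONF_ENTITY VALUES('SystemConfig.driverUpload','false')
|
||||
-- After (allows uploading JDBC drivers from the web UI):
|
||||
INSERT INTO FINE_CONF_ENTITY VALUES('SystemConfig.driverUpload','true')
|
||||
```
|
||||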
|
||||
**Step 2**, Start the FineBI service. Enter `http://ip:37799/webroot/decision` in the browser, where "ip" is the IP address of the FineBI server.
|
||||
|
||||
**Step 3**, After logging in to the FineBI Web page, click [System Management] -> [Plugin Management]. In the [Store App] on the right side, click [Install From Local] and select the downloaded `fine_conf_entity` plugin for installation.
|
||||
|
||||

|
||||
|
||||
**Step 4**, Click [System Management] -> [Data Connection] -> [Data Connection Management]. On the right-hand page, click the [Driver Management] button to open the configuration page. Then click the [New Driver] button, and in the pop-up window, enter a name (for example, `tdengine-websocket`) to configure the JDBC driver.
|
||||
|
||||

|
||||
|
||||
**Step 5**, On the driver configuration page, click the [Upload File] button. Select the downloaded TDengine Java Connector (e.g., `taos-jdbcdriver-3.4.0-dist.jar`) for uploading. After the upload is complete, select `com.taosdata.jdbc.ws.WebSocketDriver` from the drop-down list of [Driver], and then click [Save].
|
||||
|
||||

|
||||
|
||||
**Step 6**, On the "Data Connection Management" page, click the [New Data Connection] button. Subsequently, click "Others", and then on the right-side page, click "Other JDBC" to perform the connection configuration.
|
||||
|
||||

|
||||
|
||||
**Step 7**, On the configuration page, first enter the name of the data connection. Then, select "Custom" in the [Driver] option and choose the configured driver from the drop-down list (e.g., `com.taosdata.jdbc.ws.WebSocketDriver (tdengine-websocket)`). After that, configure the "Data Connection URL" (e.g., `jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata&fineBIDialect=mysql`). Once the settings are completed, click [Test Connection] in the top-right corner to test the connection. After the verification is successful, click [Save] to finish the configuration.
|
||||
|
||||
:::tip
|
||||
The `fineBIDialect=mysql` setting tells FineBI to adopt the SQL dialect rules of the MySQL database, that is, to parse and execute the relevant queries and operations the way MySQL handles SQL statements.
|
||||
:::
|
||||
|
||||

|
||||
|
||||
## Data Analysis
|
||||
|
||||
### Data Preparation
|
||||
|
||||
**Step 1**, Click [Public Data]. On the right-hand page, click [New Folder] to create a folder (e.g., TDengine). Then, click the [+] button on the right side of the folder to create a "Database Table" dataset or an "SQL Dataset".
|
||||
|
||||

|
||||
|
||||
**Step 2**, Click "Database Table" to open the database table selection page. In the "Data Connection" section on the left, select the previously created connection. Then, all the tables in the database of the current connection will be displayed on the right. Select the table you need to load (e.g., meters), and click [OK]. The data in the meters table will then be displayed.
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
**Step 3**, Click "SQL Dataset" to open the configuration page for the SQL dataset. First, enter the table name (used for display on the FineBI page). Then, select the previously created connection from the drop-down list of "Data from Data Connection". After that, enter the SQL statement and click "Preview" to view the query results. Finally, click [OK] to successfully create the SQL dataset.
|
||||
|
||||

|
||||
|
||||
### Smart Meter Example
|
||||
|
||||
**Step 1**, Click [My Analysis]. On the right-hand page, click [New Folder] to create a folder (for example, `TDengine`). Then, click the [+] button on the right side of the folder to create an "Analysis Subject".
|
||||
|
||||

|
||||
|
||||
**Step 2**, On the analysis subject page, select the dataset (for example, `meters`) and then click the [OK] button to complete the association of the dataset.
|
||||
|
||||

|
||||
|
||||
**Step 3**, Click the [Component] tab at the bottom of the analysis subject page to open the chart configuration page. Drag the fields to the horizontal axis or the vertical axis, and then the chart will be displayed.
|
||||
|
||||

|
Before Width: | Height: | Size: 300 KiB |
After Width: | Height: | Size: 470 KiB |
Before Width: | Height: | Size: 761 KiB |
After Width: | Height: | Size: 324 KiB |
Before Width: | Height: | Size: 1.3 MiB |
After Width: | Height: | Size: 769 KiB |
Before Width: | Height: | Size: 659 KiB |
After Width: | Height: | Size: 286 KiB |
Before Width: | Height: | Size: 505 KiB |
After Width: | Height: | Size: 205 KiB |
After Width: | Height: | Size: 50 KiB |
After Width: | Height: | Size: 19 KiB |
After Width: | Height: | Size: 21 KiB |
After Width: | Height: | Size: 38 KiB |
After Width: | Height: | Size: 44 KiB |
After Width: | Height: | Size: 33 KiB |
After Width: | Height: | Size: 34 KiB |
After Width: | Height: | Size: 39 KiB |
After Width: | Height: | Size: 110 KiB |
After Width: | Height: | Size: 27 KiB |
After Width: | Height: | Size: 38 KiB |
After Width: | Height: | Size: 48 KiB |
Before Width: | Height: | Size: 243 KiB |
After Width: | Height: | Size: 389 KiB |
Before Width: | Height: | Size: 255 KiB |
After Width: | Height: | Size: 543 KiB |
Before Width: | Height: | Size: 226 KiB |
After Width: | Height: | Size: 593 KiB |
Before Width: | Height: | Size: 107 KiB |
After Width: | Height: | Size: 189 KiB |
|
@ -44,6 +44,7 @@ The TDengine client driver provides all the APIs needed for application programm
|
|||
|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages|
|
||||
|minSlidingTime | |Supported, effective immediately |Internal parameter, minimum allowable value for sliding|
|
||||
|minIntervalTime | |Supported, effective immediately |Internal parameter, minimum allowable value for interval|
|
||||
|compareAsStrInGreatest | v3.3.6.0 |Supported, effective immediately |When the greatest and least functions have both numeric and string types as parameters, this integer option controls the type conversion rule for comparison. 1: uniformly convert to strings for comparison; 0: uniformly convert to numeric types for comparison.|
|
||||
|
||||
### Writing Related
|
||||
|
||||
|
|
|
@ -371,10 +371,14 @@ Specify the configuration parameters for tag and data columns in `super_tables`
|
|||
|
||||
### Query Parameters
|
||||
|
||||
In query scenarios, `filetype` must be set to `query`.
|
||||
`filetype` must be set to `query`.
|
||||
|
||||
`query_mode` specifies the connection method:
|
||||
- "taosc": Native.
|
||||
- "rest": RESTful.
|
||||
|
||||
`query_times` specifies the number of times to run the query, numeric type.
|
||||
|
||||
Query scenarios can control the execution of slow queries with the `kill_slow_query_threshold` and `kill_slow_query_interval` parameters: queries whose execution time exceeds the threshold (in seconds) are killed by taosBenchmark, and the interval (in seconds) is the sleep time inserted between kills to avoid continuous slow queries consuming CPU.
|
||||
|
||||
For other common parameters, see [General Configuration Parameters](#general-configuration-parameters).
|
||||
|
||||
|
@ -387,8 +391,21 @@ Configuration parameters for querying specified tables (can specify supertables,
|
|||
The total number of queries (`General Query`) = the number of `sqls` * `query_times` * `threads`
|
||||
`Mixed Query`:
|
||||
All SQL statements in `sqls` are divided into `threads` groups, with each thread executing one group. Each SQL statement needs to execute `query_times` queries.
|
||||
The total number of queries(`Mixed Query`) = the number of `sqls` * `query_times`
|
||||
The total number of queries (`Mixed Query`) = the number of `sqls` * `query_times`.
|
||||
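|
||||
For example, with 6 statements in `sqls`, `query_times` = 10, and `threads` = 3, a `General Query` issues 6 * 10 * 3 = 180 queries in total, while a `Mixed Query` issues 6 * 10 = 60 queries, with the 6 statements split across the 3 threads.
|
||||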
|
||||
- **batch_query** : Batch query switch.
|
||||
"yes": indicates that it is enabled.
|
||||
"no": indicates that it is disabled; any other value reports an error.
|
||||
Batch query divides all SQL statements in `sqls` into `threads` groups, with each thread executing one group.
|
||||
Each SQL statement is executed only once per round; after all threads finish, the main thread checks whether the `query_interval` parameter is set, sleeps for the specified time if required, then restarts each thread group and repeats the process until the number of queries is exhausted.
|
||||
Functional limitations:
|
||||
- Only supports scenarios where `mixed_query` is set to 'yes'.
|
||||
- RESTful queries are not supported, meaning `query_mode` cannot be 'rest'.
|
||||
|
||||
- **query_interval** : Query interval, in milliseconds, default is 0.
|
||||
When the `batch_query` switch is on, this is the interval after each batch of queries completes; when it is off, it is the interval between individual SQL query completions.
|
||||
If the query execution time exceeds the interval, no additional wait occurs; if it is less than the interval, the thread waits until the interval time has elapsed.
|
||||
|
||||
- **threads** : Number of threads executing the SQL query, default is 1.
|
||||
- **sqls**:
|
||||
- **sql**: The SQL command to execute, required.
|
||||
|
|
|
@ -20,6 +20,7 @@ table_options:
|
|||
table_option: {
|
||||
COMMENT 'string_value'
|
||||
| SMA(col_name [, col_name] ...)
|
||||
| KEEP value
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -34,6 +35,7 @@ table_option: {
|
|||
- TAGS can have up to 128 columns, at least 1, with a total length not exceeding 16 KB.
|
||||
4. For the use of `ENCODE` and `COMPRESS`, please refer to [Column Compression](../manage-data-compression/).
|
||||
5. For explanations of the parameters in table_option, please refer to [Table SQL Description](../manage-tables/).
|
||||
6. The keep parameter in table_option only takes effect for super tables. For a detailed explanation of the keep parameter, please refer to [Database Description](02-database.md). The only difference is that a super table's keep parameter does not affect query results immediately, but only takes effect after compaction; see the sketch below.
|
||||
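|
||||
As a minimal sketch (assuming the familiar smart-meter schema used elsewhere in these docs), a table-level keep can be set when creating the super table:
|
||||
|
||||
```sql
|
||||
-- Sketch: super-table-level KEEP (super tables only); the retention change
|
||||
-- affects query results only after compaction.
|
||||
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT)
|
||||
  TAGS (location VARCHAR(64)) KEEP 3650;
|
||||
```
|
||||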
|
||||
## View Supertables
|
||||
|
||||
|
@ -144,6 +146,7 @@ alter_table_options:
|
|||
|
||||
alter_table_option: {
|
||||
COMMENT 'string_value'
|
||||
| KEEP value
|
||||
}
|
||||
|
||||
```
|
||||
|
|
|
@ -276,6 +276,15 @@ TDengine supports INNER JOIN based on the timestamp primary key, with the follow
|
|||
5. Both sides of JOIN support subqueries.
|
||||
6. Does not support mixing with the FILL clause.
|
||||
|
||||
## INTERP
|
||||
The INTERP clause is a dedicated syntax for the [INTERP function](../function/#interp). When an SQL statement contains an INTERP clause, it can only query the INTERP function and cannot use other functions. The INTERP clause also cannot be used together with window clauses (window_clause) or GROUP BY clauses (group_by_clause). The INTERP function must be used with the RANGE, EVERY, and FILL clauses; stream computing does not support RANGE but requires the EVERY and FILL keywords. A complete example follows the list below.
|
||||
- The output time range for INTERP is specified by the RANGE(timestamp1, timestamp2) field, which must satisfy timestamp1 \<= timestamp2. Here, timestamp1 is the start value of the output time range, i.e., if the conditions for interpolation are met at timestamp1, then timestamp1 is the first record output, and timestamp2 is the end value of the output time range, i.e., the timestamp of the last record output cannot be greater than timestamp2.
|
||||
- INTERP determines the number of results within the output time range based on the EVERY(time_unit) field, starting from timestamp1 and interpolating at fixed intervals of time (time_unit value), where time_unit can be time units: 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), 1w (weeks). For example, EVERY(500a) will interpolate the specified data every 500 milliseconds.
|
||||
- INTERP determines how to interpolate at each time point that meets the output conditions based on the FILL field. For how to use the FILL clause, refer to [FILL Clause](../time-series-extensions/)
|
||||
- INTERP can interpolate at a single time point specified in the RANGE field, in which case the EVERY field can be omitted. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
|
||||
- INTERP query supports NEAR FILL mode, i.e., when FILL is needed, it uses the data closest to the current time point for interpolation. When the timestamps before and after are equally close to the current time slice, FILL the previous row's value. This mode is not supported in stream computing and window queries. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', '2023-01-01 00:10:00') FILL(NEAR).(Supported from version 3.3.4.9).
|
||||
- INTERP `RANGE` clause supports the expansion of the time range (supported from version 3.3.4.9), such as `RANGE('2023-01-01 00:00:00', 10s)` means to find data 10s before and after the time point '2023-01-01 00:00:00' for interpolation, FILL PREV/NEXT/NEAR respectively means to look for data forward/backward/around the time point, if there is no data around the time point, then use the value specified by FILL for interpolation, therefore the FILL clause must specify a value at this time. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', 10s) FILL(PREV, 1). Currently, only the combination of time point and time range is supported, not the combination of time interval and time range, i.e., RANGE('2023-01-01 00:00:00', '2023-02-01 00:00:00', 1h) is not supported. The specified time range rules are similar to EVERY, the unit cannot be year or month, the value cannot be 0, and cannot have quotes. When using this extension, other FILL modes except FILL PREV/NEXT/NEAR are not supported, and the EVERY clause cannot be specified.
|
||||
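|
||||
As a minimal sketch (the table and column names are hypothetical), the following combines RANGE, EVERY, and FILL to return one linearly interpolated value per second over a ten-minute window, together with the interpolation pseudocolumns:
|
||||
|
||||
```sql
|
||||
-- Sketch: interpolate `current` every 1s over a 10-minute range
|
||||
SELECT _irowts, _isfilled, INTERP(current)
|
||||
FROM d1001
|
||||
RANGE('2023-01-01 00:00:00', '2023-01-01 00:10:00')
|
||||
EVERY(1s)
|
||||
FILL(LINEAR);
|
||||
```
|
||||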
|
||||
## GROUP BY
|
||||
|
||||
If a GROUP BY clause is specified in the statement, the SELECT list can only contain the following expressions:
|
||||
|
|
|
@ -124,7 +124,39 @@ FLOOR(expr)
|
|||
```
|
||||
|
||||
**Function Description**: Gets the floor of the specified field.
|
||||
Other usage notes see CEIL function description.
|
||||
For other usage notes, see the [CEIL](#ceil) function description.
|
||||
|
||||
#### GREATEST
|
||||
```sql
|
||||
GREATEST(expr1, expr2[, expr]...)
|
||||
```
|
||||
|
||||
**Function Description**: Get the maximum value of all input parameters. The minimum number of parameters for this function is 2.
|
||||
|
||||
**Version**: ver-3.3.6.0
|
||||
|
||||
**Return Type**: Determined by the comparison rules; the comparison type is the final return type.
|
||||
|
||||
**Applicable Data Types**:
|
||||
- Numeric types: timestamp, bool, integer, and floating-point types.
|
||||
- String types: nchar and varchar types.
|
||||
|
||||
**Comparison rules**: The following rules describe the conversion method of the comparison operation:
|
||||
- If any parameter is NULL, the comparison result is NULL.
|
||||
- If all parameters in the comparison operation are string types, they are compared as strings.
|
||||
- If all parameters are numeric types, they are compared as numeric values.
|
||||
- If the parameters include both string and numeric types, they are uniformly compared as strings or as numeric values according to the `compareAsStrInGreatest` configuration item. By default, they are compared as strings.
|
||||
- In all cases, when different types are compared, the comparison type is the one with the larger range. For example, when comparing integer types, if a BIGINT parameter is present, BIGINT is selected as the comparison type.
|
||||
|
||||
**Related configuration items**: client configuration `compareAsStrInGreatest`; 1 means both string and numeric parameters are converted to strings for comparison, 0 means they are converted to numeric values. The default is 1. See the sketch below.
|
||||
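|
||||
A minimal sketch of how these rules play out (assuming `compareAsStrInGreatest` is left at its default of 1):
|
||||
|
||||
```sql
|
||||
-- Mixed types are compared as strings by default: '9' > '10' lexicographically
|
||||
SELECT GREATEST(9, '10');  -- returns '9'
|
||||
-- All-numeric parameters are compared numerically
|
||||
SELECT GREATEST(9, 10);    -- returns 10
|
||||
```
|
||||
|
||||
With `compareAsStrInGreatest` set to 0, the first query compares numerically instead and returns 10.
|
||||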
|
||||
|
||||
#### LEAST
|
||||
```sql
|
||||
LEAST(expr1, expr2[, expr]...)
|
||||
```
|
||||
|
||||
**Function Description**: Get the minimum value of all input parameters. The rest of the description is the same as for the [GREATEST](#greatest) function.
|
||||
|
||||
#### LOG
|
||||
|
||||
|
@ -1933,42 +1965,6 @@ FIRST(expr)
|
|||
- If all columns in the result set are NULL, no results are returned.
|
||||
- For tables with composite primary keys, if there are multiple entries with the smallest timestamp, only the data with the smallest composite primary key is returned.
|
||||
|
||||
### INTERP
|
||||
|
||||
```sql
|
||||
INTERP(expr [, ignore_null_values])
|
||||
|
||||
ignore_null_values: {
|
||||
0
|
||||
| 1
|
||||
}
|
||||
```
|
||||
|
||||
**Function Description**: Returns the record value or interpolated value of a specified column at a specified time slice. The ignore_null_values parameter can be 0 or 1, where 1 means to ignore NULL values, default is 0.
|
||||
|
||||
**Return Data Type**: Same as the field type.
|
||||
|
||||
**Applicable Data Types**: Numeric types.
|
||||
|
||||
**Applicable to**: Tables and supertables.
|
||||
|
||||
**Usage Instructions**
|
||||
|
||||
- INTERP is used to obtain the record value of a specified column at a specified time slice. If there is no row data that meets the conditions at that time slice, interpolation will be performed according to the settings of the FILL parameter.
|
||||
- The input data for INTERP is the data of the specified column, which can be filtered through conditional statements (where clause). If no filtering condition is specified, the input is all data.
|
||||
- INTERP SQL queries need to be used together with the RANGE, EVERY, and FILL keywords; stream computing cannot use RANGE, needs EVERY and FILL keywords together.
|
||||
- The output time range for INTERP is specified by the RANGE(timestamp1, timestamp2) field, which must satisfy timestamp1 \<= timestamp2. Here, timestamp1 is the start value of the output time range, i.e., if the conditions for interpolation are met at timestamp1, then timestamp1 is the first record output, and timestamp2 is the end value of the output time range, i.e., the timestamp of the last record output cannot be greater than timestamp2.
|
||||
- INTERP determines the number of results within the output time range based on the EVERY(time_unit) field, starting from timestamp1 and interpolating at fixed intervals of time (time_unit value), where time_unit can be time units: 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), 1w (weeks). For example, EVERY(500a) will interpolate the specified data every 500 milliseconds.
|
||||
- INTERP determines how to interpolate at each time point that meets the output conditions based on the FILL field. For how to use the FILL clause, refer to [FILL Clause](../time-series-extensions/)
|
||||
- INTERP can interpolate at a single time point specified in the RANGE field, in which case the EVERY field can be omitted. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
|
||||
- When INTERP is applied to a supertable, it will sort all the subtable data under that supertable by primary key column and perform interpolation calculations, and can also be used with PARTITION BY tbname to force the results to a single timeline.
|
||||
- INTERP can be used with the pseudocolumn _irowts to return the timestamp corresponding to the interpolation point (supported from version 3.0.2.0).
|
||||
- INTERP can be used with the pseudocolumn _isfilled to display whether the return result is from the original record or generated by the interpolation algorithm (supported from version 3.0.3.0).
|
||||
- For queries on tables with composite primary keys, if there are data with the same timestamp, only the data with the smallest composite primary key participates in the calculation.
|
||||
- INTERP query supports NEAR FILL mode, i.e., when FILL is needed, it uses the data closest to the current time point for interpolation. When the timestamps before and after are equally close to the current time slice, FILL the previous row's value. This mode is not supported in stream computing and window queries. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', '2023-01-01 00:10:00') FILL(NEAR).(Supported from version 3.3.4.9).
|
||||
- INTERP can only use the pseudocolumn `_irowts_origin` when using FILL PREV/NEXT/NEAR modes. `_irowts_origin` is supported from version 3.3.4.9.
|
||||
- INTERP `RANGE` clause supports the expansion of the time range (supported from version 3.3.4.9), For example, `RANGE('2023-01-01 00:00:00', 10s)` means that only data within 10s around the time point '2023-01-01 00:00:00' can be used for interpolation. `FILL PREV/NEXT/NEAR` respectively means to look for data forward/backward/around the time point. If there is no data around the time point, the default value specified by `FILL` is used for interpolation. Therefore the `FILL` clause must specify the default value at the same time. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', 10s) FILL(PREV, 1). Starting from the 3.3.6.0 version, the combination of time period and time range is supported. When interpolating for each point within the time period, the time range requirement must be met. Prior versions only supported single time point and its time range. The available values for time range are similar to `EVERY`, the unit cannot be year or month, the value must be greater than 0, and cannot be in quotes. When using this extension, `FILL` modes other than `PREV/NEXT/NEAR` are not supported.
|
||||
|
||||
### LAST
|
||||
|
||||
```sql
|
||||
|
@ -2234,6 +2230,35 @@ ignore_option: {
|
|||
- When there is no composite primary key, if different subtables have data with the same timestamp, a "Duplicate timestamps not allowed" message will be displayed
|
||||
- When using composite primary keys, the timestamp and primary key combinations of different subtables may be the same, which row is used depends on which one is found first, meaning that the results of running diff() multiple times in this situation may vary.
|
||||
|
||||
### INTERP
|
||||
|
||||
```sql
|
||||
INTERP(expr [, ignore_null_values])
|
||||
|
||||
ignore_null_values: {
|
||||
0
|
||||
| 1
|
||||
}
|
||||
```
|
||||
|
||||
**Function Description**: Returns the record value or interpolated value of a specified column at a specified time slice. The ignore_null_values parameter can be 0 or 1, where 1 means to ignore NULL values, default is 0.
|
||||
|
||||
**Return Data Type**: Same as the field type.
|
||||
|
||||
**Applicable Data Types**: Numeric types.
|
||||
|
||||
**Applicable to**: Tables and supertables.
|
||||
|
||||
**Usage Instructions**
|
||||
|
||||
- INTERP is used to obtain the record value of a specified column at the specified time slice. It has a dedicated syntax (interp_clause) when used. For syntax introduction, see [reference link](../query-data/#interp).
|
||||
- When there is no row data that meets the conditions at the specified time slice, the INTERP function will interpolate according to the settings of the [FILL](../time-series-extensions/#fill-clause) parameter.
|
||||
- When INTERP is applied to a supertable, it will sort all the subtable data under that supertable by primary key column and perform interpolation calculations, and can also be used with PARTITION BY tbname to force the results to a single timeline.
|
||||
- INTERP can be used with the pseudocolumn _irowts to return the timestamp corresponding to the interpolation point (supported from version 3.0.2.0).
|
||||
- INTERP can be used with the pseudocolumn _isfilled to display whether the return result is from the original record or generated by the interpolation algorithm (supported from version 3.0.3.0).
|
||||
- INTERP can only use the pseudocolumn `_irowts_origin` when using FILL PREV/NEXT/NEAR modes. `_irowts_origin` is supported from version 3.3.4.9.
|
||||
- For queries on tables with composite primary keys, if there are data with the same timestamp, only the data with the smallest composite primary key participates in the calculation.
|
||||
|
||||
### IRATE
|
||||
|
||||
```sql
|
||||
|
|
|
@ -15,7 +15,7 @@ stream_options: {
|
|||
WATERMARK time
|
||||
IGNORE EXPIRED [0|1]
|
||||
DELETE_MARK time
|
||||
FILL_HISTORY [0|1]
|
||||
FILL_HISTORY [0|1] [ASYNC]
|
||||
IGNORE UPDATE [0|1]
|
||||
}
|
||||
|
||||
|
@ -127,6 +127,13 @@ create stream if not exists s1 fill_history 1 into st1 as select count(*) from
|
|||
|
||||
If the stream task is completely outdated and you no longer want it to monitor or process data, you can manually delete it. The computed data will still be retained.
|
||||
|
||||
Tips:
|
||||
- When fill_history is enabled, creating a stream requires finding the boundary point of historical data; if there is a lot of historical data, creating the stream may take a long time. In this case, you can use fill_history 1 async (supported since version 3.3.6.0) so that stream creation is processed in the background: the statement returns immediately without blocking subsequent operations (see the sketch after these notes). async only takes effect with fill_history 1; creating a stream with fill_history 0 is fast and does not require asynchronous processing.
|
||||
|
||||
- Show streams can be used to view the progress of background stream creation: ready indicates success, init indicates creation in progress, and failed indicates that creation failed, in which case the message column shows the reason. A failed stream can be deleted and recreated.
|
||||
|
||||
- In addition, do not create multiple streams asynchronously at the same time, as transaction conflicts may cause later streams to fail.
|
||||
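|
||||
For example, a minimal sketch reusing the s1/st1 names from the example above (assuming version 3.3.6.0 or later):
|
||||
|
||||
```sql
|
||||
-- Create the stream in the background; the statement returns immediately
|
||||
create stream if not exists s1 fill_history 1 async into st1 as select count(*) from power.meters interval(10s);
|
||||
|
||||
-- Check creation progress: ready = success, init = in progress, failed = error
|
||||
show streams;
|
||||
```
|
||||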
|
||||
## Deleting Stream Computing
|
||||
|
||||
```sql
|
||||
|
|
|
@ -73,6 +73,8 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x80000134 | Invalid value | Invalid value | Preserve the scene and logs, report issue on github |
|
||||
| 0x80000135 | Invalid fqdn | Invalid FQDN | Check if the configured or input FQDN value is correct |
|
||||
| 0x8000013C | Invalid disk id | Invalid disk id | Check whether the mounted disk is invalid, or use the parameter diskIDCheckEnabled to skip the disk check. |
|
||||
| 0x8000013D | Decimal value overflow | Decimal value overflow | Check query expression and decimal values |
|
||||
| 0x8000013E | Division by zero error | Division by zero | Check division expression |
|
||||
|
||||
|
||||
## tsc
|
||||
|
@ -109,6 +111,7 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x8000030C | Invalid query id | Internal error | Report issue |
|
||||
| 0x8000030E | Invalid connection id | Internal error | Report issue |
|
||||
| 0x80000315 | User is disabled | User is unavailable | Grant permissions |
|
||||
| 0x80000318 | Mnode internal error | Internal error | Report issue |
|
||||
| 0x80000320 | Object already there | Internal error | Report issue |
|
||||
| 0x80000322 | Invalid table type | Internal error | Report issue |
|
||||
| 0x80000323 | Object not there | Internal error | Report issue |
|
||||
|
@ -371,7 +374,7 @@ This document details the server error codes that may be encountered when using
|
|||
## parser
|
||||
|
||||
| Error Code | Description | Possible Error Scenarios or Reasons | Suggested Actions for Users |
|
||||
| ---------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
|
||||
|------------| ------------------------------------------------------------ |----------------------------------------------------------------------------| ------------------------------------------------------------ |
|
||||
| 0x80002600 | syntax error near | SQL syntax error | Check and correct the SQL statement |
|
||||
| 0x80002601 | Incomplete SQL statement | Incomplete SQL statement | Check and correct the SQL statement |
|
||||
| 0x80002602 | Invalid column name | Illegal or non-existent column name | Check and correct the SQL statement |
|
||||
|
@ -462,12 +465,19 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x80002688 | Cannot use 'year' or 'month' as true_for duration | Use year or month as true_for_duration | Check and correct the SQL statement |
|
||||
| 0x80002689 | Invalid using cols function | Illegal using cols function | Check and correct the SQL statement |
|
||||
| 0x8000268A | Cols function's first param must be a select function that output a single row | The first parameter of the cols function should be a selection function | Check and correct the SQL statement |
|
||||
| 0x8000268B | Invalid using cols function with multiple output columns | Illegal using the cols function for multiple column output | Check and correct the SQL statement |
|
||||
| 0x8000268C | Invalid using alias for cols function | Illegal cols function alias | Check and correct the SQL statement |
|
||||
| 0x8000268B | Invalid using alias for cols function | Illegal cols function alias | Check and correct the SQL statement |
|
||||
| 0x8000268C | Join primary key col must be timestmap type | Join primary key data type error | Check and correct the SQL statement |
|
||||
| 0x8000268D | Invalid virtual table's ref column | Create/Update Virtual table using incorrect data source column | Check and correct the SQL statement |
|
||||
| 0x8000268E | Invalid table type | Incorrect Table type | Check and correct the SQL statement |
|
||||
| 0x8000268F | Invalid ref column type | Virtual table's column type and data source column's type are different | Check and correct the SQL statement |
|
||||
| 0x80002690 | Create child table using virtual super table | Create non-virtual child table using virtual super table | Check and correct the SQL statement |
|
||||
| 0x800026FF | Parser internal error | Internal error in parser | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002700 | Planner internal error | Internal error in planner | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002701 | Expect ts equal | JOIN condition validation failed | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002702 | Cross join not support | CROSS JOIN not supported | Check and correct the SQL statement |
|
||||
| 0x80002704 | Planner slot key not found | Planner cannot find slotId during making physic plan | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002705 | Planner invalid table type | Planner get invalid table type | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002706 | Planner invalid query control plan type | Planner get invalid query control plan type during making physic plan | Preserve the scene and logs, report issue on GitHub |
|
||||
|
||||
## function
|
||||
|
||||
|
@ -545,3 +555,12 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x80004017 | Invalid status, please subscribe topic first | tmq status invalidate | Without calling subscribe, directly poll data |
|
||||
| 0x80004100 | Stream task not exist | The stream computing task does not exist | Check the server-side error logs |
|
||||
|
||||
|
||||
## virtual table
|
||||
|
||||
| Error Code | Description | Possible Error Scenarios or Reasons | Recommended Actions for Users |
|
||||
|-------------|---------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------|
|
||||
| 0x80006200 | Virtual table scan internal error | virtual table scan operator internal error, generally does not occur | Check error logs, contact development for handling |
|
||||
| 0x80006201 | Virtual table scan invalid downstream operator type | The incorrect execution plan generated causes the downstream operator type of the virtual table scan operator to be incorrect. | Check error logs, contact development for handling |
|
||||
| 0x80006202 | Virtual table prim timestamp column should not has ref | The timestamp primary key column of a virtual table should not have a data source. If it does, this error will occur during subsequent queries on the virtual table. | Check error logs, contact development for handling |
|
||||
| 0x80006203 | Create virtual child table must use virtual super table | Create virtual child table using non-virtual super table | Create the virtual child table using a virtual super table |
|
||||
|
|
|
@ -27,7 +27,7 @@ stream_options: {
|
|||
WATERMARK time
|
||||
IGNORE EXPIRED [0|1]
|
||||
DELETE_MARK time
|
||||
FILL_HISTORY [0|1]
|
||||
FILL_HISTORY [0|1] [ASYNC]
|
||||
IGNORE UPDATE [0|1]
|
||||
}
|
||||
|
||||
|
@ -101,6 +101,13 @@ PARTITION 子句中,为 tbname 定义了一个别名 tname, 在 PARTITION
|
|||
|
||||
By enabling the fill_history option, the created stream computing task can process data written before, during, and after the stream is created. This means that data written either before or after stream creation is included in the scope of stream computing, ensuring data integrity and consistency. This setting gives users greater flexibility to handle historical and new data according to their actual needs.
|
||||
|
||||
Note:
|
||||
- When fill_history is enabled, creating a stream requires finding the boundary point of historical data; if there is a lot of historical data, creating the stream may take a long time. In this case, you can use the fill_history 1 async syntax (supported since version 3.3.6.0) to process stream creation in the background, so the statement returns immediately without blocking subsequent operations. async only takes effect with fill_history 1; creating a stream with fill_history 0 is fast and needs no asynchronous processing.
|
||||
|
||||
- The progress of background stream creation can be viewed with show streams (ready indicates success, init indicates creation in progress, failed indicates failure, and the message column shows the reason; a failed stream can be deleted and recreated).
|
||||
|
||||
- In addition, do not create multiple streams asynchronously at the same time, as transaction conflicts may cause later streams to fail.
|
||||
|
||||
For example, create a stream that counts the number of rows produced by all smart meters every 10s, including historical data. The SQL is as follows:
|
||||
```sql
|
||||
create stream if not exists count_history_s fill_history 1 into count_history as select count(*) from power.meters interval(10s)
|
||||
|
|
|
@ -90,7 +90,7 @@ Header 是 CSV 文件的第一行,规则如下:
|
|||
|
||||
|
||||
| No. | Column | Description | Required | Default Behavior |
|
||||
| ---- | ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
|----|-------------------------|--------------------------------------------------------------------------------------------------------------------| -------- |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| 1 | point_id | The id of the data point on the OPC UA server | Yes | None |
|
||||
| 2 | stable | The super table in TDengine corresponding to the data point | Yes | None |
|
||||
| 3 | tbname | The subtable in TDengine corresponding to the data point | Yes | None |
|
||||
|
@ -99,11 +99,13 @@ Header 是 CSV 文件的第一行,规则如下:
|
|||
| 6 | value_transform | The transform function applied in taosX to the collected value | No | No transform is applied to collected values |
|
||||
| 7 | type | The data type of the collected value | No | The original type of the collected value is used as the data type in TDengine |
|
||||
| 8 | quality_col | The column in TDengine for the quality of the collected value | No | No quality column is added in TDengine |
|
||||
| 9 | ts_col | The timestamp column in TDengine for the data point's original timestamp | No | When both ts_col and received_ts_col are non-empty, the former is used as the timestamp column; when exactly one of them is non-empty, that column is used; when both are empty, the data point's original timestamp is used as the timestamp column in TDengine, with the default column name `ts`. |
|
||||
| 10 | received_ts_col | The timestamp column in TDengine for the time the collected value was received | No | Same as above |
|
||||
| 11 | ts_transform | The transform function applied in taosX to the data point's timestamp | No | No transform is applied to the original timestamp |
|
||||
| 12 | received_ts_transform | The transform function applied in taosX to the data point's received timestamp | No | No transform is applied to the received timestamp |
|
||||
| 13 | tag::VARCHAR(200)::name | The tag column in TDengine for the data point. `tag` is a reserved keyword indicating a tag column; `VARCHAR(200)` is the tag type and may be any other valid type; `name` is the actual tag name. | No | If one or more tag columns are configured, they are used; if none are configured and the stable exists in TDengine, the stable's tags are used; if none are configured and the stable does not exist in TDengine, the following 2 tag columns are added by default: tag::VARCHAR(256)::point_id and tag::VARCHAR(256)::point_name |
|
||||
| 9 | ts_col | The timestamp column in TDengine for the data point's original timestamp | No | Among the 3 columns ts_col, request_ts, and received_ts, when 2 or more are present, the leftmost one is used as the primary key in TDengine. |
|
||||
| 10 | request_ts_col | The timestamp column in TDengine for the time the collected value was requested | No | Among the 3 columns ts_col, request_ts, and received_ts, when 2 or more are present, the leftmost one is used as the primary key in TDengine. |
|
||||
| 11 | received_ts_col | The timestamp column in TDengine for the time the collected value was received | No | Among the 3 columns ts_col, request_ts, and received_ts, when 2 or more are present, the leftmost one is used as the primary key in TDengine. |
|
||||
| 12 | ts_transform | The transform function applied in taosX to the data point's timestamp | No | No transform is applied to the original timestamp |
|
||||
| 13 | request_ts_transform | The transform function applied in taosX to the data point's request timestamp | No | No transform is applied to the request timestamp |
|
||||
| 14 | received_ts_transform | The transform function applied in taosX to the data point's received timestamp | No | No transform is applied to the received timestamp |
|
||||
| 15 | tag::VARCHAR(200)::name | The tag column in TDengine for the data point. `tag` is a reserved keyword indicating a tag column; `VARCHAR(200)` is the tag type and may be any other valid type; `name` is the actual tag name. | No | If one or more tag columns are configured, they are used; if none are configured and the stable exists in TDengine, the stable's tags are used; if none are configured and the stable does not exist in TDengine, the following 2 tag columns are added by default: tag::VARCHAR(256)::point_id and tag::VARCHAR(256)::point_name |
|
||||
|
||||
(2) The CSV header must not contain duplicate columns;
|
||||
|
||||
|
@ -121,7 +123,7 @@ CSV 文件中的每个 Row 配置一个 OPC 数据点位。Row 的规则如下
|
|||
|
||||
|
||||
| No. | Column in Header | Value Type | Value Range | Required | Default |
|
||||
| ---- | ----------------------- | -------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------------------------ |
|
||||
|----|-------------------------| -------- |-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------| ------------------------ |
|
||||
| 1 | point_id | String | A string like `ns=3;i=1005` that conforms to the OPC UA ID specification, i.e., it contains the ns and id parts | Yes | |
|
||||
| 2 | enable | int | 0: do not collect this point, and delete the point's corresponding subtable in TDengine before the OPC DataIn task starts; 1: collect this point, and do not delete the subtable before the OPC DataIn task starts. | No | 1 |
|
||||
| 3 | stable | String | Any string conforming to the TDengine super table naming convention; any special character `.` is replaced with an underscore. If `{type}` is present: when type in the CSV file is non-empty, it is replaced with the value of type; when type is empty, it is replaced with the original type of the collected value | Yes | |
|
||||
|
@ -131,10 +133,12 @@ CSV 文件中的每个 Row 配置一个 OPC 数据点位。Row 的规则如下
|
|||
| 7 | type | String | Supported types include: b/bool/i8/tinyint/i16/smallint/i32/int/i64/bigint/u8/tinyint unsigned/u16/smallint unsigned/u32/int unsigned/u64/bigint unsigned/f32/float/f64/double/timestamp/timestamp(ms)/timestamp(us)/timestamp(ns)/json | No | The original type of the collected value |
|
||||
| 8 | quality_col | String | A column name conforming to TDengine naming conventions | No | None |
|
||||
| 9 | ts_col | String | A column name conforming to TDengine naming conventions | No | ts |
|
||||
| 10 | received_ts_col | String | A column name conforming to TDengine naming conventions | No | rts |
|
||||
| 11 | ts_transform | String | Supports the +, -, *, /, % operators, e.g.: ts / 1000 * 1000 sets the last 3 digits of a ms-precision timestamp to 0; ts + 8 * 3600 * 1000 adds 8 hours to a ms-precision timestamp; ts - 8 * 3600 * 1000 subtracts 8 hours from a ms-precision timestamp | No | None |
|
||||
| 12 | received_ts_transform | String | | No | None |
|
||||
| 13 | tag::VARCHAR(200)::name | String | The tag value; when the tag type is VARCHAR, it can be Chinese text | No | NULL |
|
||||
| 10 | request_ts_col | String | A column name conforming to TDengine naming conventions | No | rts |
|
||||
| 11 | received_ts_col | String | A column name conforming to TDengine naming conventions | No | rts |
|
||||
| 12 | ts_transform | String | Supports the +, -, *, /, % operators, e.g.: ts / 1000 * 1000 sets the last 3 digits of a ms-precision timestamp to 0; ts + 8 * 3600 * 1000 adds 8 hours to a ms-precision timestamp; ts - 8 * 3600 * 1000 subtracts 8 hours from a ms-precision timestamp | No | None |
|
||||
| 13 | request_ts_transform | String | Supports the +, -, *, /, % operators, e.g.: qts / 1000 * 1000 sets the last 3 digits of a ms-precision timestamp to 0; qts + 8 * 3600 * 1000 adds 8 hours to a ms-precision timestamp; qts - 8 * 3600 * 1000 subtracts 8 hours from a ms-precision timestamp | No | None |
|
||||
| 14 | received_ts_transform | String | Supports the +, -, *, /, % operators, e.g.: rts / 1000 * 1000 sets the last 3 digits of a ms-precision timestamp to 0; rts + 8 * 3600 * 1000 adds 8 hours to a ms-precision timestamp; rts - 8 * 3600 * 1000 subtracts 8 hours from a ms-precision timestamp | No | None |
|
||||
| 15 | tag::VARCHAR(200)::name | String | The tag value; when the tag type is VARCHAR, it can be Chinese text | No | NULL |
|
||||
|
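
The `ts_transform` rows above are plain integer arithmetic over epoch timestamps. As an illustration only (not taosX code), the following Java sketch shows what the three documented example expressions compute, assuming a hypothetical millisecond-precision Unix timestamp:

```java
public class TsTransformDemo {
    public static void main(String[] args) {
        long ts = 1_700_000_123_456L; // hypothetical millisecond-precision timestamp

        // ts / 1000 * 1000 : zero out the last 3 digits (truncate to whole seconds)
        long truncated = ts / 1000 * 1000;       // 1_700_000_123_000

        // ts + 8 * 3600 * 1000 : shift an ms-precision timestamp forward by 8 hours
        long plus8h = ts + 8 * 3600 * 1000;

        // ts - 8 * 3600 * 1000 : shift an ms-precision timestamp backward by 8 hours
        long minus8h = ts - 8 * 3600 * 1000;

        System.out.printf("%d -> %d, %d, %d%n", ts, truncated, plus8h, minus8h);
    }
}
```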

(2) point_id is unique within an entire DataIn task, i.e. in one OPC DataIn task a data point can be written to only one subtable in TDengine. To write one data point to multiple subtables, create multiple OPC DataIn tasks;

@@ -154,7 +158,7 @@ Each Row in the CSV file configures one OPC data point. Rows follow the rules below

Specify the supertable and subtable that data is written to by configuring **Supertable Name** and **Table Name**.

Configure the **Primary Key Column**: selecting origin_ts uses the OPC data point's original timestamp as the primary key in TDengine; selecting received_ts uses the data's received timestamp as the primary key. Configure the **Primary Key Alias** to set the name of the TDengine timestamp column.
Configure the **Primary Key Column**: selecting origin_ts uses the OPC data point's original timestamp as the primary key in TDengine; selecting request_ts uses the data's request timestamp as the primary key; selecting received_ts uses the data's received timestamp as the primary key. Configure the **Primary Key Alias** to set the name of the TDengine timestamp column.

![Connect to OPC-UA-05](./pic/opcua-04.png)

@@ -66,7 +66,7 @@ The header is the first row of the CSV file; the rules are as follows:

| No. | Column name | Description | Required | Default behavior |
| ---- | ----------------------- | -------- | -------- | ------------------------ |
|----|-------------------------|--------|------|------------------------|
| 1 | tag_name | The data point's id on the OPC DA server | Yes | None |
| 2 | stable | The supertable in TDengine corresponding to the data point | Yes | None |
| 3 | tbname | The subtable in TDengine corresponding to the data point | Yes | None |

@@ -75,11 +75,13 @@ The header is the first row of the CSV file; the rules are as follows:

| 6 | value_transform | Transform function applied by taosX to the data point's collected value | No | Uniformly do not transform collected values |
| 7 | type | Data type of the data point's collected value | No | Uniformly use the original type of the collected value as the data type in TDengine |
| 8 | quality_col | Column name in TDengine for the quality of the collected value | No | Uniformly do not add a quality column in TDengine |
| 9 | ts_col | Timestamp column in TDengine for the data point's original timestamp | No | When ts_col and received_ts_col are both non-empty, the former is used as the timestamp column; when exactly one of them is non-empty, the non-empty one is used as the timestamp column; when both are empty, the data point's original timestamp is used as the timestamp column in TDengine, with the default column name ts. |
| 10 | received_ts_col | Timestamp column in TDengine for the time the collected value was received | No | |
| 11 | ts_transform | Transform function applied by taosX to the data point's timestamp | No | Uniformly do not transform the data point's original timestamp |
| 12 | received_ts_transform | Transform function applied by taosX to the data point's received timestamp | No | Uniformly do not transform the data point's received timestamp |
| 13 | tag::VARCHAR(200)::name | Tag column in TDengine for the data point. `tag` is a reserved keyword marking the column as a tag column; `VARCHAR(200)` is the tag's type, which may also be any other valid type; `name` is the tag's actual name. | No | If one or more tag columns are configured, the configured tag columns are used; if no tag column is configured and the stable exists in TDengine, the stable's tags in TDengine are used; if no tag column is configured and the stable does not exist in TDengine, the following 2 tag columns are added automatically by default: tag::VARCHAR(256)::point_id and tag::VARCHAR(256)::point_name |
| 9 | ts_col | Timestamp column in TDengine for the data point's original timestamp | No | Among the 3 columns ts_col, request_ts, and received_ts, when 2 or more are present, the leftmost one is used as the primary key in TDengine |
| 10 | request_ts_col | Timestamp column in TDengine for the time the collected value was requested | No | Among the 3 columns ts_col, request_ts, and received_ts, when 2 or more are present, the leftmost one is used as the primary key in TDengine |
| 11 | received_ts_col | Timestamp column in TDengine for the time the collected value was received | No | Among the 3 columns ts_col, request_ts, and received_ts, when 2 or more are present, the leftmost one is used as the primary key in TDengine |
| 12 | ts_transform | Transform function applied by taosX to the data point's timestamp | No | Uniformly do not transform the data point's original timestamp |
| 13 | request_ts_transform | Transform function applied by taosX to the data point's request timestamp | No | Uniformly do not transform the data point's request timestamp |
| 14 | received_ts_transform | Transform function applied by taosX to the data point's received timestamp | No | Uniformly do not transform the data point's received timestamp |
| 15 | tag::VARCHAR(200)::name | Tag column in TDengine for the data point. `tag` is a reserved keyword marking the column as a tag column; `VARCHAR(200)` is the tag's type, which may also be any other valid type; `name` is the tag's actual name. | No | If one or more tag columns are configured, the configured tag columns are used; if no tag column is configured and the stable exists in TDengine, the stable's tags in TDengine are used; if no tag column is configured and the stable does not exist in TDengine, the following 2 tag columns are added automatically by default: tag::VARCHAR(256)::point_id and tag::VARCHAR(256)::point_name |

(2) The CSV header must not contain duplicate columns;

@@ -95,9 +97,8 @@ Each Row in the CSV file configures one OPC data point. Rows follow the rules below

(1) The correspondence with the header columns is as follows

| No. | Column in the header | Value type | Value range | Required | Default |
| ---- | ----------------------- | -------- | -------- | -------- | ------------------------ |
|----|-------------------------|--------|--------|------|--------------|
| 1 | tag_name | String | A string like `root.parent.temperature`, conforming to the OPC DA ID specification | Yes | |
| 2 | enable | int | 0: do not collect this point, and delete the point's subtable in TDengine before the OPC DataIn task starts; 1: collect this point, and do not delete the subtable before the OPC DataIn task starts. | No | 1 |
| 3 | stable | String | Any string conforming to TDengine's supertable naming convention; any special character `.` is replaced with an underscore. If `{type}` is present: when the CSV file's type is non-empty, it is replaced with the value of type; when type is empty, it is replaced with the original type of the collected value | Yes | |

@@ -107,10 +108,12 @@ Each Row in the CSV file configures one OPC data point. Rows follow the rules below

| 7 | type | String | Supported types: b/bool/i8/tinyint/i16/smallint/i32/int/i64/bigint/u8/tinyint unsigned/u16/smallint unsigned/u32/int unsigned/u64/bigint unsigned/f32/float/f64/double/timestamp/timestamp(ms)/timestamp(us)/timestamp(ns)/json | No | Original type of the collected value |
| 8 | quality_col | String | A column name conforming to TDengine naming conventions | No | None |
| 9 | ts_col | String | A column name conforming to TDengine naming conventions | No | ts |
| 10 | received_ts_col | String | A column name conforming to TDengine naming conventions | No | rts |
| 11 | ts_transform | String | Supports the +, -, *, /, % operators, e.g.: ts / 1000 * 1000 sets the last 3 digits of a millisecond timestamp to 0; ts + 8 * 3600 * 1000 adds 8 hours to a millisecond-precision timestamp; ts - 8 * 3600 * 1000 subtracts 8 hours from a millisecond-precision timestamp | No | None |
| 12 | received_ts_transform | String | | No | None |
| 13 | tag::VARCHAR(200)::name | String | The tag's value; when the tag type is VARCHAR, it may contain Chinese characters | No | NULL |
| 10 | request_ts_col | String | A column name conforming to TDengine naming conventions | No | qts |
| 11 | received_ts_col | String | A column name conforming to TDengine naming conventions | No | rts |
| 12 | ts_transform | String | Supports the +, -, *, /, % operators, e.g.: ts / 1000 * 1000 sets the last 3 digits of a millisecond timestamp to 0; ts + 8 * 3600 * 1000 adds 8 hours to a millisecond-precision timestamp; ts - 8 * 3600 * 1000 subtracts 8 hours from a millisecond-precision timestamp | No | None |
| 13 | request_ts_transform | String | Supports the +, -, *, /, % operators, e.g.: qts / 1000 * 1000 sets the last 3 digits of a millisecond timestamp to 0; qts + 8 * 3600 * 1000 adds 8 hours to a millisecond-precision timestamp; qts - 8 * 3600 * 1000 subtracts 8 hours from a millisecond-precision timestamp | No | None |
| 14 | received_ts_transform | String | Supports the +, -, *, /, % operators, e.g.: rts / 1000 * 1000 sets the last 3 digits of a millisecond timestamp to 0; rts + 8 * 3600 * 1000 adds 8 hours to a millisecond-precision timestamp; rts - 8 * 3600 * 1000 subtracts 8 hours from a millisecond-precision timestamp | No | None |
| 15 | tag::VARCHAR(200)::name | String | The tag's value; when the tag type is VARCHAR, it may contain Chinese characters | No | NULL |

(2) tag_name is unique within an entire DataIn task, i.e. in one OPC DataIn task a data point can be written to only one subtable in TDengine. To write one data point to multiple subtables, create multiple OPC DataIn tasks;

@@ -130,7 +133,7 @@ Each Row in the CSV file configures one OPC data point. Rows follow the rules below

Specify the supertable and subtable that data is written to by configuring **Supertable Name** and **Table Name**.

Configure the **Primary Key Column**: selecting origin_ts uses the OPC data point's original timestamp as the primary key in TDengine; selecting received_ts uses the data's received timestamp as the primary key. Configure the **Primary Key Alias** to set the name of the TDengine timestamp column.
Configure the **Primary Key Column**: selecting origin_ts uses the OPC data point's original timestamp as the primary key in TDengine; selecting request_ts uses the data's request timestamp as the primary key; selecting received_ts uses the data's received timestamp as the primary key. Configure the **Primary Key Alias** to set the name of the TDengine timestamp column.

![Connect to OPC-DA-04](./pic/opcda-04.png)

@@ -37,6 +37,14 @@ TDengine can subscribe to data from an MQTT broker through the MQTT connector and write it to T

Fill in the MQTT broker's port in **MQTT Port**, e.g.: `1883`

Select the TLS certificate verification mode in **TLS Verification**

1. Off: no TLS certificate authentication is performed. When connecting to MQTT, a TCP connection is made first; if it fails, a TLS connection in certificate-free mode is attempted.

2. One-way authentication: TLS is enabled and the server certificate is verified; a CA certificate must be uploaded.

3. Two-way authentication: TLS is enabled and mutual authentication with the server is performed; a CA certificate, a client certificate, and a client key must be uploaded.

Fill in the MQTT broker's username in **User**.

Fill in the MQTT broker's password in **Password**.

@@ -44,13 +52,7 @@ TDengine can subscribe to data from an MQTT broker through the MQTT connector and write it to T

![mqtt-07](./mqtt-07.png)

### 4. Configure the SSL Certificate

If the MQTT broker uses an SSL certificate, upload the certificate file in **SSL Certificate**.

![mqtt-08](./mqtt-08.png)

### 5. Configure Collection Settings
### 4. Configure Collection Settings

Fill in the parameters of the collection task in the **Collection Configuration** area.

@@ -75,13 +77,13 @@ TDengine can subscribe to data from an MQTT broker through the MQTT connector and write it to T

![mqtt-10](./mqtt-10.png)

### 6. Configure MQTT Payload Parsing
### 5. Configure MQTT Payload Parsing

Fill in the payload-parsing parameters in the **MQTT Payload Parsing** area.

taosX can parse data with a JSON extractor and lets users specify the data model in the database, including setting the table name and supertable name, and configuring ordinary columns and tag columns.

#### 6.1 Parsing
#### 5.1 Parsing

There are three ways to obtain sample data:

@@ -112,7 +114,7 @@ JSON data supports JSONObject or JSONArray; using the json parser, you can

![mqtt-12](./mqtt-12.png)

#### 6.2 Field Splitting
#### 5.2 Field Splitting

In **Extract or Split from Column**, fill in the fields to extract or split from the message body. For example: to split the message field into the 2 fields `message_0` and `message_1`, choose the split extractor, set separator to -, and set number to 2.
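
Conceptually, the split configuration above behaves like an ordinary bounded string split. This Java sketch is illustration only (not taosX internals), and the `message` value is hypothetical:

```java
public class SplitDemo {
    public static void main(String[] args) {
        String message = "2024-ok"; // hypothetical payload field delimited by '-'

        // split extractor with separator "-" and number 2:
        // yields the two fields message_0 and message_1
        String[] parts = message.split("-", 2);
        String message_0 = parts[0]; // "2024"
        String message_1 = parts[1]; // "ok"

        System.out.println(message_0 + ", " + message_1);
    }
}
```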
@@ -126,7 +128,7 @@ JSON data supports JSONObject or JSONArray; using the json parser, you can

![mqtt-14](./mqtt-14.png)

#### 6.3 Data Filtering
#### 5.3 Data Filtering

In **Filter**, fill in the filter condition. For example: with `id != 1`, only data whose id is not 1 is written to TDengine.
@@ -138,7 +140,7 @@ JSON data supports JSONObject or JSONArray; using the json parser, you can

![mqtt-16](./mqtt-16.png)

#### 6.4 Table Mapping
#### 5.4 Table Mapping

In the **Target Supertable** drop-down list, select a target supertable, or click the **Create Supertable** button on the right to create a new one.
@@ -164,7 +166,7 @@ JSON data supports JSONObject or JSONArray; using the json parser, you can

![mqtt-18](./mqtt-18.png)

### 7. Advanced Options
### 6. Advanced Options

In **Message Waiting Queue Size**, fill in the size of the cache queue for incoming MQTT messages. When the queue is full, newly arriving data is discarded; it may be set to 0, i.e. no caching.
@@ -182,12 +184,12 @@ JSON data supports JSONObject or JSONArray; using the json parser, you can

![mqtt-20](./mqtt-20.png)

### 8. Exception Handling Strategies
### 7. Exception Handling Strategies

import Contributing from './_03-exception-handling-strategy.mdx'

<Contributing />

### 9. Creation Complete
### 8. Creation Complete

Click the **Submit** button to finish creating the MQTT-to-TDengine data synchronization task. Return to the **Data Sources** list page to view the task's execution status.

@@ -9,9 +9,15 @@

> Discard: ignore the abnormal data and do not write it to the target database
> Error: the task reports an error

- **Target database connection timeout** The connection to the target database failed. Available strategies: archive, discard, error, cache
> Cache: when the target database is in an abnormal state (connection error, insufficient resources, etc.), write to a cache file (default path `${data_dir}/tasks/_id/.datetime`); once the target database recovers, re-ingest from the cache
- **Target database does not exist** The write failed because the target database does not exist. Available strategies: archive, discard, error
- **Table does not exist** The write failed because the table does not exist. Available strategies: archive, discard, error, create the table automatically
> Create the table automatically: create the table automatically and retry once creation succeeds
- **Primary-key timestamp overflow** Check whether the first-column timestamp of the data lies within the valid range (now - keep1, now + 100y). Available strategies: archive, discard, error
- **Primary-key timestamp empty** Check whether the first-column timestamp of the data is empty. Available strategies: archive, discard, error, use current time
> Use current time: fill the empty timestamp field with the current time
- **Composite primary key empty** The write failed because the composite primary key is empty. Available strategies: archive, discard, error
- **Table name length overflow** Check whether the subtable name exceeds the length limit (maximum 192 characters). Available strategies: archive, discard, error, truncate, truncate and archive (see the sketch after this list)
> Truncate: take the first 192 characters of the original table name as the new table name
> Truncate and archive: take the first 192 characters of the original table name as the new table name, and also write the row to the archive file

@@ -20,4 +26,20 @@

- **Table-name template variable empty** Check whether a variable in the subtable-name template is empty. Available strategies: discard, leave blank, replace the variable with a specified string
> Leave blank: do nothing special at the variable's position, e.g. `a_{x}` becomes `a_`
> Replace the variable with a specified string: put the string from the following input box at the variable's position, e.g. `a_{x}` becomes `a_b`
- **Column does not exist** The write failed because the column does not exist. Available strategies: archive, discard, error, add missing columns automatically
> Add missing columns automatically: based on the data, automatically alter the table schema to add the columns, then retry once the alteration succeeds
- **Column name length overflow** Check whether a column name exceeds the length limit (maximum 64 characters). Available strategies: archive, discard, error
- **Automatic column widening** A toggle; when on, if column data exceeds the column length, the table schema is altered automatically and the write is retried
- **Column length overflow** The write failed because the column length overflowed. Available strategies: archive, discard, error, truncate, truncate and archive
> Truncate: keep the first n characters of the data that fit within the length limit
> Truncate and archive: keep the first n characters of the data that fit within the length limit, and also write the row to the archive file
- **Data exception** Strategy for other data exceptions not listed above. Available strategies: archive, discard, error
- **Connection timeout** Configure the target-database connection timeout, in seconds, range 1 to 600
- **Temporary storage file location** Configure the location of cache files; the effective location is `$DATA_DIR/tasks/:id/{location}`
- **Archived data retention days** A non-negative integer; 0 means unlimited
- **Archived data free space** 0 to 65535, where 0 means unlimited
- **Archived data file location** Configure the location of archive files; the effective location is `$DATA_DIR/tasks/:id/{location}`
- **Archive failure strategy** Strategy when writing the archive file fails. Available strategies: delete old files, discard, error and stop the task
> Delete old files: delete old files; if the write still fails after old files are deleted, report an error and stop the task
> Discard: discard the data that was about to be archived
> Error and stop the task: report an error and stop the current task
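
As a sketch of the truncation and current-time policies above (illustrative Java, not taosX source; measuring the name in characters rather than bytes is an assumption):

```java
public class ExceptionPolicyDemo {
    static final int MAX_TABLE_NAME = 192; // subtable-name limit referenced above

    // "Truncate": keep the first 192 characters of an over-long table name
    static String truncateTableName(String name) {
        return name.length() <= MAX_TABLE_NAME ? name : name.substring(0, MAX_TABLE_NAME);
    }

    // "Use current time": fill an empty primary-key timestamp with now (ms precision)
    static long fillTimestamp(Long ts) {
        return ts != null ? ts : System.currentTimeMillis();
    }

    public static void main(String[] args) {
        System.out.println(truncateTableName("d".repeat(300)).length()); // 192
        System.out.println(fillTimestamp(null) > 0);                     // true
    }
}
```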
@@ -26,18 +26,18 @@ Tableau is a well-known business intelligence tool; it supports many data sources and can

**Step 3**: Click the `DSN` radio button, select the configured data source (MyTDengine), and click the `Connect` button. After the connection succeeds, delete the content of the appended string section, then click the `Sign In` button.

![Open and configure ODBC](./tableau/image-03.png)
![tableau-odbc](./tableau/tableau-odbc.jpg)

## Data Analysis

**Step 1**: On the workbook page, select the connected data source. Click the database drop-down list to show the databases available for analysis. On that basis, click the search button in the table options to display all tables in that database. Then drag the table to be analyzed into the area on the right to show the table structure.

![Open the ODBC data source](./tableau/image-04.png)
![Open the ODBC data source](./tableau/data-soure-modify.jpg)

**Step 2**: Click the "Update Now" button below to display the data in the table.

![Open the ODBC data source](./tableau/image-05.png)
![Open the ODBC data source](./tableau/load-data.jpg)

**Step 3**: Click "Worksheet" at the bottom of the window to open the data analysis window, which shows all fields of the analyzed table. Drag fields onto rows and columns to display a chart.

![Open the ODBC data source](./tableau/image-06.png)
![Open the ODBC data source](./tableau/decription.jpg)

@@ -19,22 +19,22 @@ title: Integration with Excel

**Step 2**: Start Excel in a Windows environment, then choose [Data] -> [Get Data] -> [From Other Sources] -> [From ODBC].

![Select ODBC](./excel/odbc-menu.webp)
![Select ODBC](./excel/excel-odbc.webp)

**Step 3**: In the pop-up window, select the data source to connect to from the [Data source name (DSN)] drop-down list, then click [OK].

![Select the data source](./excel/odbc-select.webp)
![Data source selection](./excel/odbc-select.webp)

**Step 4**: Enter the TDengine username and password.

![Enter credentials](./excel/odbc-config.webp)
![Enter credentials](./excel/odbc-config.webp)

**Step 5**: In the [Navigator] dialog that appears, select the database tables to load, then click [Load] to finish loading the data.

![Select the table to load](./excel/odbc-load.webp)
![Data loading completed](./excel/odbc-load.webp)

## Data Analysis

Select the imported data. On the [Insert] tab, choose a column chart, and configure the data fields in the [PivotChart] pane on the right.

![Data analysis](./excel/odbc-data.webp)
![Data analysis](./excel/odbc-data.webp)

@@ -0,0 +1,83 @@

---
sidebar_label: FineBI
title: Integration with FineBI
---

Fanruan is a technology company specializing in business intelligence and data analytics; its two independently developed core products, FineBI and FineReport, give it a prominent position in the industry. Fanruan's BI tools are widely used across enterprises of all kinds, helping users achieve data visualization and analysis, report generation, and data-driven decision support.

Through the `TDengine Java connector`, FineBI can quickly access TDengine data. Users can connect to a TDengine database directly in FineBI and fetch time-series data for analysis and visual reporting, with no code required at any point.

## Prerequisites

Prepare the following environment:

- A TDengine cluster, version 3.3.4.0 or later, is deployed and running normally (both Enterprise and Community editions work).
- taosAdapter is running normally; see the [taosAdapter reference manual](../../../reference/components/taosadapter).
- FineBI is installed (if not, download and install it: [FineBI download](https://www.finebi.com/product/download)).
- Download the `fine_conf_entity` plugin, which enables adding JDBC drivers: [download link](https://market.fanruan.com/plugin/1052a471-0239-4cd8-b832-045d53182c5d).
- Install the JDBC driver. Download the `TDengine JDBC` connector file `taos-jdbcdriver-3.4.0-dist.jar` (or a later version) from `maven.org`.

## Configure the Data Source

**Step 1**: In the FineBI server's `db.script` configuration file, find the `SystemConfig.driverUpload` entry and change it to `true`.

- Windows: the configuration file path is `webapps/webroot/WEB-INF/embed/finedb/db.script` under the installation directory.
- Linux/Mac: the configuration file path is `/usr/local/FineBI6.1/webapps/webroot/WEB-INF/embed/finedb/db.script`.

**Step 2**: Start the FineBI service and open `http://ip:37799/webroot/decision` in a browser, where ip is the FineBI server's IP address.

**Step 3**: After logging in to the FineBI web page, click [Management System] -> [Plugin Management]; in [App Store] on the right, click [Install from Local] and select the downloaded `fine_conf_entity` plugin to install it.

![Install the fine_conf_entity plugin](./finebi/fine-conf-entity-plugin-install.webp)

**Step 4**: Click [Management System] -> [Data Connection] -> [Data Connection Management], click the [Driver Management] button on the right to open the configuration page, then click [New Driver] and, in the pop-up window, enter a name (e.g. `tdengine-websocket`) to configure the JDBC driver.

![Create a driver](./finebi/driver-config.webp)

**Step 5**: On the driver configuration page, click [Upload File], select the downloaded `TDengine Java Connector` (e.g. `taos-jdbcdriver-3.4.0-dist.jar`) to upload it; after the upload completes, select `com.taosdata.jdbc.ws.WebSocketDriver` from the [Driver] drop-down list and click [Save].

![Upload the driver](./finebi/websocket-driver-config.webp)

**Step 6**: On the "Data Connection Management" page, click [New Data Connection], then click "Others", and on the right click "Other JDBC" to configure the connection.

![Create a connection](./finebi/other-jdbc.webp)

**Step 7**: On the configuration page, first enter a data connection name; then choose "Custom" in the [Driver] option and select the configured driver from the drop-down list (e.g. `com.taosdata.jdbc.ws.WebSocketDriver (tdengine-websocket)`); then configure the "Data Connection URL" (e.g. `jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata&fineBIDialect=mysql`). When done, click [Test Connection] in the upper right; after verification succeeds, click [Save] to complete the configuration.

:::tip
`fineBIDialect=mysql` means the SQL dialect rules of the MySQL database are used. Simply put, it tells FineBI to parse and execute queries and operations in the specific way MySQL handles SQL statements.
:::

![Connection configuration](./finebi/url-config.webp)

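The connection FineBI uses in Step 7 can also be exercised from plain Java, which is handy for isolating driver or URL problems. A minimal sketch, assuming taosAdapter is reachable at localhost:6041, the sample `power` database with the standard `meters` supertable exists, and `taos-jdbcdriver-3.4.0-dist.jar` is on the classpath (the FineBI-specific `fineBIDialect` parameter is omitted):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class TDengineWsSmokeTest {
    public static void main(String[] args) throws Exception {
        // WebSocket connection through taosAdapter, as configured for FineBI above
        String url = "jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT ts, current FROM meters LIMIT 5")) {
            while (rs.next()) {
                System.out.println(rs.getTimestamp(1) + "  " + rs.getFloat(2));
            }
        }
    }
}
```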
## Data Analysis

### Data Preparation

**Step 1**: Click [Public Data]; on the right, click [New Folder] to create a folder (e.g. `TDengine`), then click the [+] button to the right of the folder to create a "Database table" dataset or a "SQL dataset".

![Create a dataset](./finebi/common.webp)

**Step 2**: Click "Database table" to open the table selection page. In "Data Connections" on the left, select the created connection; all tables in that connection's database are then shown on the right. Select the table to load (e.g. `meters`) and click [OK] to display the data in the `meters` table.

![Database table](./finebi/table-data.webp)

![meters table data](./finebi/public-data.webp)

**Step 3**: Click "SQL dataset" to open its configuration page. First enter a table name (displayed on the FineBI page); then select the created connection from the "Data from data connection" drop-down list; then enter the SQL statement and click Preview to see the query result; finally click [OK] to create the SQL dataset.

![Create a SQL dataset](./finebi/sql-data-config.webp)

### Smart Meters Example

**Step 1**: Click [My Analysis]; on the right, click [New Folder] to create a folder (e.g. `TDengine`), then click the [+] button to the right of the folder to create an "Analysis subject".

![Analysis subject](./finebi/analysis-object.webp)

**Step 2**: On the analysis subject page, select a dataset (e.g. `meters`) and click [OK] to associate it with the subject.

![Associate the dataset](./finebi/load-data.webp)

**Step 3**: Click the [Components] tab at the bottom of the analysis subject page to open the chart configuration page. Drag fields onto the horizontal or vertical axis to display a chart.

![Create a chart](./finebi/create-chart.webp)
