Merge branch '3.0' into enh/TS-6003-3.0

Alex Duan 2025-03-12 20:51:03 +08:00
commit 8126a38993
226 changed files with 7660 additions and 1642 deletions

View File

@ -6,6 +6,7 @@ on:
- 'main'
- '3.0'
- '3.1'
- '3.3.6'
- 'enh/cmake-TD-33848'
paths-ignore:

View File

@ -9,17 +9,12 @@ on:
paths-ignore:
- 'packaging/**'
- 'docs/**'
repository_dispatch:
types: [trigger-tests-from-tdinternal]
concurrency:
group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.ref || github.event.client_payload.ref}}-${{ github.event_name == 'repository_dispatch' && 'dispatch' || ''}} group: ${{ github.workflow }}-${{ github.ref }}-TDengine
cancel-in-progress: true
env:
CONTAINER_NAME: 'taosd-test'
WKDIR: '/var/lib/jenkins/workspace'
WK: '/var/lib/jenkins/workspace/TDinternal'
WKC: '/var/lib/jenkins/workspace/TDinternal/community'
jobs:
@ -28,439 +23,82 @@ jobs:
group: CI
labels: [self-hosted, Linux, X64, testing]
outputs:
tdinternal: ${{ steps.parameters.outputs.tdinternal }}
run_function_test: ${{ steps.parameters.outputs.run_function_test }}
run_tdgpt_test: ${{ steps.parameters.outputs.run_tdgpt_test }}
source_branch: ${{ steps.parameters.outputs.source_branch }}
target_branch: ${{ steps.parameters.outputs.target_branch }}
pr_number: ${{ steps.parameters.outputs.pr_number }}
steps:
- name: Determine trigger source and fetch parameters
id: parameters
run: |
set -euo pipefail
# check the trigger source and get branch information # target_branch=${{ github.event.pull_request.base.ref }}
if [ "${{ github.event_name }}" == "repository_dispatch" ]; then
tdinternal="true" # # Fetch the latest code from the target branch
source_branch=${{ github.event.client_payload.tdinternal_source_branch }} # cd ${{ env.WKC }}
target_branch=${{ github.event.client_payload.tdinternal_target_branch }} # git reset --hard
pr_number=${{ github.event.client_payload.tdinternal_pr_number }} # git clean -f
# git remote prune origin
# git fetch
# git checkout "$target_branch"
# git remote prune origin
# git pull >/dev/null
# # Check whether to run tdgpt test cases
# changed_files_non_doc=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | grep -v "^docs/en/" | grep -v "^docs/zh/" | grep -v ".md$" | tr '\n' ' ' || :)
# echo "changed files exclude doc: ${changed_files_non_doc}"
# if [[ -n "$changed_files_non_doc" && "$changed_files_non_doc" =~ (forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt) ]]; then
# run_tdgpt_test="true"
# else
# run_tdgpt_test="false"
# fi
# echo "run tdgpt test: ${run_tdgpt_test}"
# # Check whether to run function test cases
# changed_files_non_tdgpt=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | \
# grep -v "^docs/en/" | \
# grep -v "^docs/zh/" | \
# grep -v ".md$" | \
# grep -Ev "forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt" | \
# tr '\n' ' ' || :)
# echo "changed files exclude tdgpt: ${changed_files_non_tdgpt}"
# if [ -n "$changed_files_non_tdgpt" ]; then
# run_function_test="true"
# else
# run_function_test="false"
# fi
# echo "run function test: ${run_function_test}"
run_tdgpt_test="true"
run_function_test="true"
else # Output the results for GitHub Actions
tdinternal="false"
source_branch=${{ github.event.pull_request.head.ref }}
target_branch=${{ github.event.pull_request.base.ref }}
pr_number=${{ github.event.pull_request.number }}
# check whether to run tdgpt test cases
cd ${{ env.WKC }}
changed_files_non_doc=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | grep -v "^docs/en/" | grep -v "^docs/zh/" | grep -v ".md$" | tr '\n' ' ' || :)
echo "changed files exclude doc: ${changed_files_non_doc}"
if [[ -n "$changed_files_non_doc" && "$changed_files_non_doc" =~ (forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt) ]]; then
run_tdgpt_test="true"
else
run_tdgpt_test="false"
fi
echo "run tdgpt test: ${run_tdgpt_test}"
# check whether to run function test cases
changed_files_non_tdgpt=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | \
grep -v "^docs/en/" | \
grep -v "^docs/zh/" | \
grep -v ".md$" | \
grep -Ev "forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt" | \
tr '\n' ' ' || :)
echo "changed file exclude tdgpt: ${changed_files_non_tdgpt}"
if [ -n "$changed_files_non_tdgpt" ]; then
run_function_test="true"
else
run_function_test="false"
fi
echo "run function test: ${run_function_test}"
fi
echo "tdinternal=$tdinternal" >> $GITHUB_OUTPUT
echo "run_function_test=$run_function_test" >> $GITHUB_OUTPUT echo "run_function_test=$run_function_test" >> $GITHUB_OUTPUT
echo "run_tdgpt_test=$run_tdgpt_test" >> $GITHUB_OUTPUT echo "run_tdgpt_test=$run_tdgpt_test" >> $GITHUB_OUTPUT
echo "source_branch=$source_branch" >> $GITHUB_OUTPUT
echo "target_branch=$target_branch" >> $GITHUB_OUTPUT echo ${{ github.event.pull_request.head.ref }}
echo "pr_number=$pr_number" >> $GITHUB_OUTPUT echo ${{ github.event.pull_request.base.ref }}
echo ${{ github.event.pull_request.number }}
run-tests-on-linux:
uses: taosdata/.github/.github/workflows/run-tests-on-linux.yml@main
needs: fetch-parameters
runs-on: if: ${{ needs.fetch-parameters.outputs.run_tdgpt_test == 'true' || needs.fetch-parameters.outputs.run_function_test == 'true' }}
group: CI with:
labels: [self-hosted, Linux, X64, testing] tdinternal: false
timeout-minutes: 200 run_function_test: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
env: run_tdgpt_test: ${{ needs.fetch-parameters.outputs.run_tdgpt_test == 'true' }}
IS_TDINTERNAL: ${{ needs.fetch-parameters.outputs.tdinternal }}
RUN_RUNCTION_TEST: ${{ needs.fetch-parameters.outputs.run_function_test }}
RUN_TDGPT_TEST: ${{ needs.fetch-parameters.outputs.run_tdgpt_test }}
SOURCE_BRANCH: ${{ needs.fetch-parameters.outputs.source_branch }}
TARGET_BRANCH: ${{ needs.fetch-parameters.outputs.target_branch }}
PR_NUMBER: ${{ needs.fetch-parameters.outputs.pr_number }}
steps:
- name: Output the environment information
run: |
echo "::group::Environment Info"
date
hostname
env
echo "Runner: ${{ runner.name }}"
echo "Trigger Source from TDinternal: ${{ env.IS_TDINTERNAL }}"
echo "Workspace: ${{ env.WKDIR }}"
git --version
echo "${{ env.WKDIR }}/restore.sh -p ${{ env.PR_NUMBER }} -n ${{ github.run_number }} -c ${{ env.CONTAINER_NAME }}"
echo "::endgroup::"
- name: Prepare repositories
run: |
set -euo pipefail
prepare_environment() {
cd "$1"
git reset --hard
git clean -f
git remote prune origin
git fetch
git checkout "$2"
}
prepare_environment "${{ env.WK }}" "${{ env.TARGET_BRANCH }}"
prepare_environment "${{ env.WKC }}" "${{ env.TARGET_BRANCH }}"
- name: Get latest codes and logs for TDinternal PR
if: ${{ env.IS_TDINTERNAL == 'true' }}
run: |
cd ${{ env.WK }}
git pull >/dev/null
git log -5
echo "`date "+%Y%m%d-%H%M%S"` TDinternalTest/${{ env.PR_NUMBER }}:${{ github.run_number }}:${{ env.TARGET_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
echo "CHANGE_BRANCH:${{ env.SOURCE_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
echo "TDinternal log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
git fetch origin +refs/pull/${{ env.PR_NUMBER }}/merge
git checkout -qf FETCH_HEAD
git log -5
echo "TDinternal log merged: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
cd ${{ env.WKC }}
git remote prune origin
git pull >/dev/null
git log -5
echo "community log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
- name: Get latest codes and logs for TDengine PR
if: ${{ env.IS_TDINTERNAL == 'false' }}
run: |
cd ${{ env.WKC }}
git remote prune origin
git pull >/dev/null
git log -5
echo "`date "+%Y%m%d-%H%M%S"` TDengineTest/${{ env.PR_NUMBER }}:${{ github.run_number }}:${{ env.TARGET_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
echo "CHANGE_BRANCH:${{ env.SOURCE_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
echo "community log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
git fetch origin +refs/pull/${{ env.PR_NUMBER }}/merge
git checkout -qf FETCH_HEAD
git log -5
echo "community log merged: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
cd ${{ env.WK }}
git pull >/dev/null
git log -5
echo "TDinternal log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
- name: Update submodule
run: |
cd ${{ env.WKC }}
git submodule update --init --recursive
- name: Output the 'file_no_doc_changed' information to the file
if: ${{ env.IS_TDINTERNAL == 'false' && env.TARGET_BRANCH != '3.1' }}
run: |
mkdir -p ${{ env.WKDIR }}/tmp/${{ env.PR_NUMBER }}_${{ github.run_number }}
cd ${{ env.WKC }}
changed_files_non_doc=$(git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${{ env.TARGET_BRANCH }}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | tr '\n' ' ' || :)
echo $changed_files_non_doc > ${{ env.WKDIR }}/tmp/${{ env.PR_NUMBER }}_${{ github.run_number }}/docs_changed.txt
- name: Check assert testing
if: ${{ env.IS_TDINTERNAL == 'false' && env.TARGET_BRANCH != '3.1' }}
run: |
cd ${{ env.WKC }}/tests/parallel_test
./run_check_assert_container.sh -d ${{ env.WKDIR }}
- name: Check void function testing
if: ${{ env.IS_TDINTERNAL == 'false' && env.TARGET_BRANCH != '3.1' }}
run: |
cd ${{ env.WKC }}/tests/parallel_test
./run_check_void_container.sh -d ${{ env.WKDIR }}
- name: Build docker container
if: ${{ env.RUN_RUNCTION_TEST == 'true' }}
run: |
date
rm -rf ${{ env.WKC }}/debug
cd ${{ env.WKC }}/tests/parallel_test
time ./container_build.sh -w ${{ env.WKDIR }} -e
- name: Get parameters for testing
id: get_param
run: |
log_server_file="/home/log_server.json"
timeout_cmd=""
extra_param=""
if [ -f "$log_server_file" ]; then
log_server_enabled=$(jq '.enabled' "$log_server_file")
timeout_param=$(jq '.timeout' "$log_server_file")
if [ "$timeout_param" != "null" ] && [ "$timeout_param" != "0" ]; then
timeout_cmd="timeout $timeout_param"
fi
if [ "$log_server_enabled" == "1" ]; then
log_server=$(jq '.server' "$log_server_file" | sed 's/\\\"//g')
if [ "$log_server" != "null" ] && [ "$log_server" != "" ]; then
extra_param="-w $log_server"
fi
fi
fi
echo "timeout_cmd=$timeout_cmd" >> $GITHUB_OUTPUT
echo "extra_param=$extra_param" >> $GITHUB_OUTPUT
- name: Run function returns with a null pointer scan testing
if: ${{ env.IS_TDINTERNAL == 'false' && env.TARGET_BRANCH != '3.1' }}
run: |
cd ${{ env.WKC }}/tests/parallel_test
./run_scan_container.sh -d ${{ env.WKDIR }} -b ${{ env.PR_NUMBER }}_${{ github.run_number }} -f ${{ env.WKDIR }}/tmp/${{ env.PR_NUMBER }}_${{ github.run_number }}/docs_changed.txt ${{ steps.get_param.outputs.extra_param }}
- name: Run tdgpt test cases
if: ${{ env.IS_TDINTERNAL == 'false' && env.TARGET_BRANCH != '3.1' && env.RUN_TDGPT_TEST == 'true' }}
run: |
cd ${{ env.WKC }}/tests/parallel_test
export DEFAULT_RETRY_TIME=2
date
timeout 600 time ./run.sh -e -m /home/m.json -t tdgpt_cases.task -b ${{ env.PR_NUMBER }}_${{ github.run_number }} -l ${{ env.WKDIR }}/log -o 300 ${{ steps.get_param.outputs.extra_param }}
- name: Run function test cases
if: ${{ env.RUN_RUNCTION_TEST == 'true'}}
run: |
cd ${{ env.WKC }}/tests/parallel_test
export DEFAULT_RETRY_TIME=2
date
${{ steps.get_param.outputs.timeout_cmd }} time ./run.sh -e -m /home/m.json -t cases.task -b ${{ env.PR_NUMBER }}_${{ github.run_number }} -l ${{ env.WKDIR }}/log -o 1200 ${{ steps.get_param.outputs.extra_param }}
run-tests-on-mac:
uses: taosdata/.github/.github/workflows/run-tests-on-macos.yml@main
needs: fetch-parameters
if: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
runs-on: with:
group: CI tdinternal: false
labels: [self-hosted, macOS, testing]
timeout-minutes: 60
env:
IS_TDINTERNAL: ${{ needs.fetch-parameters.outputs.tdinternal }}
SOURCE_BRANCH: ${{ needs.fetch-parameters.outputs.source_branch }}
TARGET_BRANCH: ${{ needs.fetch-parameters.outputs.target_branch }}
PR_NUMBER: ${{ needs.fetch-parameters.outputs.pr_number }}
steps:
- name: Output the environment information
run: |
echo "::group::Environment Info"
date
hostname
env
echo "Runner: ${{ runner.name }}"
echo "Trigger Source from TDinternal: ${{ env.IS_TDINTERNAL }}"
echo "Workspace: ${{ env.WKDIR }}"
git --version
echo "${{ env.WKDIR }}/restore.sh -p ${{ env.PR_NUMBER }} -n ${{ github.run_number }} -c ${{ env.CONTAINER_NAME }}"
echo "::endgroup::"
- name: Prepare repositories
run: |
set -euo pipefail
prepare_environment() {
cd "$1"
git reset --hard
git clean -f
git remote prune origin
git fetch
git checkout "$2"
}
prepare_environment "${{ env.WK }}" "${{ env.TARGET_BRANCH }}"
prepare_environment "${{ env.WKC }}" "${{ env.TARGET_BRANCH }}"
- name: Get latest codes and logs for TDinternal PR
if: ${{ env.IS_TDINTERNAL == 'true' }}
run: |
cd ${{ env.WK }}
git pull >/dev/null
git log -5
echo "`date "+%Y%m%d-%H%M%S"` TDinternalTest/${{ env.PR_NUMBER }}:${{ github.run_number }}:${{ env.TARGET_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
echo "CHANGE_BRANCH:${{ env.SOURCE_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
echo "TDinternal log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
git fetch origin +refs/pull/${{ env.PR_NUMBER }}/merge
git checkout -qf FETCH_HEAD
git log -5
echo "TDinternal log merged: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
cd ${{ env.WKC }}
git remote prune origin
git pull >/dev/null
git log -5
echo "community log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
- name: Get latest codes and logs for TDengine PR
if: ${{ env.IS_TDINTERNAL == 'false' }}
run: |
cd ${{ env.WKC }}
git remote prune origin
git pull >/dev/null
git log -5
echo "`date "+%Y%m%d-%H%M%S"` TDengineTest/${{ env.PR_NUMBER }}:${{ github.run_number }}:${{ env.TARGET_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
echo "CHANGE_BRANCH:${{ env.SOURCE_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
echo "community log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
git fetch origin +refs/pull/${{ env.PR_NUMBER }}/merge
git checkout -qf FETCH_HEAD
git log -5
echo "community log merged: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
cd ${{ env.WK }}
git pull >/dev/null
git log -5
echo "TDinternal log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
- name: Update submodule
run: |
cd ${{ env.WKC }}
git submodule update --init --recursive
- name: Run tests
run: |
date
cd ${{ env.WK }}
rm -rf debug
mkdir debug
cd ${{ env.WK }}/debug
echo $PATH
echo "PATH=/opt/homebrew/bin:$PATH" >> $GITHUB_ENV
cmake .. -DBUILD_TEST=true -DBUILD_HTTPS=false -DCMAKE_BUILD_TYPE=Release
make -j10
ctest -j10 || exit 7
date
run-tests-on-windows:
uses: taosdata/.github/.github/workflows/run-tests-on-windows.yml@main
needs: fetch-parameters
if: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
runs-on: with:
group: CI tdinternal: false
labels: [self-hosted, Windows, X64, testing]
timeout-minutes: 126
env:
IS_TDINTERNAL: ${{ needs.fetch-parameters.outputs.tdinternal }}
SOURCE_BRANCH: ${{ needs.fetch-parameters.outputs.source_branch }}
TARGET_BRANCH: ${{ needs.fetch-parameters.outputs.target_branch }}
PR_NUMBER: ${{ needs.fetch-parameters.outputs.pr_number }}
WIN_INTERNAL_ROOT: "C:\\workspace\\0\\TDinternal"
WIN_COMMUNITY_ROOT: "C:\\workspace\\0\\TDinternal\\community"
WIN_SYSTEM_TEST_ROOT: "C:\\workspace\\0\\TDinternal\\community\\tests\\system-test"
WIN_VS_PATH: "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat"
WIN_CPU_TYPE: "x64"
steps:
- name: Output the environment information
run: |
hostname
taskkill /f /t /im python.exe
taskkill /f /t /im bash.exe
taskkill /f /t /im taosd.exe
ipconfig
set
date /t
time /t
rd /s /Q "%WIN_INTERNAL_ROOT%\debug" || exit 0
shell: cmd
- name: Prepare repositories
run: |
:: Prepare internal repository
if exist "%WIN_INTERNAL_ROOT%" (
cd /d "%WIN_INTERNAL_ROOT%"
git reset --hard
git clean -f
git remote prune origin
git fetch
git checkout "%TARGET_BRANCH%"
) else (
echo Directory does not exist: "%WIN_INTERNAL_ROOT%"
exit 1
)
:: Prepare community repository
if exist "%WIN_COMMUNITY_ROOT%" (
cd /d "%WIN_COMMUNITY_ROOT%"
git reset --hard
git clean -f
git remote prune origin
git fetch
git checkout "%TARGET_BRANCH%"
) else (
echo Directory does not exist: "%WIN_COMMUNITY_ROOT%"
exit 1
)
shell: cmd
- name: Get latest codes and logs for TDinternal PR
if: ${{ env.IS_TDINTERNAL == 'true' }}
run: |
cd %WIN_INTERNAL_ROOT%
git pull origin %TARGET_BRANCH%
git fetch origin +refs/pull/%PR_NUMBER%/merge
git checkout -qf FETCH_HEAD
cd %WIN_COMMUNITY_ROOT%
git remote prune origin
git pull
shell: cmd
- name: Get latest codes and logs for TDengine PR
if: ${{ env.IS_TDINTERNAL == 'false' }}
run: |
cd %WIN_INTERNAL_ROOT%
git pull origin %TARGET_BRANCH%
cd %WIN_COMMUNITY_ROOT%
git remote prune origin
git pull origin %TARGET_BRANCH%
git fetch origin +refs/pull/%PR_NUMBER%/merge
git checkout -qf FETCH_HEAD
shell: cmd
- name: Output branch and log information
run: |
cd %WIN_INTERNAL_ROOT%
git branch
git log -5
cd %WIN_COMMUNITY_ROOT%
git branch
git log -5
shell: cmd
- name: Update submodule
run: |
cd %WIN_COMMUNITY_ROOT%
git submodule update --init --recursive
shell: cmd
- name: Build on windows
run: |
echo "building ..."
time /t
cd %WIN_INTERNAL_ROOT%
mkdir debug
cd debug
time /t
call "%WIN_VS_PATH%" %WIN_CPU_TYPE%
set CL=/MP8
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> cmake"
time /t
cmake .. -G "NMake Makefiles JOM" -DBUILD_TEST=true -DBUILD_TOOLS=true || exit 7
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> jom -j 6"
time /t
jom -j 6 || exit 8
time /t
cd %WIN_COMMUNITY_ROOT%/tests/ci
pip3 install taospy==2.7.21
pip3 install taos-ws-py==0.3.8
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
shell: cmd
- name: Run ctest
run: |
echo "windows ctest ..."
time /t
cd %WIN_INTERNAL_ROOT%\\debug
ctest -j 1 || exit 7
time /t
shell: cmd
- name: Run function test
run: |
echo "windows test ..."
xcopy /e/y/i/f "%WIN_INTERNAL_ROOT%\debug\build\lib\taos.dll" C:\Windows\System32
ls -l "C:\Windows\System32\taos.dll"
time /t
cd %WIN_SYSTEM_TEST_ROOT%
echo "testing ..."
test-all.bat ci
time /t
shell: cmd

.github/workflows/tdgpt-ci.yml vendored Normal file
View File

@ -0,0 +1,51 @@
name: TDgpt CI
on:
pull_request:
branches:
- '3.0'
paths:
- 'tools/tdgpt/**'
jobs:
build:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
defaults:
run:
working-directory: ${{ github.workspace }}/tools/tdgpt
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v3
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
python -m pip install flake8 pytest pylint
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Checking the code with pylint
run: |
pylint $(git ls-files '*.py') --exit-zero
- name: Checking the code with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Run test cases with pytest
run: |
pytest

View File

@ -0,0 +1,41 @@
name: TDgpt Update Service
on:
schedule:
- cron: '30 00 * * *'
env:
WKC: "/root/TDengine"
jobs:
update-service:
runs-on:
group: CI
labels: [self-hosted, Linux, X64, tdgpt-anode-service]
steps:
- name: Update TDengine codes
run: |
set -euo pipefail
cd ${{ env.WKC }}
git checkout 3.0
- name: Package the TDGpt Anode Service
run: |
set -euo pipefail
cd ${{ env.WKC }}/tools/tdgpt/script && ./release.sh
- name: Reinstall and restart the TDGpt Anode Service
run: |
set -euo pipefail
cd ${{ env.WKC }}/tools/tdgpt/release
if [[ -f "TDengine-enterprise-anode-1.0.1.tar.gz" ]]; then
tar -xzf TDengine-enterprise-anode-1.0.1.tar.gz
cd TDengine-enterprise-anode-1.0.1
./install.sh
fi
systemctl restart taosanoded
- name: Clean up
if: always()
run: |
if [[ -d ${{ env.WKC }}/tools/tdgpt/release/TDengine-enterprise-anode-1.0.1 ]]; then rm -rf ${{ env.WKC }}/tools/tdgpt/release/TDengine-enterprise-anode-1.0.1; fi

View File

@ -112,16 +112,6 @@ def build_pre_docs(){
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
'''
sh '''
cd ${DOC_WKC}/${tools_repo}
git reset --hard
git clean -f
git fetch
git remote prune origin
git checkout ''' + env.CHANGE_TARGET + '''
git pull >/dev/null
'''
}
}

View File

@ -145,20 +145,19 @@ Query OK, 10 row(s) in set (2.415961s)
In TDengine, you can use the window clause to perform aggregation queries by time window partitioning, which is particularly suitable for scenarios requiring analysis of large amounts of time-series data, such as smart meters collecting data every 10s but needing to query the average temperature every 1min.
The window clause allows you to partition the queried data set by windows and aggregate the data within each window, including:
The window clause allows you to partition the queried data set by windows and aggregate the data within each window. The logic of window partitioning is shown in the following image:
- Time window (time window)
- State window (status window)
- Session window (session window)
- Event window (event window)
The logic of window partitioning is shown in the following image:
<figure>
<Image img={windowModel} alt="Windowing description"/>
<figcaption>Figure 1. Windowing logic</figcaption>
</figure>
- Time Window: Data is divided based on time intervals, supporting sliding and tumbling time windows, suitable for data aggregation over fixed time periods.
- Status Window: Windows are divided based on changes in device status values, with data of the same status value grouped into one window, which closes when the status value changes.
- Session Window: Sessions are divided based on the differences in record timestamps, with records having a timestamp interval less than the predefined value belonging to the same session.
- Event Window: Windows are dynamically divided based on the start and end conditions of events, opening when the start condition is met and closing when the end condition is met.
- Count Window: Windows are divided based on the number of data rows, with each window consisting of a specified number of rows for aggregation calculations.
The syntax for the window clause is as follows:
```sql

View File

@ -86,9 +86,15 @@ The keep alive interval is the time interval negotiated between the client and t
In **Clean Session**, choose whether to clear the session. The default value is true.
Fill in the Topic names to be consumed in **Subscription Topics and QoS Configuration**. Use the following format: `topic1::0,topic2::1`.
In the **Topics Qos Config**, fill in the topic name and QoS to subscribe. Use the following format: `{topic_name}::{qos}` (e.g., `my_topic::0`). MQTT protocol 5.0 supports shared subscriptions, allowing multiple clients to subscribe to the same topic for load balancing. Use the following format: `$share/{group_name}/{topic_name}::{qos}`, where `$share` is a fixed prefix indicating the enablement of shared subscription, and `group_name` is the client group name, similar to Kafka's consumer group.
Click the **Check Connectivity** button to check if the data source is available.
In the **Topic Analysis**, fill in the MQTT topic parsing rules. The format is the same as the MQTT Topic, parsing each level of the MQTT Topic into corresponding variable names, with `_` indicating that the current level is ignored during parsing. For example: if the MQTT Topic `a/+/c` corresponds to the parsing rule `v1/v2/_`, it means assigning the first level `a` to variable `v1`, the value of the second level (where the wildcard `+` represents any value) to variable `v2`, and ignoring the value of the third level `c`, which will not be assigned to any variable. In the `payload parsing` below, the variables obtained from Topic parsing can also participate in various transformations and calculations.
In the **Compression**, configure the message body compression algorithm. After receiving the message, taosX uses the corresponding compression algorithm to decompress the message body and obtain the original data. Options include none (no compression), gzip, snappy, lz4, and zstd, with the default being none.
In the **Char Encoding**, configure the message body encoding format. After receiving the message, taosX uses the corresponding encoding format to decode the message body and obtain the original data. Options include UTF_8, GBK, GB18030, and BIG5, with the default being UTF_8.
Click the **Check Connection** button to check if the data source is available.
<figure>
<Image img={imgStep05} alt=""/>

View File

@ -339,7 +339,7 @@ Helm operates Kubernetes using kubectl and kubeconfig configurations, which can
The TDengine Chart has not yet been released to the Helm repository, it can currently be downloaded directly from GitHub:
```shell
wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-enterpise-3.5.0.tgz
wget https://github.com/taosdata/TDengine-Operator/raw/refs/heads/3.0/helm/tdengine-enterprise-3.5.0.tgz
```
Note that it's for the enterprise edition, and the community edition is not yet available.

View File

@ -27,19 +27,19 @@ It should be noted that when configuring the ODBC data source for Tableau, the [
**Step 3**, Click the `DSN` radio button, then select the configured data source (MyTDengine), and click the `Connect` button. After the connection is successful, delete the content of the string attachment, and finally click the `Sign In` button.
![tableau-odbc](./tableau/tableau-odbc.jpg)
![tableau-odbc](./tableau/tableau-odbc.webp)
## Data Analysis
**Step 1**, In the workbook page, the connected data sources will be displayed. Clicking on the dropdown list of databases will display the databases that require data analysis. On this basis, click the search button in the table options to display all tables in the database. Then, drag the table to be analyzed to the right area to display the table structure.
![tableau-workbook](./tableau/tableau-table.jpg)
![tableau-workbook](./tableau/tableau-table.webp)
**Step 2**, Click the `Update Now` button below to display the data in the table.
![tableau-workbook](./tableau/tableau-data.jpg)
![tableau-workbook](./tableau/tableau-data.webp)
**Step 3**, Click on the "Worksheet" at the bottom of the window to pop up the data analysis window, which displays all the fields of the analysis table. Drag the fields to the rows and columns to display the chart.
![tableau-workbook](./tableau/tableau-analysis.jpg)
![tableau-workbook](./tableau/tableau-analysis.webp)

View File

@ -21,22 +21,22 @@ Prepare the following environment:
**Step 2**, Start Excel in the Windows system environment, then select [Data] -> [Get Data] -> [From Other Sources] -> [From ODBC].
![excel-odbc](./excel/odbc-menu.jpg)
![excel-odbc](./excel/odbc-menu.webp)
**Step 3**, In the pop-up window, select the data source you need to connect to from the drop-down list of [Data source name (DSN)], and then click the [OK] button.
![excel-odbc](./excel/odbc-select.jpg)
![excel-odbc](./excel/odbc-select.webp)
**Step 4**, Enter the username and password for TDengine.
![excel-odbc](./excel/odbc-config.jpg)
![excel-odbc](./excel/odbc-config.webp)
**Step 5**, In the pop-up [Navigator] dialog box, select the database tables you want to load, and then click [Load] to complete the data loading.
![excel-odbc](./excel/odbc-load.jpg)
![excel-odbc](./excel/odbc-load.webp)
## Data Analysis
Select the imported data. On the [Insert] tab, choose the column chart, and then configure the data fields in the [PivotChart Fields] pane on the right.
![excel-odbc](./excel/odbc-data.jpg)
![excel-odbc](./excel/odbc-data.webp)

View File

@ -0,0 +1,82 @@
---
sidebar_label: FineBI
title: Integration With FineBI
toc_max_heading_level: 4
---
Fanruan is a technology company specializing in the field of business intelligence and data analytics. With its self-developed core products, FineBI and FineReport, the company has established a leading position in the industry. Fanruan's BI tools are widely adopted by enterprises across various sectors, empowering users to achieve data visualization analysis, report generation, and data-driven decision support.
By using the TDengine Java connector, FineBI can quickly access the data in TDengine. Users can connect directly to the TDengine database in FineBI, obtain time-series data for analysis, and create visual reports, all without writing any code.
## Prerequisites
- TDengine 3.3.4.0 and above version is installed and running normally (both Enterprise and Community versions are available).
- taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/).
- Install FineBI (if not installed, please download and install [Download FineBI](https://intl.finebi.com/download)).
- Download the fine_conf_entity plugin to support the addition of JDBC drivers, [Download link](https://market.fanruan.com/plugin/1052a471-0239-4cd8-b832-045d53182c5d).
- Install the JDBC driver. Download the `TDengine JDBC connector` file `taos-jdbcdriver-3.4.0-dist.jar` or a higher version from `maven.org`.
## Configure Data Source
**Step 1**, In the `db.script` configuration file of the FineBI server, find the `SystemConfig.driverUpload` configuration item and change its value to true.
- Windows system: The path of the configuration file is webapps/webroot/WEB-INF/embed/finedb/db.script under the installation directory.
- Linux/Mac system: The path of the configuration file is /usr/local/FineBI6.1/webapps/webroot/WEB-INF/embed/finedb/db.script.
**Step 2**, Start the FineBI service. Enter `http://ip:37799/webroot/decision` in the browser, where "ip" is the IP address of the FineBI server.
**Step 3**, After logging in to the FineBI Web page, click [System Management] -> [Plugin Management]. In the [Store App] on the right side, click [Install From Local] and select the downloaded `fine_conf_entity` plugin for installation.
![finebi-workbook](./finebi/plugin.webp)
**Step 4**, Click [System Management] -> [Data Connection] -> [Data Connection Management]. On the right-hand page, click the [Driver Management] button to open the configuration page. Then click the [New Driver] button, and in the pop-up window, enter a name (for example, `tdengine-websocket`) to configure the JDBC driver.
![finebi-workbook](./finebi/connect-manage.webp)
**Step 5**, On the driver configuration page, click the [Upload File] button. Select the downloaded TDengine Java Connector (e.g., `taos-jdbcdriver-3.4.0-dist.jar`) for uploading. After the upload is complete, select `com.taosdata.jdbc.ws.WebSocketDriver` from the drop-down list of [Driver], and then click [Save].
![finebi-workbook](./finebi/new-driver.webp)
**Step 6**, On the "Data Connection Management" page, click the [New Data Connection] button. Subsequently, click "Others", and then on the right-side page, click "Other JDBC" to perform the connection configuration.
![finebi-workbook](./finebi/jdbc-connect.webp)
**Step 7**, On the configuration page, first enter the name of the data connection. Then, select "Custom" in the [Driver] option and choose the configured driver from the drop-down list (e.g., `com.taosdata.jdbc.ws.WebSocketDriver (tdengine-websocket)`). After that, configure the "Data Connection URL" (e.g., `jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata&fineBIDialect=mysql`). Once the settings are completed, click [Test Connection] in the top-right corner to test the connection. After the verification is successful, click [Save] to finish the configuration.
:::tip
Setting `fineBIDialect=mysql` makes FineBI adopt the SQL dialect rules of the MySQL database; in other words, it tells FineBI to parse and execute queries and operations in the way that MySQL handles SQL statements.
:::
![finebi-workbook](./finebi/jdbc-config.webp)
## Data Analysis
### Data preparation
**Step 1**, Click [Public Data]. On the right-hand page, click [New Folder] to create a folder (e.g., TDengine). Then, click the [+] button on the right side of the folder to create a "Database Table" dataset or an "SQL Dataset".
![finebi-workbook](./finebi/common.webp)
**Step 2**, Click "Database Table" to open the database table selection page. In the "Data Connection" section on the left, select the previously created connection. Then, all the tables in the database of the current connection will be displayed on the right. Select the table you need to load (e.g., meters), and click [OK]. The data in the meters table will then be displayed.
![finebi-workbook](./finebi/select-table.webp)
![finebi-workbook](./finebi/table-data.webp)
**Step 3**, Click "SQL Dataset" to open the configuration page for the SQL dataset. First, enter the table name (used for display on the FineBI page). Then, select the previously created connection from the drop-down list of "Data from Data Connection". After that, enter the SQL statement and click "Preview" to view the query results. Finally, click [OK] to successfully create the SQL dataset.
![finebi-workbook](./finebi/sql-data-config.webp)
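As an illustration, an SQL statement along the following lines could serve as the SQL dataset (a sketch only; it assumes the `power` database and `meters` supertable from the smart meter example below, with the standard `current`, `voltage`, and `phase` columns):

```sql
-- Recent readings per device from the smart meter supertable
SELECT tbname, ts, current, voltage, phase
FROM power.meters
WHERE ts >= now - 1d
LIMIT 100;
```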
### Smart Meter Example
**Step 1**, Click [My Analysis]. On the right-hand page, click [New Folder] to create a folder (for example, `TDengine`). Then, click the [+] button on the right side of the folder to create an "Analysis Subject".
![finebi-workbook](./finebi/analysis-object.webp)
**Step 2**, On the analysis subject page, select the dataset (for example, `meters`) and then click the [OK] button to complete the association of the dataset.
![finebi-workbook](./finebi/load-data.webp)
**Step 3**, Click the [Component] tab at the bottom of the analysis subject page to open the chart configuration page. Drag the fields to the horizontal axis or the vertical axis, and then the chart will be displayed.
![finebi-workbook](./finebi/analysis-chart.webp)

(Binary image files not shown: existing .jpg screenshots replaced by .webp versions, plus newly added .webp images.)

View File

@ -43,7 +43,7 @@ After modifying configuration file parameters, you need to restart the *taosd* s
|resolveFQDNRetryTime | Cancelled after 3.x |Not supported |Number of retries when FQDN resolution fails|
|timeToGetAvailableConn | Cancelled after 3.3.4.x |Maximum waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|maxShellConns | Cancelled after 3.x |Supported, effective after restart|Maximum number of connections allowed|
|maxRetryWaitTime | |Supported, effective after restart|Maximum timeout for reconnection,calculated from the time of retry,range is 0-86400000,in milliseconds, default value 10000|
|maxRetryWaitTime | |Supported, effective after restart|Maximum timeout for reconnection,calculated from the time of retry,range is 3000-86400000,in milliseconds, default value 10000|
|shareConnLimit |Added in 3.3.4.0 |Supported, effective after restart|Number of requests a connection can share, range 1-512, default value 10|
|readTimeout |Added in 3.3.4.0 |Supported, effective after restart|Minimum timeout for a single request, range 64-604800, in seconds, default value 900|

View File

@ -44,6 +44,7 @@ The TDengine client driver provides all the APIs needed for application programm
|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages|
|minSlidingTime | |Supported, effective immediately |Internal parameter, minimum allowable value for sliding|
|minIntervalTime | |Supported, effective immediately |Internal parameter, minimum allowable value for interval|
|compareAsStrInGreatest | v3.3.6.0 |Supported, effective immediately |When the greatest and least functions take both numeric and string parameters, this integer option controls the type conversion used for comparison: 1 means all parameters are converted to strings for comparison, 0 means all are converted to numeric values.|
### Writing Related

View File

@ -371,10 +371,14 @@ Specify the configuration parameters for tag and data columns in `super_tables`
### Query Parameters
In query scenarios, `filetype` must be set to `query`.
`filetype` must be set to `query`.
`query_mode` specifies the connection method used for queries:
- "taosc": native connection.
- "rest": RESTful connection.
`query_times` specifies the number of times to run the query, numeric type.
Query scenarios can control the execution of slow query statements by setting `kill_slow_query_threshold` and `kill_slow_query_interval` parameters, where threshold controls that queries exceeding the specified exec_usec time will be killed by taosBenchmark, in seconds; interval controls the sleep time to avoid continuous slow query CPU consumption, in seconds.
For other common parameters, see [General Configuration Parameters](#general-configuration-parameters)
@ -387,8 +391,21 @@ Configuration parameters for querying specified tables (can specify supertables,
The total number of queries(`General Query`) = the number of `sqls` * `query_times` * `threads`
`Mixed Query`:
All SQL statements in `sqls` are divided into `threads` groups, with each thread executing one group. Each SQL statement needs to execute `query_times` queries.
The total number of queries(`Mixed Query`) = the number of `sqls` * `query_times`.
- **batch_query** : Batch query switch.
"yes": indicates that batch query is enabled.
"no": indicates that it is disabled; any other value reports an error.
Batch query divides all SQL statements in `sqls` into `threads` groups, with each thread executing one group.
Each SQL statement is executed once per round; the main thread waits for all threads to complete, then checks whether the `query_interval` parameter is set and, if so, sleeps for the specified time before starting the next round. This repeats until the total number of queries is exhausted.
Functional limitations:
- Only supports scenarios where `mixed_query` is set to 'yes'.
- RESTful queries are not supported, meaning `query_mode` cannot be 'rest'.
- **query_interval** : Query interval, in milliseconds, default is 0.
When `batch_query` is enabled, this is the interval after each batch of queries completes; when disabled, it is the interval between the completion of individual SQL queries.
If a query's execution time exceeds the interval, no additional wait occurs; if it is shorter than the interval, the thread waits for the remaining time to make up the interval.
- **threads** : Number of threads executing the SQL query, default is 1.
- **sqls**:
- **sql**: The SQL command to execute, required.

View File

@ -124,7 +124,39 @@ FLOOR(expr)
```
**Function Description**: Gets the floor of the specified field.
Other usage notes see CEIL function description. Other usage notes see [CEIL](#ceil) function description.
#### GREATEST
```sql
GREATEST(expr1, expr2[, expr]...)
```
**Function Description**: Gets the maximum value of all input parameters. This function takes a minimum of 2 parameters.
**Version**: ver-3.3.6.0
**Return Type**: Refer to the comparison rules; the comparison type is the final return type.
**Applicable Data Types**:
- Numeric types: timestamp, bool, integer, and floating-point types.
- String types: nchar and varchar types.
**Comparison rules**: The following rules describe how the comparison type is determined:
- If any parameter is NULL, the comparison result is NULL.
- If all parameters are string types, they are compared as strings.
- If all parameters are numeric types, they are compared as numeric values.
- If the parameters include both string and numeric types, they are uniformly compared as strings or as numeric values according to the `compareAsStrInGreatest` configuration item. By default, they are compared as strings.
- In all cases, when different types are compared, the type with the larger range is chosen as the comparison type. For example, when comparing integer types, if a BIGINT parameter is present, BIGINT is selected as the comparison type.
**Related configuration items**: Client configuration `compareAsStrInGreatest`: 1 means string and numeric parameters are all converted to strings for comparison, 0 means they are all converted to numeric values. The default is 1.
#### LEAST
```sql
LEAST(expr1, expr2[, expr]...)
```
**Function Description**: Gets the minimum value of all input parameters. The rest of the description is the same as the [GREATEST](#greatest) function.
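A brief usage sketch (illustrative only; the table `t1` and its columns `c_int` and `c_str` are hypothetical, and the mixed-type case follows the `compareAsStrInGreatest` setting described above):

```sql
-- All-numeric arguments are compared as numeric values
SELECT GREATEST(3, 10, 7);    -- 10
SELECT LEAST(3, 10, 7);       -- 3
-- Any NULL argument makes the result NULL
SELECT GREATEST(3, NULL, 7);  -- NULL
-- Mixed numeric and string arguments are compared as strings by default
-- (compareAsStrInGreatest = 1), or as numeric values when it is set to 0
SELECT GREATEST(c_int, c_str) FROM t1;
```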
#### LOG

View File

@ -94,7 +94,7 @@ The sink task is responsible for receiving the output results from the agg task
The above three types of tasks each play their roles in the stream computing architecture, distributed at different levels. Clearly, the number of source tasks directly depends on the number of vnodes, with each source task independently handling the data in its vnode without interference from other source tasks, and there are no sequential constraints. However, it is worth noting that if the final stream computing results converge to one table, then only one sink task will be deployed on the vnode where that table is located. The collaborative relationship between these three types of tasks is shown in the following diagram, together forming the complete execution process of stream computing tasks.
<figure>
<Image img={imgStep02} alt="Relationships between tasks"/> <Image img={imgStep03} alt="Relationships between tasks"/>
<figcaption>Figure 3. Relationships between tasks</figcaption>
</figure>

View File

@ -0,0 +1,293 @@
---
title: DST(Daylight Saving Time) Usage
description: Explanation and suggestions for using DST(Daylight Saving Time) in TDengine
---
## Background
In the use of time-series databases, there are times when Daylight Saving Time (DST) is encountered. We analyze and explain the use and issues of DST in TDengine to help you use TDengine more smoothly.
## Definitions
### Time Zone
A time zone is a region on Earth that uses the same standard time. Due to the Earth's rotation, to ensure that the time in each place is coordinated with the local sunrise and sunset, the world is divided into multiple time zones.
### IANA Time Zone
The IANA (Internet Assigned Numbers Authority) time zone database, also known as the tz database, provides a standard reference for global time zone information. It is the basis for modern systems and software to handle time zone-related operations.
IANA uses the "Region/City" format (e.g., Europe/Berlin) to clearly identify time zones.
TDengine supports the use of IANA time zones in different components (except for the time zone settings in Windows taos.cfg).
### Standard Time and Local Time
Standard time is the time determined based on a fixed meridian on Earth. It provides a unified reference point for each time zone.
- Greenwich Mean Time (GMT): Historically used reference time, located at the 0° meridian.
- Coordinated Universal Time (UTC): The modern time standard, similar to GMT but more precise.
The relationship between standard time and time zones is as follows:
- Reference: Standard time (e.g., UTC) is the reference point for setting time zones.
- Offset: Different time zones are defined by their offset from standard time. For example, UTC+1 means 1 hour ahead of UTC.
- Regional Division: The world is divided into multiple time zones, each using one or more standard times.
Relative to standard time, each region sets its local time based on its time zone:
- Time Zone Offset: Local time equals standard time plus the offset of the time zone. For example, UTC+2 means 2 hours ahead of UTC.
- Daylight Saving Time (DST): Some regions adjust their local time during specific periods, such as moving the clock forward by one hour. See the next section for details.
### Daylight Saving Time
Daylight Saving Time (DST) is a system that advances the time by one hour to make better use of daylight and save energy. It usually starts in spring and ends in autumn. The specific start and end times of DST vary by region. The following explanation uses Berlin time as an example to illustrate DST and its effects.
![DST Berlin](./02-dst/dst-berlin.png)
According to this rule, you can see:
- The time between 02:00:00 and 03:00:00 (excluding 03:00:00) on March 31, 2024, in Berlin local time does not exist (the clock jumps over it).
- The time between 02:00:00 and 03:00:00 (excluding 03:00:00) on October 27, 2024, in Berlin local time appears twice.
#### DST and the IANA Time Zone Database
- Recording Rules: The IANA time zone database records detailed DST rules for each region, including the start and end dates and times.
- Automatic Adjustment: Many operating systems and software use the IANA database to automatically handle DST adjustments.
- Historical Changes: The IANA database also tracks historical DST changes to ensure accuracy.
#### DST and Timestamp Conversion
- Converting a timestamp to local time is deterministic. For example, 1729990654 is Berlin time DST 2024-10-27 02:57:34, and 1729994254 is Berlin time standard time 2024-10-27 02:57:34 (these two local times are the same except for the time offset).
- Without specifying the time offset, converting local time to a timestamp is indeterminate. The local time skipped during the DST transition does not exist and cannot be converted to a timestamp; for example, Berlin time 2024-03-31 02:34:56 does not exist. The local time repeated when DST ends is ambiguous without an offset; for example, 2024-10-27 02:57:34 could be either 1729990654 or 1729994254. Specifying the time offset resolves the ambiguity; for example, 2024-10-27 02:57:34 CEST (+02:00) unambiguously refers to timestamp 1729990654.
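As a short SQL illustration (a sketch; it uses the millisecond-precision table `t1` from the examples below and the RFC3339 offset notation described in the next section):

```sql
-- With an explicit offset, each local time maps to exactly one timestamp
INSERT INTO t1 VALUES('2024-10-27T02:57:34.000+02:00', 1);  -- CEST (DST), timestamp 1729990654000
INSERT INTO t1 VALUES('2024-10-27T02:57:34.000+01:00', 2);  -- CET (standard time), timestamp 1729994254000
```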
### RFC3339 Time Format
RFC 3339 is an internet time format standard used to represent dates and times. It is based on the ISO 8601 standard but specifies some format details more specifically.
The format is as follows:
- Basic Format: `YYYY-MM-DDTHH:MM:SSZ`
- Time Zone Representation:
- Z represents Coordinated Universal Time (UTC).
- Offset format, such as +02:00, represents the time difference from UTC.
With explicit time zone offsets, the RFC 3339 format can accurately parse and compare times globally.
The advantages of RFC 3339 include:
- Standardization: Provides a unified format for easy cross-system data exchange.
- Clarity: Clearly indicates time zone information, avoiding time misunderstandings.
TDengine uses the RFC3339 format for display in REST API and Explorer UI. In SQL statements, you can use the RFC3339 format to write timestamp data:
```sql
insert into t1 values('2024-10-27T01:59:59.000Z', 0);
select * from t1 where ts >= '2024-10-27T01:59:59.000Z';
```
### Undefined Behavior
Undefined behavior refers to code or operations whose result is not clearly defined and for which no guarantee of future compatibility is made. TDengine may change the current behavior in a future version without notifying users. Therefore, users should not rely on the current undefined behavior for judgment or application in TDengine.
## Writing and Querying DST in TDengine
We use the following table to show the impact of DST on writing and querying.
![DST Table](./02-dst/dst-table.png)
### Table Explanation
- **TIMESTAMP**: TDengine uses a 64-bit integer to store raw timestamps.
- **UTC**: The UTC time representation corresponding to the timestamp.
- **Europe/Berlin**: The RFC3339 format time corresponding to the Europe/Berlin time zone.
- **Local**: The local time corresponding to the Europe/Berlin time zone (without time zone).
### Table Analysis
- At the **start of DST** (Berlin time March 31, 02:00), the time jumps directly from 02:00 to 03:00 (one hour forward).
- Light green is the timestamp one hour before the start of DST;
- Dark green is the timestamp one hour after the start of DST;
- Red indicates that the nonexistent local time was inserted into the TDengine database:
- Using SQL `INSERT INTO t1 values('2024-03-31 02:59:59',..)` to insert data from `2024-03-31 02:00:00` to `2024-03-31 02:59:59` will be automatically adjusted to -1000 (in TDengine, this is undefined behavior, currently this value is related to the database precision, millisecond database is -1000, microsecond database is -1000000, nanosecond database is -1000000000), because that moment does not exist in local time;
- At the **end of DST** (Berlin time October 27, 03:00), the time jumps from 03:00 to 02:00 (one hour back).
- Light blue indicates the timestamp one hour before the clock jump;
- Dark blue indicates the timestamp within one hour after the clock jump, its local time without time zone is the same as the previous hour.
- Purple indicates the timestamp one hour after the clock jump;
- **Local Time Changes**: Due to the DST adjustment, local time changes, so some time periods appear to repeat while others are missing.
- **UTC Time Unchanged**: UTC time remains unchanged, ensuring the consistency and order of time.
- **RFC3339**: The RFC3339 format time shows the change in time offset, changing to +02:00 after the start of DST and to +01:00 after the end of DST.
- **Conditional Query**:
- At the **start of DST**, the skipped time (`[03-31 02:00:00,03-31 03:00:00)`) does not exist, so using that time for queries results in undefined behavior: `SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59'` (the nonexistent local timestamp is converted to `-1000`):
```sql
taos> SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59';
ts |
=================
-1000 |
Query OK, 1 row(s) in set (0.003635s)
```
When a nonexistent timestamp is combined with an existing one, the result is also not as expected, as shown below where the start of the range does not exist as a local time:
```sql
taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 03:59:59';
ts | to_iso8601(ts,'Z') |
==================================================
-1000 | 1969-12-31T23:59:59.000Z |
1711843200000 | 2024-03-31T00:00:00.000Z |
1711846799000 | 2024-03-31T00:59:59.000Z |
1711846800000 | 2024-03-31T01:00:00.000Z |
1711846801000 | 2024-03-31T01:00:01.000Z |
Query OK, 5 row(s) in set (0.003339s)
```
In the following statements, the end time of the first query does not exist as a local time, while the end time of the second does. The result of the first query is not as expected:
```sql
taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 02:00:00';
Query OK, 0 row(s) in set (0.000930s)
taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 01:59:59';
ts | to_iso8601(ts,'Z') |
==================================================
1711843200000 | 2024-03-31T00:00:00.000Z |
1711846799000 | 2024-03-31T00:59:59.000Z |
Query OK, 2 row(s) in set (0.001227s)
```
- At the end of DST, the repeated time (`[10-27 02:00:00,10-27 03:00:00)` excluding `10-27 03:00:00`) appears twice, and using that time range for queries in TDengine is also undefined behavior.
- Querying the data between `[2024-10-27 02:00:00, 2024-10-27 03:00:00]` includes the repeated timestamps and the data at `2024-10-27 03:00:00`:
```sql
taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts BETWEEN '2024-10-27 02:00:00' AND '2024-10-27 03:00:00';
ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
=======================================================================================
1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27 02:59:59 |
1729994400000 | 2024-10-27T02:00:00.000Z | 2024-10-27 03:00:00 |
Query OK, 5 row(s) in set (0.001370s)
```
- However, the following query for the range [2024-10-27 02:00:00.000, 2024-10-27 02:57:00.999] finds only the data at the first occurrence of 2024-10-27 02:00:00:
```sql
taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:00.999';
ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
=======================================================================================
1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
Query OK, 1 row(s) in set (0.004480s)
```
- The following query for the range [2024-10-27 02:00:00, 2024-10-27 02:57:35] returns 3 rows of data (including one row whose local time is 02:59:59):
```sql
taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:35';
ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
===============================================================================================
2024-10-27 02:00:00.000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
2024-10-27 02:59:59.000 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
2024-10-27 02:00:00.000 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
Query OK, 3 row(s) in set (0.004428s)
```
## Summary and Suggestions
### Summary
This discussion only concerns the use of local time; using Unix timestamps or the RFC 3339 format is not affected.
- Writing:
- It is not possible to write data for nonexistent times during the DST transition.
- Writing data for repeated times during the DST transition is undefined behavior.
- Querying:
- Querying with conditions that specify the skipped time during the start of DST results in undefined behavior.
- Querying with conditions that specify the repeated time during the end of DST results in undefined behavior.
- Display:
- Displaying with time zones is not affected.
- Displaying local time is accurate, but repeated times during the end of DST cannot be distinguished.
- Users should be cautious when using time without time zones for display and application.
### Suggestions
To avoid the unnecessary impact of DST on writing and querying in TDengine, it is recommended to use explicit time offsets.
- Use UNIX Timestamps: Using UNIX timestamps can avoid time zone issues.
| TIMESTAMP | UTC | Europe/Berlin | Local |
| ------------: | :----------------------: | :---------------------------: | :-----------------: |
| 1711846799000 | 2024-03-31T00:59:59.000Z | 2024-03-31T01:59:59.000+01:00 | 2024-03-31 01:59:59 |
| 1711846800000 | 2024-03-31T01:00:00.000Z | 2024-03-31T03:00:00.000+02:00 | 2024-03-31 03:00:00 |
```sql
taos> insert into t1 values(1711846799000, 1)(1711846800000, 2);
Insert OK, 2 row(s) affected (0.001434s)
taos> select * from t1 where ts between 1711846799000 and 1711846800000;
ts | v1 |
===============================
1711846799000 | 1 |
1711846800000 | 2 |
Query OK, 2 row(s) in set (0.003503s)
```
- Use RFC3339 Time Format: The RFC3339 time format with time zone offsets can effectively avoid the uncertainty of DST.
| TIMESTAMP | UTC | Europe/Berlin | Local |
| ------------: | :----------------------: | :---------------------------: | :-----------------: |
| 1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27T02:00:00.000+02:00 | 2024-10-27 02:00:00 |
| 1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27T02:59:59.000+02:00 | 2024-10-27 02:59:59 |
| 1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27T02:00:00.000+01:00 | 2024-10-27 02:00:00 |
| 1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27T02:59:59.000+01:00 | 2024-10-27 02:59:59 |
```sql
taos> insert into t1 values ('2024-10-27T02:00:00.000+02:00', 1)
('2024-10-27T02:59:59.000+02:00', 2)
('2024-10-27T02:00:00.000+01:00', 3)
('2024-10-27T02:59:59.000+01:00', 4);
Insert OK, 4 row(s) affected (0.001514s)
taos> SELECT *,
to_iso8601(ts,'Z'),
to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1
WHERE ts >= '2024-10-27T02:00:00.000+02:00'
AND ts <= '2024-10-27T02:59:59.000+01:00';
ts | v1 | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
=====================================================================================================
1729987200000 | 1 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
1729990799000 | 2 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
1729990800000 | 3 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
1729994399000 | 4 | 2024-10-27T01:59:59.000Z | 2024-10-27 02:59:59 |
Query OK, 4 row(s) in set (0.004275s)
taos> SELECT *,
to_iso8601(ts,'Z'),
to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1
WHERE ts >= '2024-10-27T02:00:00.000+02:00'
AND ts <= '2024-10-27T02:59:59.000+02:00';
ts | v1 | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
=====================================================================================================
1729987200000 | 1 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
1729990799000 | 2 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
Query OK, 2 row(s) in set (0.004275s)
```
- Pay Attention to Time Zone Settings When Querying: When querying and displaying, if local time is needed, be sure to consider the impact of DST; see the sketch at the end of this section.
- taosAdapter: When using the REST API, you can specify an IANA time zone, and results are returned in RFC 3339 format.
```shell
$ curl -uroot:taosdata 'localhost:6041/rest/sql?tz=Europe/Berlin'\
-d "select ts from tz1.t1"
{"code":0,"column_meta":[["ts","TIMESTAMP",8]],"data":[["1970-01-01T00:59:59.000+01:00"],["2024-03-31T01:00:00.000+01:00"],["2024-03-31T01:59:59.000+01:00"],["2024-03-31T03:00:00.000+02:00"],["2024-03-31T03:00:01.000+02:00"],["2024-10-27T02:00:00.000+02:00"],["2024-10-27T02:59:59.000+02:00"],["2024-10-27T02:00:00.000+01:00"],["2024-10-27T02:59:59.000+01:00"],["2024-10-27T03:00:00.000+01:00"]],"rows":10}
```
- Explorer: When using the Explorer page for SQL queries, users can configure the client time zone to display in RFC3339 format.
![Explorer DST](./02-dst/explorer-with-tz.png)
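If output that reads like local time is still required, one option is to render explicit offsets on the server side. The following is a minimal sketch rather than a captured session; it assumes that `to_iso8601` accepts an offset argument such as `+02:00` in the same way it accepts `'Z'` in the examples above:
```sql
-- Render the repeated hour at the end of DST with both explicit offsets,
-- instead of relying on ambiguous local time.
SELECT ts,
       to_iso8601(ts, '+02:00') AS cest_time,
       to_iso8601(ts, '+01:00') AS cet_time
FROM t1
WHERE ts BETWEEN 1729990799000 AND 1729990800000;
```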
## Reference Documents
- IANA Time Zone Database: [https://www.iana.org/time-zones](https://www.iana.org/time-zones)
- RFC3339: [https://datatracker.ietf.org/doc/html/rfc3339](https://datatracker.ietf.org/doc/html/rfc3339)

@@ -4,6 +4,10 @@ title: Querying Data in TDengine
toc_max_heading_level: 4
---
import win from './window.png';
import swin from './session-window.png';
import ewin from './event-window.png';
Compared with many other time-series and real-time databases, a unique advantage of TDengine is that it has supported standard SQL queries since its very first release, which greatly lowers the learning curve. This chapter uses the smart meter data model as an example to introduce how to use SQL queries in TDengine to work with time-series data. For further details of SQL syntax and features, refer to the official TDengine documentation. After studying this chapter, you will be able to master TDengine's SQL query techniques and operate on and analyze time-series data efficiently.
## Basic Queries
@@ -136,16 +140,15 @@ Query OK, 10 row(s) in set (2.415961s)
In TDengine, you can use window clauses to aggregate query results by time-window partitions. This is particularly suitable for scenarios that analyze large amounts of time-series data, for example when a smart meter collects data every 10 s but the average temperature per 1 min interval is needed.
The window clause allows you to partition the queried data set into windows and aggregate the data within each window. The window partitioning logic is shown in the figure below.
<img src={win} width="500" alt="Common window partitioning logic" />
- Time window: partitions data by time interval, supporting sliding and tumbling time windows; suitable for aggregating data over fixed time periods.
- Status window: windows are split on changes of a status value; rows with the same status value belong to one window, and the window closes when the status value changes.
- Session window: sessions are split by the gaps between record timestamps; records whose timestamp gap is smaller than a preset value belong to the same session.
- Event window: windows are delimited dynamically by start and end conditions; a window opens when the start condition is met and closes when the end condition is met.
- Count window: windows are split by row count; every time the specified number of rows is reached, a window is formed and aggregated.
The window clause syntax is as follows:
@@ -408,7 +411,8 @@ Query OK, 22 row(s) in set (0.153403s)
The session window uses the value of the timestamp primary key to determine whether records belong to the same session. As shown in the figure below, if the maximum gap between consecutive timestamps is set to 12 seconds, the following 6 records form 2 session windows: [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30], because the gap between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the allowed gap (12 seconds).
<img src={swin} width="320" alt="Session window diagram" />
Results within the tol_value time gap are considered to belong to the same window; if the time between two consecutive records exceeds tol_val, the next window starts automatically.
@@ -461,7 +465,7 @@ Query OK, 10 row(s) in set (0.043489s)
select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c2 < 10
```
<img src={ewin} width="350" alt="Event window diagram" />
Example SQL:

@@ -3,7 +3,12 @@ title: "FAQ"
sidebar_label: "FAQ"
---
### 1. Compilation of uWSGI fails during installation — how to handle it
TDgpt needs to compile uWSGI locally during installation. With the Python of some environments (for example, anaconda), installing uWSGI causes conflicts and the build fails, so the installation cannot continue. In this case, you can try skipping the uWSGI installation during the install process.
Because uWSGI is skipped, the taosanode service must later be started manually with the command `python3.10 /usr/local/taos/taosanode/lib/taosanalytics/app.py`. When running this command, make sure the Python interpreter of the virtual environment is used so that the dependency libraries can be loaded.
### 2. Creating an anode fails with "the specified service cannot be accessed"
```bash
taos> create anode '127.0.0.1:6090';
@@ -26,7 +31,7 @@ curl: (7) Failed to connect to 127.0.0.1 port 6090: Connection refused
> Do not use systemctl status taosanode to check whether taosanode is running properly
### 3. The service is running, but queries return "service unavailable"
```bash
taos> select _frowts,forecast(current, 'algo=arima, alpha=95, wncheck=0, rows=20') from d1 where ts<='2017-07-14 10:40:09.999';
@@ -34,7 +39,7 @@ DB error: Analysis service can't access[0x80000441] (60.195613s)
```
The default timeout for data analysis is 60s. This error occurs when analyzing the input data takes longer than the maximum wait time. Try reducing the input data size by limiting its range, or switch to another analysis algorithm and try again.
### 4. The result contains an "Invalid json format" error
The analysis result returned from the anode to TDengine is malformed. Check the anode run log `/var/log/taos/taosanode/taosanode.app.log` to obtain the specific error message.

@@ -26,18 +26,18 @@ Tableau is a well-known business intelligence tool that supports a variety of data sources
**Step 3**: Click the `DSN` radio button, then select the configured data source (MyTDengine) and click the `Connect` button. After the connection succeeds, delete the content of the appended string section, and finally click the `Sign In` button.
![tableau-odbc](./tableau/tableau-odbc.webp)
## Data Analysis
**Step 1**: On the workbook page, select the connected data source. Clicking the database drop-down list shows the databases available for analysis. Then click the search button in the table options to list all tables of that database, and drag the table to be analyzed into the area on the right to display its structure.
![tableau-workbook](./tableau/tableau-table.webp)
**Step 2**: Click the "Update Now" button at the bottom to display the data in the table.
![tableau-workbook](./tableau/tableau-data.webp)
**Step 3**: Click "Worksheet" at the bottom of the window; the data analysis window opens and shows all fields of the analyzed table. Drag fields onto rows and columns to render a chart.
![tableau-workbook](./tableau/tableau-analysis.webp)

@@ -19,22 +19,22 @@ title: Integration with Excel
**Step 2**: Start Excel on Windows, then choose Data -> Get Data -> From Other Sources -> From ODBC.
![excel-odbc](./excel/odbc-menu.webp)
**Step 3**: In the pop-up window, select the data source to connect to from the Data Source Name (DSN) drop-down list, then click OK.
![excel-odbc](./excel/odbc-select.webp)
**Step 4**: Enter the TDengine user name and password.
![excel-odbc](./excel/odbc-config.webp)
**Step 5**: In the Navigator dialog, select the database tables to load and click Load to finish loading the data.
![excel-odbc](./excel/odbc-load.webp)
## Data Analysis
Select the imported data, choose a bar chart on the Insert tab, and configure the data fields in the PivotChart pane on the right.
![excel-odbc](./excel/odbc-data.webp)

@@ -0,0 +1,83 @@
---
sidebar_label: FineBI
title: Integration with FineBI
---
FanRuan is a technology company specializing in business intelligence and data analytics. With its two self-developed core products, FineBI and FineReport, it holds an important position in the industry. FanRuan's BI tools are widely used in enterprises of all kinds, helping users with data visualization and analysis, report generation, and data-driven decision support.
By using the `TDengine Java connector`, FineBI can quickly access data in TDengine. Users can connect directly to a TDengine database in FineBI, retrieve time-series data for analysis, and build visual reports, all without writing any code.
## Prerequisites
Prepare the following environment:
- A TDengine cluster of version 3.3.4.0 or later has been deployed and is running normally (both Enterprise and Community editions are supported).
- taosAdapter is running normally; see the [taosAdapter reference manual](../../../reference/components/taosadapter) for details.
- FineBI is installed (if not, please download and install it: [FineBI download](https://www.finebi.com/product/download)).
- Download the `fine_conf_entity` plugin, which enables adding JDBC drivers: [download address](https://market.fanruan.com/plugin/1052a471-0239-4cd8-b832-045d53182c5d).
- Install the JDBC driver. Download the `TDengine JDBC` connector file `taos-jdbcdriver-3.4.0-dist.jar` (or a later version) from `maven.org`.
## Configuring the Data Source
**Step 1**: In the `db.script` configuration file on the FineBI server, find the `SystemConfig.driverUpload` item and change it to `true`.
- Windows: the configuration file path is `webapps/webroot/WEB-INF/embed/finedb/db.script` under the installation directory.
- Linux/Mac: the configuration file path is `/usr/local/FineBI6.1/webapps/webroot/WEB-INF/embed/finedb/db.script`.
**Step 2**: Start the FineBI service and open `http://ip:37799/webroot/decision` in a browser, where ip is the IP address of the FineBI server.
**Step 3**: After opening the FineBI web page and logging in, click "Management System" -> "Plugin Management", then in "App Store" on the right click "Install from Local" and select the downloaded `fine_conf_entity` plugin to install it.
![finebi-workbook](./finebi/plugin.webp)
**Step 4**: Click "Management System" -> "Data Connections" -> "Data Connection Management", click the "Driver Management" button on the right to open the configuration page, then click "New Driver", enter a name in the pop-up window (for example `tdengine-websocket`), and configure the JDBC driver.
![finebi-workbook](./finebi/connect-manage.webp)
**Step 5**: On the driver configuration page, click "Upload File" and select the downloaded `TDengine Java Connector` (for example `taos-jdbcdriver-3.4.0-dist.jar`) to upload. After the upload completes, select `com.taosdata.jdbc.ws.WebSocketDriver` from the "Driver" drop-down list and click "Save".
![finebi-workbook](./finebi/new-driver.webp)
**Step 6**: On the "Data Connection Management" page, click "New Data Connection", then click "Others", and on the right-hand page click "Other JDBC" to configure the connection.
![finebi-workbook](./finebi/jdbc-connect.webp)
**Step 7**: On the configuration page, first enter a data connection name, then choose "Custom" under "Driver" and select the configured driver from the drop-down list (for example `com.taosdata.jdbc.ws.WebSocketDriver (tdengine-websocket)`), and configure the "Data Connection URL" (for example `jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata&fineBIDialect=mysql`). When the settings are complete, click "Test Connection" in the upper right corner; after the test succeeds, click "Save" to finish the configuration.
:::tip
The `fineBIDialect=mysql` setting means that the SQL dialect rules of the MySQL database are used. In short, it tells FineBI to parse and execute queries and operations in the specific way MySQL handles SQL statements.
:::
![finebi-workbook](./finebi/jdbc-config.webp)
## Data Analysis
### Data Preparation
**Step 1**: Click "Public Data" and then click "New Folder" on the right-hand page to create a folder (for example `TDengine`). Then click the "+" button to the right of the folder to create a "Database Table" dataset or an "SQL Dataset".
![finebi-workbook](./finebi/common.webp)
**Step 2**: Click "Database Table" to open the table selection page. Select the created connection under "Data Connections" on the left; all tables of the connected database are then shown on the right. Select the table to load (for example `meters`) and click "OK" to display the data of the `meters` table.
![finebi-workbook](./finebi/select-table.webp)
![finebi-workbook](./finebi/table-data.webp)
**Step 3**: Click "SQL Dataset" to open the SQL dataset configuration page. First enter a table name (used for display in FineBI), then select the created connection from the "Data from Data Connection" drop-down list, enter the SQL statement and click Preview to see the query result, and finally click "OK" to create the SQL dataset.
![finebi-workbook](./finebi/sql-data-config.webp)
### Smart Meter Example
**Step 1**: Click "My Analysis" and then click "New Folder" on the right-hand page to create a folder (for example `TDengine`). Then click the "+" button to the right of the folder to create an "Analysis Subject".
![finebi-workbook](./finebi/analysis-object.webp)
**Step 2**: On the analysis subject page, select a dataset (for example `meters`) and click "OK" to associate the dataset.
![finebi-workbook](./finebi/load-data.webp)
**Step 3**: Click the "Component" tab at the bottom of the analysis subject page to open the chart configuration page, and drag fields onto the horizontal or vertical axis to render a chart.
![finebi-workbook](./finebi/analysis-chart.webp)
