Merge branch '3.0' into doc/internal

@@ -6,6 +1,7 @@ on:
      - 'main'
      - '3.0'
      - '3.1'
      - '3.3.6'
      - 'enh/cmake-TD-33848'
    paths-ignore:
@@ -1,25 +1,20 @@
name: TDengine CI Test

on:
  # pull_request:
  #   branches:
  #     - 'main'
  #     - '3.0'
  #     - '3.1'
  #   paths-ignore:
  #     - 'packaging/**'
  #     - 'docs/**'
  repository_dispatch:
    types: [run-tests]
  pull_request:
    branches:
      - 'main'
      - '3.0'
      - '3.1'
    paths-ignore:
      - 'packaging/**'
      - 'docs/**'

concurrency:
-  group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.ref || github.event.client_payload.ref}}-${{ github.event_name == 'repository_dispatch' && 'dispatch' || ''}}
+  group: ${{ github.workflow }}-${{ github.ref }}-TDengine
  cancel-in-progress: true

env:
  CONTAINER_NAME: 'taosd-test'
  WKDIR: '/var/lib/jenkins/workspace'
  WK: '/var/lib/jenkins/workspace/TDinternal'
  WKC: '/var/lib/jenkins/workspace/TDinternal/community'

jobs:
@@ -28,430 +23,82 @@ jobs:
      group: CI
      labels: [self-hosted, Linux, X64, testing]
    outputs:
      tdinternal: ${{ steps.parameters.outputs.tdinternal }}
      run_function_test: ${{ steps.parameters.outputs.run_function_test }}
      run_tdgpt_test: ${{ steps.parameters.outputs.run_tdgpt_test }}
      source_branch: ${{ steps.parameters.outputs.source_branch }}
      target_branch: ${{ steps.parameters.outputs.target_branch }}
      pr_number: ${{ steps.parameters.outputs.pr_number }}
    steps:
      - name: Determine trigger source and fetch parameters
        id: parameters
        run: |
          set -euo pipefail
          # check the trigger source and get branch information
          if [ "${{ github.event_name }}" == "repository_dispatch" ]; then
            tdinternal="true"
            source_branch=${{ github.event.client_payload.tdinternal_source_branch }}
            target_branch=${{ github.event.client_payload.tdinternal_target_branch }}
            pr_number=${{ github.event.client_payload.tdinternal_pr_number }}
            run_tdgpt_test="true"
            run_function_test="true"
          else
            tdinternal="false"
            source_branch=${{ github.event.pull_request.head.ref }}
            target_branch=${{ github.event.pull_request.base.ref }}
            pr_number=${{ github.event.pull_request.number }}
            # target_branch=${{ github.event.pull_request.base.ref }}

            # check whether to run tdgpt test cases
            cd ${{ env.WKC }}
            changed_files_non_doc=$(git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD $target_branch` | grep -v "^docs/en/" | grep -v "^docs/zh/" | grep -v ".md$" | tr '\n' ' ' || :)
            # # Fetch the latest code from the target branch
            # cd ${{ env.WKC }}
            # git reset --hard
            # git clean -f
            # git remote prune origin
            # git fetch
            # git checkout "$target_branch"
            # git remote prune origin
            # git pull >/dev/null

            if [[ "$changed_files_non_doc" != '' && "$changed_files_non_doc" =~ /forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics/ ]]; then
              run_tdgpt_test="true"
            else
              run_tdgpt_test="false"
            fi
            # # Check whether to run tdgpt test cases
            # changed_files_non_doc=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | grep -v "^docs/en/" | grep -v "^docs/zh/" | grep -v ".md$" | tr '\n' ' ' || :)
            # echo "changed files exclude doc: ${changed_files_non_doc}"

            # if [[ -n "$changed_files_non_doc" && "$changed_files_non_doc" =~ (forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt) ]]; then
            #   run_tdgpt_test="true"
            # else
            #   run_tdgpt_test="false"
            # fi
            # echo "run tdgpt test: ${run_tdgpt_test}"

            # check whether to run function test cases
            changed_files_non_tdgpt=$(git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD $target_branch` | grep -v "^docs/en/" | grep -v "^docs/zh/" | grep -v ".md$" | grep -Ev "forecastoperator.c|anomalywindowoperator.c|tanalytics.h|tanalytics.c|tdgpt_cases.task|analytics" | tr '\n' ' ' || :)
            if [ "$changed_files_non_tdgpt" != '' ]; then
              run_function_test="true"
            else
              run_function_test="false"
            fi
          fi
          # # Check whether to run function test cases
          # changed_files_non_tdgpt=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | \
          #   grep -v "^docs/en/" | \
          #   grep -v "^docs/zh/" | \
          #   grep -v ".md$" | \
          #   grep -Ev "forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt" | \
          #   tr '\n' ' ' || :)
          # echo "changed files exclude tdgpt: ${changed_files_non_tdgpt}"

          # if [ -n "$changed_files_non_tdgpt" ]; then
          #   run_function_test="true"
          # else
          #   run_function_test="false"
          # fi

          echo "tdinternal=$tdinternal" >> $GITHUB_OUTPUT
          # echo "run function test: ${run_function_test}"

          run_tdgpt_test="true"
          run_function_test="true"
          # Output the results for GitHub Actions
          echo "run_function_test=$run_function_test" >> $GITHUB_OUTPUT
          echo "run_tdgpt_test=$run_tdgpt_test" >> $GITHUB_OUTPUT
          echo "source_branch=$source_branch" >> $GITHUB_OUTPUT
          echo "target_branch=$target_branch" >> $GITHUB_OUTPUT
          echo "pr_number=$pr_number" >> $GITHUB_OUTPUT

          echo ${{ github.event.pull_request.head.ref }}
          echo ${{ github.event.pull_request.base.ref }}
          echo ${{ github.event.pull_request.number }}
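For reference, the changed-file routing implemented by this step can be exercised outside CI. Below is a minimal standalone sketch, assuming a git checkout in which `FETCH_HEAD` and the target branch both resolve; the filter patterns mirror the step above.

```bash
#!/usr/bin/env bash
# Standalone sketch of the test-selection filters used by the step above.
# Assumes a checkout where FETCH_HEAD and the target branch both exist.
set -euo pipefail

target_branch="${1:-3.0}"
base=$(git merge-base FETCH_HEAD "$target_branch")

# All changed files except documentation
non_doc=$(git --no-pager diff --name-only FETCH_HEAD "$base" \
  | grep -Ev '^docs/(en|zh)/|\.md$' | tr '\n' ' ' || :)
echo "non-doc changes: ${non_doc:-<none>}"

# Non-doc changes that also fall outside the tdgpt-related sources
non_tdgpt=$(git --no-pager diff --name-only FETCH_HEAD "$base" \
  | grep -Ev '^docs/(en|zh)/|\.md$' \
  | grep -Ev 'forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.[ch]|tdgpt_cases\.task|analytics' \
  | tr '\n' ' ' || :)

if [ -n "$non_tdgpt" ]; then
  echo "run_function_test=true"
else
  echo "run_function_test=false"
fi
```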
  run-tests-on-linux:
    uses: taosdata/.github/.github/workflows/run-tests-on-linux.yml@main
    needs: fetch-parameters
    runs-on:
      group: CI
      labels: [self-hosted, Linux, X64, testing]
    timeout-minutes: 200
    env:
      IS_TDINTERNAL: ${{ needs.fetch-parameters.outputs.tdinternal }}
      RUN_FUNCTION_TEST: ${{ needs.fetch-parameters.outputs.run_function_test }}
      RUN_TDGPT_TEST: ${{ needs.fetch-parameters.outputs.run_tdgpt_test }}
      SOURCE_BRANCH: ${{ needs.fetch-parameters.outputs.source_branch }}
      TARGET_BRANCH: ${{ needs.fetch-parameters.outputs.target_branch }}
      PR_NUMBER: ${{ needs.fetch-parameters.outputs.pr_number }}
    steps:
      - name: Output the environment information
        run: |
          echo "::group::Environment Info"
          date
          hostname
          env
          echo "Runner: ${{ runner.name }}"
          echo "Trigger Source from TDinternal: ${{ env.IS_TDINTERNAL }}"
          echo "Workspace: ${{ env.WKDIR }}"
          git --version
          echo "${{ env.WKDIR }}/restore.sh -p ${{ env.PR_NUMBER }} -n ${{ github.run_number }} -c ${{ env.CONTAINER_NAME }}"
          echo "::endgroup::"

      - name: Prepare repositories
        run: |
          set -euo pipefail
          prepare_environment() {
            cd "$1"
            git reset --hard
            git clean -f
            git remote prune origin
            git fetch
            git checkout "$2"
          }
          prepare_environment "${{ env.WK }}" "${{ env.TARGET_BRANCH }}"
          prepare_environment "${{ env.WKC }}" "${{ env.TARGET_BRANCH }}"

      - name: Get latest codes and logs for TDinternal PR
        if: ${{ env.IS_TDINTERNAL == 'true' }}
        run: |
          cd ${{ env.WK }}
          git pull >/dev/null
          git log -5
          echo "`date "+%Y%m%d-%H%M%S"` TDinternalTest/${{ env.PR_NUMBER }}:${{ github.run_number }}:${{ env.TARGET_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
          echo "CHANGE_BRANCH:${{ env.SOURCE_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
          echo "TDinternal log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
          git fetch origin +refs/pull/${{ env.PR_NUMBER }}/merge
          git checkout -qf FETCH_HEAD
          git log -5
          echo "TDinternal log merged: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
          cd ${{ env.WKC }}
          git remote prune origin
          git pull >/dev/null
          git log -5
          echo "community log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log

      - name: Get latest codes and logs for TDengine PR
        if: ${{ env.IS_TDINTERNAL == 'false' }}
        run: |
          cd ${{ env.WKC }}
          git remote prune origin
          git pull >/dev/null
          git log -5
          echo "`date "+%Y%m%d-%H%M%S"` TDengineTest/${{ env.PR_NUMBER }}:${{ github.run_number }}:${{ env.TARGET_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
          echo "CHANGE_BRANCH:${{ env.SOURCE_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
          echo "community log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
          git fetch origin +refs/pull/${{ env.PR_NUMBER }}/merge
          git checkout -qf FETCH_HEAD
          git log -5
          echo "community log merged: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
          cd ${{ env.WK }}
          git pull >/dev/null
          git log -5
          echo "TDinternal log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log

      - name: Update submodule
        run: |
          cd ${{ env.WKC }}
          git submodule update --init --recursive

      - name: Output the 'file_no_doc_changed' information to the file
        if: ${{ env.IS_TDINTERNAL == 'false' && env.TARGET_BRANCH != '3.1' }}
        run: |
          mkdir -p ${{ env.WKDIR }}/tmp/${{ env.PR_NUMBER }}_${{ github.run_number }}
          changed_files_non_doc=$(git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${{ env.TARGET_BRANCH }}` | grep -v "^docs/en/" | grep -v "^docs/zh/" | grep -v ".md$" | tr '\n' ' ' || :)
          echo $changed_files_non_doc > ${{ env.WKDIR }}/tmp/${{ env.PR_NUMBER }}_${{ github.run_number }}/docs_changed.txt

      - name: Check assert testing
        if: ${{ env.IS_TDINTERNAL == 'false' && env.TARGET_BRANCH != '3.1' }}
        run: |
          cd ${{ env.WKC }}/tests/parallel_test
          ./run_check_assert_container.sh -d ${{ env.WKDIR }}

      - name: Check void function testing
        if: ${{ env.IS_TDINTERNAL == 'false' && env.TARGET_BRANCH != '3.1' }}
        run: |
          cd ${{ env.WKC }}/tests/parallel_test
          ./run_check_void_container.sh -d ${{ env.WKDIR }}

      - name: Build docker container
        if: ${{ env.RUN_FUNCTION_TEST == 'true' }}
        run: |
          date
          rm -rf ${{ env.WKC }}/debug
          cd ${{ env.WKC }}/tests/parallel_test
          time ./container_build.sh -w ${{ env.WKDIR }} -e

      - name: Get parameters for testing
        id: get_param
        run: |
          log_server_file="/home/log_server.json"
          timeout_cmd=""
          extra_param=""

          if [ -f "$log_server_file" ]; then
            log_server_enabled=$(jq '.enabled' "$log_server_file")
            timeout_param=$(jq '.timeout' "$log_server_file")
            if [ "$timeout_param" != "null" ] && [ "$timeout_param" != "0" ]; then
              timeout_cmd="timeout $timeout_param"
            fi

            if [ "$log_server_enabled" == "1" ]; then
              log_server=$(jq '.server' "$log_server_file" | sed 's/\\\"//g')
              if [ "$log_server" != "null" ] && [ "$log_server" != "" ]; then
                extra_param="-w $log_server"
              fi
            fi
          fi
          echo "timeout_cmd=$timeout_cmd" >> $GITHUB_OUTPUT
          echo "extra_param=$extra_param" >> $GITHUB_OUTPUT

      - name: Run function returns with a null pointer scan testing
        if: ${{ env.IS_TDINTERNAL == 'false' && env.TARGET_BRANCH != '3.1' }}
        run: |
          cd ${{ env.WKC }}/tests/parallel_test
          ./run_scan_container.sh -d ${{ env.WKDIR }} -b ${{ env.PR_NUMBER }}_${{ github.run_number }} -f ${{ env.WKDIR }}/tmp/${{ env.PR_NUMBER }}_${{ github.run_number }}/docs_changed.txt ${{ steps.get_param.outputs.extra_param }}

      - name: Run tdgpt test cases
        if: ${{ env.IS_TDINTERNAL == 'false' && env.TARGET_BRANCH != '3.1' && env.RUN_TDGPT_TEST == 'true' }}
        run: |
          cd ${{ env.WKC }}/tests/parallel_test
          export DEFAULT_RETRY_TIME=2
          date
          timeout 600 time ./run.sh -e -m /home/m.json -t tdgpt_cases.task -b ${{ env.PR_NUMBER }}_${{ github.run_number }} -l ${{ env.WKDIR }}/log -o 300 ${{ steps.get_param.outputs.extra_param }}

      - name: Run function test cases
        if: ${{ env.RUN_FUNCTION_TEST == 'true' }}
        run: |
          cd ${{ env.WKC }}/tests/parallel_test
          export DEFAULT_RETRY_TIME=2
          date
          ${{ steps.get_param.outputs.timeout_cmd }} time ./run.sh -e -m /home/m.json -t cases.task -b ${{ env.PR_NUMBER }}_${{ github.run_number }} -l ${{ env.WKDIR }}/log -o 1200 ${{ steps.get_param.outputs.extra_param }}

    if: ${{ needs.fetch-parameters.outputs.run_tdgpt_test == 'true' || needs.fetch-parameters.outputs.run_function_test == 'true' }}
    with:
      tdinternal: false
      run_function_test: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
      run_tdgpt_test: ${{ needs.fetch-parameters.outputs.run_tdgpt_test == 'true' }}
  run-tests-on-mac:
    uses: taosdata/.github/.github/workflows/run-tests-on-macos.yml@main
    needs: fetch-parameters
    if: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
    runs-on:
      group: CI
      labels: [self-hosted, macOS, ARM64, testing]
    timeout-minutes: 60
    env:
      IS_TDINTERNAL: ${{ needs.fetch-parameters.outputs.tdinternal }}
      SOURCE_BRANCH: ${{ needs.fetch-parameters.outputs.source_branch }}
      TARGET_BRANCH: ${{ needs.fetch-parameters.outputs.target_branch }}
      PR_NUMBER: ${{ needs.fetch-parameters.outputs.pr_number }}
    steps:
      - name: Output the environment information
        run: |
          echo "::group::Environment Info"
          date
          hostname
          env
          echo "Runner: ${{ runner.name }}"
          echo "Trigger Source from TDinternal: ${{ env.IS_TDINTERNAL }}"
          echo "Workspace: ${{ env.WKDIR }}"
          git --version
          echo "${{ env.WKDIR }}/restore.sh -p ${{ env.PR_NUMBER }} -n ${{ github.run_number }} -c ${{ env.CONTAINER_NAME }}"
          echo "::endgroup::"

      - name: Prepare repositories
        run: |
          set -euo pipefail
          prepare_environment() {
            cd "$1"
            git reset --hard
            git clean -f
            git remote prune origin
            git fetch
            git checkout "$2"
          }
          prepare_environment "${{ env.WK }}" "${{ env.TARGET_BRANCH }}"
          prepare_environment "${{ env.WKC }}" "${{ env.TARGET_BRANCH }}"

      - name: Get latest codes and logs for TDinternal PR
        if: ${{ env.IS_TDINTERNAL == 'true' }}
        run: |
          cd ${{ env.WK }}
          git pull >/dev/null
          git log -5
          echo "`date "+%Y%m%d-%H%M%S"` TDinternalTest/${{ env.PR_NUMBER }}:${{ github.run_number }}:${{ env.TARGET_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
          echo "CHANGE_BRANCH:${{ env.SOURCE_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
          echo "TDinternal log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
          git fetch origin +refs/pull/${{ env.PR_NUMBER }}/merge
          git checkout -qf FETCH_HEAD
          git log -5
          echo "TDinternal log merged: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
          cd ${{ env.WKC }}
          git remote prune origin
          git pull >/dev/null
          git log -5
          echo "community log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log

      - name: Get latest codes and logs for TDengine PR
        if: ${{ env.IS_TDINTERNAL == 'false' }}
        run: |
          cd ${{ env.WKC }}
          git remote prune origin
          git pull >/dev/null
          git log -5
          echo "`date "+%Y%m%d-%H%M%S"` TDengineTest/${{ env.PR_NUMBER }}:${{ github.run_number }}:${{ env.TARGET_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
          echo "CHANGE_BRANCH:${{ env.SOURCE_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
          echo "community log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
          git fetch origin +refs/pull/${{ env.PR_NUMBER }}/merge
          git checkout -qf FETCH_HEAD
          git log -5
          echo "community log merged: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
          cd ${{ env.WK }}
          git pull >/dev/null
          git log -5
          echo "TDinternal log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log

      - name: Update submodule
        run: |
          cd ${{ env.WKC }}
          git submodule update --init --recursive

      - name: Run tests
        run: |
          date
          cd ${{ env.WK }}
          rm -rf debug
          mkdir debug
          cd ${{ env.WK }}/debug
          echo $PATH
          echo "PATH=/opt/homebrew/bin:$PATH" >> $GITHUB_ENV
          cmake .. -DBUILD_TEST=true -DBUILD_HTTPS=false -DCMAKE_BUILD_TYPE=Release
          make -j10
          ctest -j10 || exit 7
          date

    with:
      tdinternal: false
  run-tests-on-windows:
    uses: taosdata/.github/.github/workflows/run-tests-on-windows.yml@main
    needs: fetch-parameters
    if: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
    runs-on:
      group: CI
      labels: [self-hosted, Windows, X64, testing]
    timeout-minutes: 126
    env:
      IS_TDINTERNAL: ${{ needs.fetch-parameters.outputs.tdinternal }}
      SOURCE_BRANCH: ${{ needs.fetch-parameters.outputs.source_branch }}
      TARGET_BRANCH: ${{ needs.fetch-parameters.outputs.target_branch }}
      PR_NUMBER: ${{ needs.fetch-parameters.outputs.pr_number }}
      WIN_INTERNAL_ROOT: "C:\\workspace\\0\\TDinternal"
      WIN_COMMUNITY_ROOT: "C:\\workspace\\0\\TDinternal\\community"
      WIN_SYSTEM_TEST_ROOT: "C:\\workspace\\0\\TDinternal\\community\\tests\\system-test"
      WIN_VS_PATH: "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat"
      WIN_CPU_TYPE: "x64"
    steps:
      - name: Output the environment information
        run: |
          hostname
          taskkill /f /t /im python.exe
          taskkill /f /t /im bash.exe
          taskkill /f /t /im taosd.exe
          ipconfig
          set
          date /t
          time /t
          rd /s /Q "%WIN_INTERNAL_ROOT%\debug" || exit 0
        shell: cmd

      - name: Prepare repositories
        run: |
          :: Prepare internal repository
          if exist "%WIN_INTERNAL_ROOT%" (
            cd /d "%WIN_INTERNAL_ROOT%"
            git reset --hard
            git clean -f
            git remote prune origin
            git fetch
            git checkout "%TARGET_BRANCH%"
          ) else (
            echo Directory does not exist: "%WIN_INTERNAL_ROOT%"
            exit 1
          )

          :: Prepare community repository
          if exist "%WIN_COMMUNITY_ROOT%" (
            cd /d "%WIN_COMMUNITY_ROOT%"
            git reset --hard
            git clean -f
            git remote prune origin
            git fetch
            git checkout "%TARGET_BRANCH%"
          ) else (
            echo Directory does not exist: "%WIN_COMMUNITY_ROOT%"
            exit 1
          )
        shell: cmd

      - name: Get latest codes and logs for TDinternal PR
        if: ${{ env.IS_TDINTERNAL == 'true' }}
        run: |
          cd %WIN_INTERNAL_ROOT%
          git pull origin %TARGET_BRANCH%
          git fetch origin +refs/pull/%PR_NUMBER%/merge
          git checkout -qf FETCH_HEAD
          cd %WIN_COMMUNITY_ROOT%
          git remote prune origin
          git pull
        shell: cmd

      - name: Get latest codes and logs for TDengine PR
        if: ${{ env.IS_TDINTERNAL == 'false' }}
        run: |
          cd %WIN_INTERNAL_ROOT%
          git pull origin %TARGET_BRANCH%
          cd %WIN_COMMUNITY_ROOT%
          git remote prune origin
          git pull origin %TARGET_BRANCH%
          git fetch origin +refs/pull/%PR_NUMBER%/merge
          git checkout -qf FETCH_HEAD
        shell: cmd

      - name: Output branch and log information
        run: |
          cd %WIN_INTERNAL_ROOT%
          git branch
          git log -5

          cd %WIN_COMMUNITY_ROOT%
          git branch
          git log -5
        shell: cmd

      - name: Update submodule
        run: |
          cd %WIN_COMMUNITY_ROOT%
          git submodule update --init --recursive
        shell: cmd

      - name: Build on windows
        run: |
          echo "building ..."
          time /t
          cd %WIN_INTERNAL_ROOT%
          mkdir debug
          cd debug
          time /t
          call "%WIN_VS_PATH%" %WIN_CPU_TYPE%
          set CL=/MP8
          echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> cmake"
          time /t
          cmake .. -G "NMake Makefiles JOM" -DBUILD_TEST=true -DBUILD_TOOLS=true || exit 7
          echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> jom -j 6"
          time /t
          jom -j 6 || exit 8
          time /t

          cd %WIN_COMMUNITY_ROOT%/tests/ci
          pip3 install taospy==2.7.21
          pip3 install taos-ws-py==0.3.8
          xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
        shell: cmd

      - name: Run ctest
        run: |
          echo "windows ctest ..."
          time /t
          cd %WIN_INTERNAL_ROOT%\\debug
          ctest -j 1 || exit 7
          time /t
        shell: cmd

      - name: Run function test
        run: |
          echo "windows test ..."
          xcopy /e/y/i/f "%WIN_INTERNAL_ROOT%\debug\build\lib\taos.dll" C:\Windows\System32
          ls -l "C:\Windows\System32\taos.dll"
          time /t
          cd %WIN_SYSTEM_TEST_ROOT%
          echo "testing ..."
          test-all.bat ci
          time /t
        shell: cmd

    with:
      tdinternal: false
@@ -0,0 +1,51 @@
name: TDgpt CI

on:
  pull_request:
    branches:
      - '3.0'
    paths:
      - 'tools/tdgpt/**'

jobs:
  build:
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]

    defaults:
      run:
        working-directory: ${{ github.workspace }}/tools/tdgpt

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v3
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install flake8 pytest pylint
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi

      - name: Checking the code with pylint
        run: |
          pylint $(git ls-files '*.py') --exit-zero

      - name: Checking the code with flake8
        run: |
          # stop the build if there are Python syntax errors or undefined names
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics

      - name: Run test cases with pytest
        run: |
          pytest
@@ -0,0 +1,41 @@
name: TDgpt Update Service

on:
  schedule:
    - cron: '30 00 * * *'

env:
  WKC: "/root/TDengine"

jobs:
  update-service:
    runs-on:
      group: CI
      labels: [self-hosted, Linux, X64, tdgpt-anode-service]
    steps:
      - name: Update TDengine codes
        run: |
          set -euo pipefail
          cd ${{ env.WKC }}
          git checkout 3.0

      - name: Package the TDgpt Anode Service
        run: |
          set -euo pipefail
          cd ${{ env.WKC }}/tools/tdgpt/script && ./release.sh

      - name: Reinstall and restart the TDgpt Anode Service
        run: |
          set -euo pipefail
          cd ${{ env.WKC }}/tools/tdgpt/release
          if [[ -f "TDengine-enterprise-anode-1.0.1.tar.gz" ]]; then
            tar -xzf TDengine-enterprise-anode-1.0.1.tar.gz
            cd TDengine-enterprise-anode-1.0.1
            ./install.sh
          fi
          systemctl restart taosanoded

      - name: Clean up
        if: always()
        run: |
          if [[ -d ${{ env.WKC }}/tools/tdgpt/release/TDengine-enterprise-anode-1.0.1 ]]; then rm -rf ${{ env.WKC }}/tools/tdgpt/release/TDengine-enterprise-anode-1.0.1; fi
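Since the scheduled job only restarts the unit, a quick liveness check makes a failed reinstall visible in the run log. A minimal sketch using standard systemd tooling (the service name is taken from the step above):

```bash
#!/usr/bin/env bash
# Verify the anode service actually came back after the restart step.
set -euo pipefail
if systemctl is-active --quiet taosanoded; then
  echo "taosanoded is running"
else
  echo "taosanoded failed to start; recent log output:" >&2
  journalctl -u taosanoded -n 20 --no-pager >&2
  exit 1
fi
```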
Jenkinsfile2 (10 changes)

@@ -112,16 +112,6 @@ def build_pre_docs(){
        git fetch origin +refs/pull/${CHANGE_ID}/merge
        git checkout -qf FETCH_HEAD
    '''

-    sh '''
-        cd ${DOC_WKC}/${tools_repo}
-        git reset --hard
-        git clean -f
-        git fetch
-        git remote prune origin
-        git checkout ''' + env.CHANGE_TARGET + '''
-        git pull >/dev/null
-    '''
    }
}
README-CN.md (170 changes)

@@ -8,30 +8,30 @@
  </a>
</p>

-简体中文 | [English](README.md) | [TDengine 云服务](https://cloud.taosdata.com/?utm_medium=cn&utm_source=github) | 很多职位正在热招中,请看[这里](https://www.taosdata.com/careers/)
+简体中文 | [English](README.md) | [TDengine 云服务](https://cloud.taosdata.com/?utm_medium=cn&utm_source=github) | 很多职位正在热招中,请看 [这里](https://www.taosdata.com/careers/)

# 目录

1. [TDengine 简介](#1-tdengine-简介)
1. [文档](#2-文档)
1. [必备工具](#3-必备工具)
-   - [3.1 Linux预备](#31-linux系统)
-   - [3.2 macOS预备](#32-macos系统)
-   - [3.3 Windows预备](#33-windows系统)
+   - [3.1 Linux 预备](#31-Linux系统)
+   - [3.2 macOS 预备](#32-macOS系统)
+   - [3.3 Windows 预备](#33-Windows系统)
    - [3.4 克隆仓库](#34-克隆仓库)
1. [构建](#4-构建)
-   - [4.1 Linux系统上构建](#41-linux系统上构建)
-   - [4.2 macOS系统上构建](#42-macos系统上构建)
-   - [4.3 Windows系统上构建](#43-windows系统上构建)
+   - [4.1 Linux 系统上构建](#41-Linux系统上构建)
+   - [4.2 macOS 系统上构建](#42-macOS系统上构建)
+   - [4.3 Windows 系统上构建](#43-Windows系统上构建)
1. [打包](#5-打包)
1. [安装](#6-安装)
-   - [6.1 Linux系统上安装](#61-linux系统上安装)
-   - [6.2 macOS系统上安装](#62-macos系统上安装)
-   - [6.3 Windows系统上安装](#63-windows系统上安装)
+   - [6.1 Linux 系统上安装](#61-Linux系统上安装)
+   - [6.2 macOS 系统上安装](#62-macOS系统上安装)
+   - [6.3 Windows 系统上安装](#63-Windows系统上安装)
1. [快速运行](#7-快速运行)
-   - [7.1 Linux系统上运行](#71-linux系统上运行)
-   - [7.2 macOS系统上运行](#72-macos系统上运行)
-   - [7.3 Windows系统上运行](#73-windows系统上运行)
+   - [7.1 Linux 系统上运行](#71-Linux系统上运行)
+   - [7.2 macOS 系统上运行](#72-macOS系统上运行)
+   - [7.3 Windows 系统上运行](#73-Windows系统上运行)
1. [测试](#8-测试)
1. [版本发布](#9-版本发布)
1. [工作流](#10-工作流)

@@ -43,9 +43,9 @@

TDengine 是一款开源、高性能、云原生的时序数据库 (Time-Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供缓存、数据订阅、流式计算等功能,是一款极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。与其他时序数据库相比,TDengine 的主要优势如下:

-- **高性能**:通过创新的存储引擎设计,无论是数据写入还是查询,TDengine 的性能比通用数据库快 10 倍以上,也远超其他时序数据库,存储空间不及通用数据库的1/10。
+- **高性能**:通过创新的存储引擎设计,无论是数据写入还是查询,TDengine 的性能比通用数据库快 10 倍以上,也远超其他时序数据库,存储空间不及通用数据库的 1/10。

-- **云原生**:通过原生分布式的设计,充分利用云平台的优势,TDengine 提供了水平扩展能力,具备弹性、韧性和可观测性,支持k8s部署,可运行在公有云、私有云和混合云上。
+- **云原生**:通过原生分布式的设计,充分利用云平台的优势,TDengine 提供了水平扩展能力,具备弹性、韧性和可观测性,支持 k8s 部署,可运行在公有云、私有云和混合云上。

- **极简时序数据平台**:TDengine 内建消息队列、缓存、流式计算等功能,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低系统的复杂度,降低应用开发和运营成本。

@@ -53,29 +53,29 @@ TDengine 是一款开源、高性能、云原生的时序数据库 (Time-Series

- **简单易用**:无任何依赖,安装、集群几秒搞定;提供REST以及各种语言连接器,与众多第三方工具无缝集成;提供命令行程序,便于管理和即席查询;提供各种运维工具。

-- **核心开源**:TDengine 的核心代码包括集群功能全部开源,截止到2022年8月1日,全球超过 135.9k 个运行实例,GitHub Star 18.7k,Fork 4.4k,社区活跃。
+- **核心开源**:TDengine 的核心代码包括集群功能全部开源,截止到 2022 年 8 月 1 日,全球超过 135.9k 个运行实例,GitHub Star 18.7k,Fork 4.4k,社区活跃。

-了解TDengine高级功能的完整列表,请 [点击](https://tdengine.com/tdengine/)。体验TDengine最简单的方式是通过[TDengine云平台](https://cloud.tdengine.com)。
+了解 TDengine 高级功能的完整列表,请 [点击](https://tdengine.com/tdengine/)。体验 TDengine 最简单的方式是通过 [TDengine云平台](https://cloud.tdengine.com)。

# 2. 文档

关于完整的使用手册,系统架构和更多细节,请参考 [TDengine](https://www.taosdata.com/) 或者 [TDengine 官方文档](https://docs.taosdata.com)。

-用户可根据需求选择通过[容器](https://docs.taosdata.com/get-started/docker/)、[安装包](https://docs.taosdata.com/get-started/package/)、[Kubernetes](https://docs.taosdata.com/deployment/k8s/)来安装或直接使用无需安装部署的[云服务](https://cloud.taosdata.com/)。本快速指南是面向想自己编译、打包、测试的开发者的。
+用户可根据需求选择通过 [容器](https://docs.taosdata.com/get-started/docker/)、[安装包](https://docs.taosdata.com/get-started/package/)、[Kubernetes](https://docs.taosdata.com/deployment/k8s/) 来安装或直接使用无需安装部署的 [云服务](https://cloud.taosdata.com/)。本快速指南是面向想自己编译、打包、测试的开发者的。

-如果想编译或测试TDengine连接器,请访问以下仓库: [JDBC连接器](https://github.com/taosdata/taos-connector-jdbc), [Go连接器](https://github.com/taosdata/driver-go), [Python连接器](https://github.com/taosdata/taos-connector-python), [Node.js连接器](https://github.com/taosdata/taos-connector-node), [C#连接器](https://github.com/taosdata/taos-connector-dotnet), [Rust连接器](https://github.com/taosdata/taos-connector-rust).
+如果想编译或测试 TDengine 连接器,请访问以下仓库:[JDBC连接器](https://github.com/taosdata/taos-connector-jdbc)、[Go连接器](https://github.com/taosdata/driver-go)、[Python连接器](https://github.com/taosdata/taos-connector-python)、[Node.js连接器](https://github.com/taosdata/taos-connector-node)、[C#连接器](https://github.com/taosdata/taos-connector-dotnet)、[Rust连接器](https://github.com/taosdata/taos-connector-rust)。

# 3. 前置条件

-TDengine 目前可以在 Linux、 Windows、macOS 等平台上安装和运行。任何 OS 的应用也可以选择 taosAdapter 的 RESTful 接口连接服务端 taosd。CPU 支持 X64/ARM64,后续会支持 MIPS64、Alpha64、ARM32、RISC-V 等 CPU 架构。目前不支持使用交叉编译器构建。
+TDengine 目前可以在 Linux、 Windows、macOS 等平台上安装和运行。任何 OS 的应用也可以选择 taosAdapter 的 RESTful 接口连接服务端 taosd。CPU 支持 X64、ARM64,后续会支持 MIPS64、Alpha64、ARM32、RISC-V 等 CPU 架构。目前不支持使用交叉编译器构建。

如果你想要编译 taosAdapter 或者 taosKeeper,需要安装 Go 1.18 及以上版本。

-## 3.1 Linux系统
+## 3.1 Linux 系统

<details>

-<summary>安装Linux必备工具</summary>
+<summary>安装 Linux 必备工具</summary>

### Ubuntu 18.04、20.04、22.04

@@ -96,13 +96,13 @@ yum install -y zlib-static xz-devel snappy-devel jansson-devel pkgconfig libatom

</details>

-## 3.2 macOS系统
+## 3.2 macOS 系统

<details>

-<summary>安装macOS必备工具</summary>
+<summary>安装 macOS 必备工具</summary>

-根据提示安装依赖工具 [brew](https://brew.sh/).
+根据提示安装依赖工具 [brew](https://brew.sh/)。

```bash
brew install argp-standalone gflags pkgconfig

@@ -110,11 +110,11 @@ brew install argp-standalone gflags pkgconfig

</details>

-## 3.3 Windows系统
+## 3.3 Windows 系统

<details>

-<summary>安装Windows必备工具</summary>
+<summary>安装 Windows 必备工具</summary>

进行中。

@@ -122,7 +122,7 @@ brew install argp-standalone gflags pkgconfig

## 3.4 克隆仓库

-通过如下命令将TDengine仓库克隆到指定计算机:
+通过如下命令将 TDengine 仓库克隆到指定计算机:

```bash
git clone https://github.com/taosdata/TDengine.git

@@ -131,23 +131,23 @@ cd TDengine

# 4. 构建

-TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBenchmark(曾命名为 taosdemo)和 taosdump 两个软件。默认 TDengine 编译不包含 taosTools, 您可以在编译 TDengine 时使用`cmake .. -DBUILD_TOOLS=true` 来同时编译 taosTools。
+TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBenchmark(曾命名为 taosdemo)和 taosdump 两个软件。默认 TDengine 编译不包含 taosTools,您可以在编译 TDengine 时使用 `cmake .. -DBUILD_TOOLS=true` 来同时编译 taosTools。

-为了构建TDengine, 请使用 [CMake](https://cmake.org/) 3.13.0 或者更高版本。
+为了构建 TDengine,请使用 [CMake](https://cmake.org/) 3.13.0 或者更高版本。

-## 4.1 Linux系统上构建
+## 4.1 Linux 系统上构建

<details>

-<summary>Linux系统上构建步骤</summary>
+<summary>Linux 系统上构建步骤</summary>

-可以通过以下命令使用脚本 `build.sh` 编译TDengine和taosTools,包括taosBenchmark和taosdump:
+可以通过以下命令使用脚本 `build.sh` 编译 TDengine 和 taosTools,包括 taosBenchmark 和 taosdump。

```bash
./build.sh
```

-也可以通过以下命令进行构建:
+也可以通过以下命令进行构建:

```bash
mkdir debug && cd debug

@@ -157,15 +157,15 @@ make

如果你想要编译 taosAdapter,需要添加 `-DBUILD_HTTP=false` 选项。

-如果你想要编译 taosKeeper,需要添加 `--DBUILD_KEEPER=true` 选项。
+如果你想要编译 taosKeeper,需要添加 `-DBUILD_KEEPER=true` 选项。

-可以使用Jemalloc作为内存分配器,而不是使用glibc:
+可以使用 Jemalloc 作为内存分配器,而不是使用 glibc:

```bash
-cmake .. -DJEMALLOC_ENABLED=true
+cmake .. -DJEMALLOC_ENABLED=ON
```

-TDengine构建脚本可以自动检测x86、x86-64、arm64平台上主机的体系结构。
-您也可以通过CPUTYPE选项手动指定架构:
+TDengine 构建脚本可以自动检测 x86、x86-64、arm64 平台上主机的体系结构。
+您也可以通过 CPUTYPE 选项手动指定架构:

```bash
cmake .. -DCPUTYPE=aarch64 && cmake --build .

@@ -173,13 +173,13 @@ cmake .. -DCPUTYPE=aarch64 && cmake --build .

</details>

-## 4.2 macOS系统上构建
+## 4.2 macOS 系统上构建

<details>

-<summary>macOS系统上构建步骤</summary>
+<summary>macOS 系统上构建步骤</summary>

-请安装XCode命令行工具和cmake。使用XCode 11.4+在Catalina和Big Sur上完成验证。
+请安装 XCode 命令行工具和 cmake。使用 XCode 11.4+ 在 Catalina 和 Big Sur 上完成验证。

```shell
mkdir debug && cd debug

@@ -192,14 +192,14 @@ cmake .. && cmake --build .

</details>

-## 4.3 Windows系统上构建
+## 4.3 Windows 系统上构建

<details>

-<summary>Windows系统上构建步骤</summary>
+<summary>Windows 系统上构建步骤</summary>

-如果您使用的是Visual Studio 2013,请执行“cmd.exe”打开命令窗口执行如下命令。
-执行vcvarsall.bat时,64位的Windows请指定“amd64”,32位的Windows请指定“x86”。
+如果您使用的是 Visual Studio 2013,请执行 “cmd.exe” 打开命令窗口执行如下命令。
+执行 vcvarsall.bat 时,64 位的 Windows 请指定 “amd64”,32 位的 Windows 请指定 “x86”。

```cmd
mkdir debug && cd debug

@@ -208,19 +208,19 @@ cmake .. -G "NMake Makefiles"
nmake
```

-如果您使用Visual Studio 2019或2017:
+如果您使用 Visual Studio 2019 或 2017:

-请执行“cmd.exe”打开命令窗口执行如下命令。
-执行vcvarsall.bat时,64位的Windows请指定“x64”,32位的Windows请指定“x86”。
+请执行 “cmd.exe” 打开命令窗口执行如下命令。
+执行 vcvarsall.bat 时,64 位的 Windows 请指定 “x64”,32 位的 Windows 请指定 “x86”。

```cmd
mkdir debug && cd debug
-"c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 >
+"C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```

-或者,您可以通过点击Windows开始菜单打开命令窗口->“Visual Studio < 2019 | 2017 >”文件夹->“x64原生工具命令提示符VS < 2019 | 2017 >”或“x86原生工具命令提示符VS < 2019 | 2017 >”取决于你的Windows是什么架构,然后执行命令如下:
+或者,您可以通过点击 Windows 开始菜单打开命令窗口 -> `Visual Studio < 2019 | 2017 >` 文件夹 -> `x64 原生工具命令提示符 VS < 2019 | 2017 >` 或 `x86 原生工具命令提示符 VS < 2019 | 2017 >`(取决于你的 Windows 是什么架构),然后执行如下命令:

```cmd
mkdir debug && cd debug

@@ -231,33 +231,33 @@ nmake

# 5. 打包

-由于一些组件依赖关系,TDengine社区安装程序不能仅由该存储库创建。我们仍在努力改进。
+由于一些组件依赖关系,TDengine 社区安装程序不能仅由该存储库创建。我们仍在努力改进。

# 6. 安装

-## 6.1 Linux系统上安装
+## 6.1 Linux 系统上安装

<details>

-<summary>Linux系统上安装详细步骤</summary>
+<summary>Linux 系统上安装详细步骤</summary>

-构建成功后,TDengine可以通过以下命令进行安装:
+构建成功后,TDengine 可以通过以下命令进行安装:

```bash
sudo make install
```

-从源代码安装还将为TDengine配置服务管理。用户也可以使用[TDengine安装包](https://docs.taosdata.com/get-started/package/)进行安装。
+从源代码安装还将为 TDengine 配置服务管理。用户也可以使用 [TDengine安装包](https://docs.taosdata.com/get-started/package/)进行安装。

</details>

-## 6.2 macOS系统上安装
+## 6.2 macOS 系统上安装

<details>

-<summary>macOS系统上安装详细步骤</summary>
+<summary>macOS 系统上安装详细步骤</summary>

-构建成功后,TDengine可以通过以下命令进行安装:
+构建成功后,TDengine 可以通过以下命令进行安装:

```bash
sudo make install

@@ -265,13 +265,13 @@ sudo make install

</details>

-## 6.3 Windows系统上安装
+## 6.3 Windows 系统上安装

<details>

-<summary>Windows系统上安装详细步骤</summary>
+<summary>Windows 系统上安装详细步骤</summary>

-构建成功后,TDengine可以通过以下命令进行安装:
+构建成功后,TDengine 可以通过以下命令进行安装:

```cmd
nmake install

@@ -281,32 +281,32 @@ nmake install

# 7. 快速运行

-## 7.1 Linux系统上运行
+## 7.1 Linux 系统上运行

<details>

-<summary>Linux系统上运行详细步骤</summary>
+<summary>Linux 系统上运行详细步骤</summary>

-在Linux系统上安装TDengine完成后,在终端运行如下命令启动服务:
+在 Linux 系统上安装 TDengine 完成后,在终端运行如下命令启动服务:

```bash
sudo systemctl start taosd
```

-然后用户可以通过如下命令使用TDengine命令行连接TDengine服务:
+然后用户可以通过如下命令使用 TDengine 命令行连接 TDengine 服务:

```bash
taos
```

-如果TDengine 命令行连接服务器成功,系统将打印欢迎信息和版本信息。否则,将显示连接错误信息。
+如果 TDengine 命令行连接服务器成功,系统将打印欢迎信息和版本信息。否则,将显示连接错误信息。

-如果您不想将TDengine作为服务运行,您可以在当前终端中运行它。例如,要在构建完成后快速启动TDengine服务器,在终端中运行以下命令:(我们以Linux为例,Windows上的命令为 `taosd.exe`)
+如果您不想将 TDengine 作为服务运行,您可以在当前终端中运行它。例如,要在构建完成后快速启动 TDengine 服务器,在终端中运行以下命令:(以 Linux 为例,Windows 上的命令为 `taosd.exe`)

```bash
./build/bin/taosd -c test/cfg
```

-在另一个终端上,使用TDengine命令行连接服务器:
+在另一个终端上,使用 TDengine 命令行连接服务器:

```bash
./build/bin/taos -c test/cfg

@@ -316,42 +316,42 @@ taos

</details>

-## 7.2 macOS系统上运行
+## 7.2 macOS 系统上运行

<details>

-<summary>macOS系统上运行详细步骤</summary>
+<summary>macOS 系统上运行详细步骤</summary>

-在macOS上安装完成后启动服务,双击/applications/TDengine启动程序,或者在终端中执行如下命令:
+在 macOS 上安装完成后启动服务,双击 `/applications/TDengine` 启动程序,或者在终端中执行如下命令:

```bash
sudo launchctl start com.tdengine.taosd
```

-然后在终端中使用如下命令通过TDengine命令行连接TDengine服务器:
+然后在终端中使用如下命令通过 TDengine 命令行连接 TDengine 服务器:

```bash
taos
```

-如果TDengine命令行连接服务器成功,系统将打印欢迎信息和版本信息。否则,将显示错误信息。
+如果 TDengine 命令行连接服务器成功,系统将打印欢迎信息和版本信息。否则,将显示错误信息。

</details>

-## 7.3 Windows系统上运行
+## 7.3 Windows 系统上运行

<details>

-<summary>Windows系统上运行详细步骤</summary>
+<summary>Windows 系统上运行详细步骤</summary>

-您可以使用以下命令在Windows平台上启动TDengine服务器:
+您可以使用以下命令在 Windows 平台上启动 TDengine 服务器:

```cmd
.\build\bin\taosd.exe -c test\cfg
```

-在另一个终端上,使用TDengine命令行连接服务器:
+在另一个终端上,使用 TDengine 命令行连接服务器:

```cmd
.\build\bin\taos.exe -c test\cfg

@@ -363,25 +363,25 @@ taos

# 8. 测试

-有关如何在TDengine上运行不同类型的测试,请参考 [TDengine测试](./tests/README-CN.md)
+有关如何在 TDengine 上运行不同类型的测试,请参考 [TDengine测试](./tests/README-CN.md)

# 9. 版本发布

-TDengine发布版本的完整列表,请参考 [版本列表](https://github.com/taosdata/TDengine/releases)
+TDengine 发布版本的完整列表,请参考 [版本列表](https://github.com/taosdata/TDengine/releases)

# 10. 工作流

-TDengine构建检查工作流可以在参考 [Github Action](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml), 更多的工作流正在创建中,将很快可用。
+TDengine 构建检查工作流可参考 [GitHub Action](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml),更多的工作流正在创建中,将很快可用。

# 11. 覆盖率

-最新的TDengine测试覆盖率报告可参考 [coveralls.io](https://coveralls.io/github/taosdata/TDengine)
+最新的 TDengine 测试覆盖率报告可参考 [coveralls.io](https://coveralls.io/github/taosdata/TDengine)

<details>

<summary>如何在本地运行测试覆盖率报告?</summary>

-在本地创建测试覆盖率报告(HTML格式),请运行以下命令:
+在本地创建测试覆盖率报告(HTML 格式),请运行以下命令:

```bash
cd tests

@@ -389,8 +389,8 @@ bash setup-lcov.sh -v 1.16 && ./run_local_coverage.sh -b main -c task
# on main branch and run cases in longtimeruning_cases.task
# for more information about options please refer to ./run_local_coverage.sh -h
```

-> **注意:**
-> 请注意,-b和-i选项将使用-DCOVER=true选项重新编译TDengine,这可能需要花费一些时间。
+> **注意**:
+> 请注意,-b 和 -i 选项将使用 -DCOVER=true 选项重新编译 TDengine,这可能需要花费一些时间。

</details>
@@ -177,7 +177,7 @@ If you want to compile taosKeeper, you need to add the `--DBUILD_KEEPER=true` op
You can use Jemalloc as memory allocator instead of glibc:

```bash
-cmake .. -DJEMALLOC_ENABLED=true
+cmake .. -DJEMALLOC_ENABLED=ON
```

TDengine build script can auto-detect the host machine's architecture on x86, x86-64, arm64 platform.
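Putting the flag into a full out-of-source build, here is a minimal sketch, assuming the build prerequisites described earlier in the README are installed:

```bash
# Out-of-source build with jemalloc as the allocator (sketch; assumes
# the prerequisites from the "Prerequisites" section are installed).
mkdir -p debug && cd debug
cmake .. -DJEMALLOC_ENABLED=ON
make -j"$(nproc)"
```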
@@ -116,9 +116,6 @@ ELSE()
  set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz")
ENDIF()

-# force set all platform to JEMALLOC_ENABLED = false
-# SET(JEMALLOC_ENABLED OFF)

IF(TD_WINDOWS)
  MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")

@@ -260,12 +257,15 @@ ELSE()
ENDIF()

IF(TD_LINUX)
  IF(TD_LINUX_64)
    IF(${JEMALLOC_ENABLED})
-      MESSAGE(STATUS "JEMALLOC_ENABLED Enabled")
+      MESSAGE(STATUS "JEMALLOC Enabled")
      SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=attributes")
      SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=attributes")
      SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc")
      ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib)
    ELSE()
-      MESSAGE(STATUS "JEMALLOC_ENABLED Disabled")
+      MESSAGE(STATUS "JEMALLOC Disabled")
      SET(LINK_JEMALLOC "")
    ENDIF()
  ENDIF()
@@ -145,20 +145,19 @@ Query OK, 10 row(s) in set (2.415961s)

In TDengine, you can use the window clause to perform aggregation queries by time window partitioning, which is particularly suitable for scenarios requiring analysis of large amounts of time-series data, such as smart meters collecting data every 10s but needing to query the average temperature every 1min.

-The window clause allows you to partition the queried data set by windows and aggregate the data within each window, including:
-
-- Time window (time window)
-- State window (status window)
-- Session window (session window)
-- Event window (event window)
-
-The logic of window partitioning is shown in the following image:
+The window clause allows you to partition the queried data set by windows and aggregate the data within each window. The logic of window partitioning is shown in the following image:

<figure>
<Image img={windowModel} alt="Windowing description"/>
<figcaption>Figure 1. Windowing logic</figcaption>
</figure>

- Time Window: Data is divided based on time intervals, supporting sliding and tumbling time windows, suitable for data aggregation over fixed time periods.
- Status Window: Windows are divided based on changes in device status values, with data of the same status value grouped into one window, which closes when the status value changes.
- Session Window: Sessions are divided based on the differences in record timestamps, with records having a timestamp interval less than the predefined value belonging to the same session.
- Event Window: Windows are dynamically divided based on the start and end conditions of events, opening when the start condition is met and closing when the end condition is met.
- Count Window: Windows are divided based on the number of data rows, with each window consisting of a specified number of rows for aggregation calculations.
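As a concrete illustration, a time-window aggregation can be run from the shell via the TDengine CLI. A hedged sketch against the smart-meter scenario above (the database, table, and column names are assumptions, not taken from this page):

```bash
# 1-minute averages over the last hour from 10s samples, via a tumbling
# time window (database/table/column names are hypothetical).
taos -s "SELECT _wstart, AVG(current) FROM power.meters WHERE ts >= NOW - 1h INTERVAL(1m);"
```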
The syntax for the window clause is as follows:

```sql
@@ -86,9 +86,15 @@ The keep alive interval is the time interval negotiated between the client and t
In **Clean Session**, choose whether to clear the session. The default value is true.

-Fill in the Topic names to be consumed in **Subscription Topics and QoS Configuration**. Use the following format: `topic1::0,topic2::1`.
+In the **Topics Qos Config**, fill in the topic name and QoS to subscribe. Use the following format: `{topic_name}::{qos}` (e.g., `my_topic::0`). MQTT protocol 5.0 supports shared subscriptions, allowing multiple clients to subscribe to the same topic for load balancing. Use the following format: `$share/{group_name}/{topic_name}::{qos}`, where `$share` is a fixed prefix indicating that shared subscription is enabled, and `group_name` is the client group name, similar to Kafka's consumer group.

-Click the **Check Connectivity** button to check if the data source is available.
+In the **Topic Analysis**, fill in the MQTT topic parsing rules. The format is the same as the MQTT Topic, parsing each level of the MQTT Topic into corresponding variable names, with `_` indicating that the current level is ignored during parsing. For example: if the MQTT Topic `a/+/c` corresponds to the parsing rule `v1/v2/_`, it means assigning the first level `a` to variable `v1`, the value of the second level (where the wildcard `+` represents any value) to variable `v2`, and ignoring the value of the third level `c`, which will not be assigned to any variable. In the `payload parsing` below, the variables obtained from Topic parsing can also participate in various transformations and calculations.
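The parsing rule behaves like positional destructuring of the topic path. A tiny hedged illustration in shell (the sample topic value is hypothetical):

```bash
#!/usr/bin/env bash
# Illustration of the "v1/v2/_" parsing rule applied to an MQTT topic
# matching the filter a/+/c (sample topic value is hypothetical).
topic="a/factory01/c"
IFS='/' read -r v1 v2 _ <<< "$topic"
echo "v1=$v1"   # v1=a
echo "v2=$v2"   # v2=factory01 (the wildcard level)
                # the third level "c" is ignored by the rule
```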
+In the **Compression**, configure the message body compression algorithm. After receiving the message, taosX uses the corresponding compression algorithm to decompress the message body and obtain the original data. Options include none (no compression), gzip, snappy, lz4, and zstd, with the default being none.

+In the **Char Encoding**, configure the message body encoding format. After receiving the message, taosX uses the corresponding encoding format to decode the message body and obtain the original data. Options include UTF_8, GBK, GB18030, and BIG5, with the default being UTF_8.

+Click the **Check Connection** button to check if the data source is available.

<figure>
<Image img={imgStep05} alt=""/>
@@ -495,10 +495,10 @@ taos> select myfun(v1, v2) from t;
DB error: udf function execution failure (0.011088s)
```

-Unfortunately, the execution failed. What could be the reason? Check the udfd process logs.
+Unfortunately, the execution failed. What could be the reason? Check the taosudf process logs.

```shell
-tail -10 /var/log/taos/udfd.log
+tail -10 /var/log/taos/taosudf.log
```

Found the following error messages.
@@ -339,7 +339,7 @@ Helm operates Kubernetes using kubectl and kubeconfig configurations, which can
The TDengine Chart has not yet been released to the Helm repository; it can currently be downloaded directly from GitHub:

```shell
-wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-enterpise-3.5.0.tgz
+wget https://github.com/taosdata/TDengine-Operator/raw/refs/heads/3.0/helm/tdengine-enterprise-3.5.0.tgz
```

Note that it's for the enterprise edition, and the community edition is not yet available.
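Once downloaded, the archive installs like any local chart. A hedged sketch (the release name and namespace below are arbitrary assumptions, and the chart's configurable values are not covered here):

```bash
# Install the downloaded chart into its own namespace (sketch; the
# release name "tdengine" and the namespace are arbitrary assumptions).
helm install tdengine ./tdengine-enterprise-3.5.0.tgz \
  --namespace tdengine --create-namespace

# Inspect what was deployed
kubectl -n tdengine get pods
```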
@@ -27,19 +27,19 @@ It should be noted that when configuring the ODBC data source for Tableau, the [

**Step 3**, Click the `DSN` radio button, then select the configured data source (MyTDengine), and click the `Connect` button. After the connection is successful, delete the content of the string attachment, and finally click the `Sign In` button.

-![tableau-odbc](./tableau/tableau-odbc.webp)
+![tableau-odbc](./tableau/tableau-odbc.jpg)

## Data Analysis

**Step 1**, In the workbook page, the connected data sources will be displayed. Clicking on the dropdown list of databases will display the databases that require data analysis. On this basis, click the search button in the table options to display all tables in the database. Then, drag the table to be analyzed to the right area to display the table structure.

-![tableau-workbook](./tableau/tableau-table.webp)
+![tableau-workbook](./tableau/tableau-table.jpg)

**Step 2**, Click the `Update Now` button below to display the data in the table.

-![tableau-workbook](./tableau/tableau-data.webp)
+![tableau-workbook](./tableau/tableau-data.jpg)

**Step 3**, Click on the "Worksheet" at the bottom of the window to pop up the data analysis window, which displays all the fields of the analysis table. Drag the fields to the rows and columns to display the chart.

-![tableau-workbook](./tableau/tableau-analysis.webp)
+![tableau-workbook](./tableau/tableau-analysis.jpg)
@@ -21,22 +21,22 @@ Prepare the following environment:

**Step 2**, Start Excel in the Windows system environment, then select [Data] -> [Get Data] -> [From Other Sources] -> [From ODBC].

-![excel-odbc](./excel/odbc-menu.webp)
+![excel-odbc](./excel/odbc-menu.jpg)

**Step 3**, In the pop-up window, select the data source you need to connect to from the drop-down list of [Data source name (DSN)], and then click the [OK] button.

-![excel-odbc](./excel/odbc-select.webp)
+![excel-odbc](./excel/odbc-select.jpg)

**Step 4**, Enter the username and password for TDengine.

-![excel-odbc](./excel/odbc-config.webp)
+![excel-odbc](./excel/odbc-config.jpg)

**Step 5**, In the pop-up [Navigator] dialog box, select the database tables you want to load, and then click [Load] to complete the data loading.

-![excel-odbc](./excel/odbc-load.webp)
+![excel-odbc](./excel/odbc-load.jpg)

## Data Analysis

Select the imported data. On the [Insert] tab, choose the column chart, and then configure the data fields in the [PivotChart Fields] pane on the right.

-![excel-odbc](./excel/odbc-data.webp)
+![excel-odbc](./excel/odbc-data.jpg)
@@ -0,0 +1,82 @@
---
sidebar_label: FineBI
title: Integration With FineBI
toc_max_heading_level: 4
---

Fanruan is a technology company specializing in business intelligence and data analytics. With its self-developed core products, FineBI and FineReport, the company has established a leading position in the industry. Fanruan's BI tools are widely adopted by enterprises across various sectors, enabling data visualization analysis, report generation, and data-driven decision support.

By using the TDengine Java connector, FineBI can quickly access the data in TDengine. Users can connect to the TDengine database directly in FineBI, fetch time-series data for analysis, and create visual reports; the entire process requires no code.

## Prerequisites

- TDengine 3.3.4.0 or a later version is installed and running normally (both the Enterprise and Community editions work).
- taosAdapter is running normally; see the [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/).
- FineBI is installed (if not, download and install it: [Download FineBI](https://intl.finebi.com/download)).
- Download the fine_conf_entity plugin to support adding JDBC drivers: [Download link](https://market.fanruan.com/plugin/1052a471-0239-4cd8-b832-045d53182c5d).
- Install the JDBC driver. Download the `TDengine JDBC connector` file `taos-jdbcdriver-3.4.0-dist.jar` or a higher version from `maven.org`.

## Configure Data Source

**Step 1**, In the `db.script` configuration file of the FineBI server, find the `SystemConfig.driverUpload` configuration item and change its value to true.

- Windows system: the configuration file path is webapps/webroot/WEB-INF/embed/finedb/db.script under the installation directory.
- Linux/Mac system: the configuration file path is /usr/local/FineBI6.1/webapps/webroot/WEB-INF/embed/finedb/db.script.

**Step 2**, Start the FineBI service. Enter `http://ip:37799/webroot/decision` in the browser, where "ip" is the IP address of the FineBI server.

**Step 3**, After logging in to the FineBI web page, click [System Management] -> [Plugin Management]. In the [Store App] section on the right, click [Install From Local] and select the downloaded `fine_conf_entity` plugin for installation.

![]()

**Step 4**, Click [System Management] -> [Data Connection] -> [Data Connection Management]. On the right-hand page, click the [Driver Management] button to open the configuration page. Then click the [New Driver] button, and in the pop-up window, enter a name (for example, `tdengine-websocket`) to configure the JDBC driver.

![]()

**Step 5**, On the driver configuration page, click the [Upload File] button. Select the downloaded TDengine Java connector (e.g., `taos-jdbcdriver-3.4.0-dist.jar`) for uploading. After the upload is complete, select `com.taosdata.jdbc.ws.WebSocketDriver` from the [Driver] drop-down list, and then click [Save].

![]()

**Step 6**, On the "Data Connection Management" page, click the [New Data Connection] button. Then click "Others", and on the right-hand page, click "Other JDBC" to configure the connection.

![]()

**Step 7**, On the configuration page, first enter the name of the data connection. Then select "Custom" in the [Driver] option and choose the configured driver from the drop-down list (e.g., `com.taosdata.jdbc.ws.WebSocketDriver (tdengine-websocket)`). After that, configure the "Data Connection URL" (e.g., `jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata&fineBIDialect=mysql`). Once the settings are complete, click [Test Connection] in the top-right corner to test the connection. After verification succeeds, click [Save] to finish the configuration.

:::tip
The `fineBIDialect=mysql` setting tells FineBI to apply MySQL's SQL dialect rules, that is, to parse and generate queries the way it would for a MySQL database.
:::
|
||||
|
||||

## Data Analysis

### Data preparation

**Step 1**, Click [Public Data]. On the right-hand page, click [New Folder] to create a folder (e.g., TDengine). Then click the [+] button on the right side of the folder to create a "Database Table" dataset or an "SQL Dataset".

![]()

**Step 2**, Click "Database Table" to open the database table selection page. In the "Data Connection" section on the left, select the previously created connection; all the tables in that connection's database are then displayed on the right. Select the table you need to load (e.g., meters) and click [OK]; the data in the meters table is then displayed.

![]()

![]()

**Step 3**, Click "SQL Dataset" to open the configuration page for the SQL dataset. First enter the table name (used for display on the FineBI page). Then select the previously created connection from the "Data from Data Connection" drop-down list. After that, enter the SQL statement and click "Preview" to view the query results. Finally, click [OK]; the SQL dataset is created.

![]()

### Smart Meter Example

**Step 1**, Click [My Analysis]. On the right-hand page, click [New Folder] to create a folder (for example, `TDengine`). Then click the [+] button on the right side of the folder to create an "Analysis Subject".

![]()

**Step 2**, On the analysis subject page, select the dataset (for example, `meters`) and click the [OK] button to complete the association of the dataset.

![]()

**Step 3**, Click the [Component] tab at the bottom of the analysis subject page to open the chart configuration page. Drag the fields to the horizontal axis or the vertical axis, and the chart is displayed.

![]()
(Binary screenshot assets were added or resized in this change; the per-image width/height/size details from the diff viewer are omitted.)
@ -43,7 +43,7 @@ After modifying configuration file parameters, you need to restart the *taosd* s
|
|||
|resolveFQDNRetryTime | Cancelled after 3.x |Not supported |Number of retries when FQDN resolution fails|
|
||||
|timeToGetAvailableConn | Cancelled after 3.3.4.x |Not supported |Maximum waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|
||||
|maxShellConns | Cancelled after 3.x |Supported, effective after restart|Maximum number of connections allowed|
|
||||
|maxRetryWaitTime | |Supported, effective after restart|Maximum timeout for reconnection, calculated from the time of retry, range is 0-86400000, in milliseconds, default value 10000|
|
||||
|maxRetryWaitTime | |Supported, effective after restart|Maximum timeout for reconnection, calculated from the time of retry, range is 3000-86400000, in milliseconds, default value 10000|
|
||||
|shareConnLimit |Added in 3.3.4.0 |Supported, effective after restart|Number of requests a connection can share, range 1-512, default value 10|
|
||||
|readTimeout |Added in 3.3.4.0 |Supported, effective after restart|Minimum timeout for a single request, range 64-604800, in seconds, default value 900|
|
||||
|
||||
|
@ -231,6 +231,7 @@ The effective value of charset is UTF-8.
|
|||
|udf | |Supported, effective after restart|Whether to start UDF service; 0: do not start, 1: start; default value 0 |
|
||||
|udfdResFuncs | |Supported, effective after restart|Internal parameter, for setting UDF result sets|
|
||||
|udfdLdLibPath | |Supported, effective after restart|Internal parameter, indicates the library path for loading UDF|
|
||||
|enableStrongPassword | After 3.3.5.0 |Supported, effective after restart|The password must include at least three of the following character types: uppercase letters, lowercase letters, numbers, and special characters; special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? \| ~ , .`; 0: disable, 1: enable; default value 1 |
|
||||
|
||||
### Stream Computing Parameters
|
||||
|
||||
|
|
|
@ -44,6 +44,7 @@ The TDengine client driver provides all the APIs needed for application programm
|
|||
|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages|
|
||||
|minSlidingTime | |Supported, effective immediately |Internal parameter, minimum allowable value for sliding|
|
||||
|minIntervalTime | |Supported, effective immediately |Internal parameter, minimum allowable value for interval|
|
||||
|compareAsStrInGreatest | v3.3.6.0 |Supported, effective immediately |When the greatest and least functions have both numeric and string parameters, controls the comparison type conversion. Integer; 1: uniformly convert to strings for comparison, 0: uniformly convert to numeric types for comparison.|
|
||||
|
||||
### Writing Related
|
||||
|
||||
|
|
|
@ -188,9 +188,12 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
|
|||
|
||||
The parameters listed in this section apply to all functional modes.
|
||||
|
||||
- **filetype**: The function to test, possible values are `insert`, `query`, and `subscribe`. Corresponding to insert, query, and subscribe functions. Only one can be specified in each configuration file.
|
||||
- **filetype**: The function to test, possible values are `insert`, `query`, `subscribe`, and `csvfile`, corresponding to the insert, query, subscribe, and CSV file generation functions. Only one can be specified in each configuration file.
|
||||
|
||||
- **cfgdir**: Directory where the TDengine client configuration file is located, default path is /etc/taos.
|
||||
|
||||
- **output_dir**: The directory specified for output files. When `filetype` is `csvfile`, it refers to the directory where the generated CSV files will be saved. The default value is ./output/.
|
||||
|
||||
- **host**: Specifies the FQDN of the TDengine server to connect to, default value is localhost.
|
||||
|
||||
- **port**: The port number of the TDengine server to connect to, default value is 6030.
|
||||
|
@ -283,6 +286,27 @@ Parameters related to supertable creation are configured in the `super_tables` s
|
|||
- **repeat_ts_max** : Numeric type, when composite primary key is enabled, specifies the maximum number of records with the same timestamp to be generated
|
||||
- **sqls** : Array of strings type, specifies the array of SQL statements to be executed after the supertable is successfully created; table names specified in the SQL must be prefixed with the database name, otherwise an "unspecified database" error will occur
|
||||
|
||||
- **csv_file_prefix**: String type, sets the prefix for the names of the generated csv files. Default value is "data".
|
||||
|
||||
- **csv_ts_format**: String type, sets the format of the time string in the names of the generated csv files, following the `strftime` format standard. If not set, files will not be split by time intervals. Supported patterns include:
|
||||
- %Y: Year as a four-digit number (e.g., 2025)
|
||||
- %m: Month as a two-digit number (01 to 12)
|
||||
- %d: Day of the month as a two-digit number (01 to 31)
|
||||
- %H: Hour in 24-hour format as a two-digit number (00 to 23)
|
||||
- %M: Minute as a two-digit number (00 to 59)
|
||||
- %S: Second as a two-digit number (00 to 59)
|
||||
|
||||
- **csv_ts_interval**: String type, sets the time interval for splitting generated csv file names. Supports daily, hourly, minute, and second intervals such as 1d/2h/30m/40s. The default value is "1d".
|
||||
|
||||
- **csv_output_header**: String type, sets whether the generated csv files should contain column header descriptions. The default value is "yes".
|
||||
|
||||
- **csv_tbname_alias**: String type, sets the alias for the tbname field in the column header descriptions of csv files. The default value is "device_id".
|
||||
|
||||
- **csv_compress_level**: String type, sets the compression level for generating CSV-encoded data and automatically compressing it into a gzip file. This process directly encodes and compresses the data, rather than first generating a CSV file and then compressing it. Possible values are:
|
||||
- none: No compression
|
||||
- fast: gzip level 1 compression
|
||||
- balance: gzip level 6 compression
|
||||
- best: gzip level 9 compression
|
||||
|
||||
#### Tag and Data Columns
|
||||
|
||||
|
@ -347,10 +371,14 @@ Specify the configuration parameters for tag and data columns in `super_tables`
|
|||
|
||||
### Query Parameters
|
||||
|
||||
In query scenarios, `filetype` must be set to `query`.
|
||||
`filetype` must be set to `query`.
|
||||
|
||||
`query_mode` connection method:
|
||||
- "taosc": Native.
|
||||
- "rest" : RESTful.
|
||||
|
||||
`query_times` specifies the number of times to run the query, numeric type.
|
||||
|
||||
Query scenarios can control the execution of slow query statements by setting the `kill_slow_query_threshold` and `kill_slow_query_interval` parameters: `threshold` specifies that queries whose execution time exceeds the given exec_usec value will be killed by taosBenchmark, in seconds; `interval` specifies the sleep time between checks, to avoid continuous slow queries consuming CPU, in seconds.
|
||||
|
||||
For other common parameters, see [General Configuration Parameters](#general-configuration-parameters)
|
||||
|
||||
|
@ -358,13 +386,26 @@ For other common parameters, see [General Configuration Parameters](#general-con
|
|||
|
||||
Configuration parameters for querying specified tables (can specify supertables, subtables, or regular tables) are set in `specified_table_query`.
|
||||
- **mixed_query** : Query mode. "yes": `Mixed Query`; "no": `General Query`; default is "no".
|
||||
`General Query`:
|
||||
Each SQL statement in `sqls` starts `threads` threads that query it. Each thread exits after executing `query_times` queries, and the next SQL statement can be executed only after all threads running the current one have completed.
|
||||
The total number of queries (`General Query`) = the number of `sqls` * `query_times` * `threads`
|
||||
`Mixed Query`:
|
||||
All SQL statements in `sqls` are divided into `threads` groups, with each thread executing one group. Each SQL statement needs to execute `query_times` queries.
|
||||
The total number of queries(`Mixed Query`) = the number of `sqls` * `query_times`
|
||||
The total number of queries(`Mixed Query`) = the number of `sqls` * `query_times`.
|
||||
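As a worked illustration (hypothetical numbers, not from the manual): with 2 statements in `sqls`, `query_times` = 10, and `threads` = 3:

```text
General Query: 2 * 10 * 3 = 60 queries in total
Mixed Query:   2 * 10     = 20 queries in total
```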
|
||||
- **batch_query** : Batch query switch.
|
||||
"yes": indicates that it is enabled.
|
||||
"no": indicates that it is not enabled, and other values report errors.
|
||||
Batch query refers to dividing all SQL statements in `sqls` into `threads` groups, with each thread executing one group.
|
||||
Each SQL statement is queried only once before the thread exits. The main thread waits for all threads to complete, then checks whether the `query_interval` parameter is set; if a sleep for the specified time is required, it sleeps, after which each thread group is restarted and the previous process repeats until the number of queries is exhausted.
|
||||
Functional limitations:
|
||||
- Only supports scenarios where `mixed_query` is set to 'yes'.
|
||||
- Restful queries are not supported, meaning `query_mode` cannot be 'rest'.
|
||||
|
||||
- **query_interval** : Query interval, in milliseconds, default is 0.
|
||||
When the `batch_query` switch is enabled, it indicates the interval after each batch query completes; when disabled, it indicates the interval between each individual SQL query completion.
|
||||
If the query execution time exceeds the interval, no wait occurs; if it is less than the interval, the thread waits to make up the interval time.
|
||||
|
||||
- **threads** : Number of threads executing the SQL query, default is 1.
|
||||
- **sqls**:
|
||||
- **sql**: The SQL command to execute, required.
|
||||
|
@ -478,6 +519,17 @@ Note: Data types in the taosBenchmark configuration file must be in lowercase to
|
|||
|
||||
</details>
|
||||
|
||||
### Export CSV File Example
|
||||
|
||||
<details>
|
||||
<summary>csv-export.json</summary>
|
||||
|
||||
```json
|
||||
{{#include /TDengine/tools/taos-tools/example/csv-export.json}}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
For other JSON examples, see [here](https://github.com/taosdata/TDengine/tree/main/tools/taos-tools/example)
|
||||
|
||||
## Output Performance Indicators
|
||||
|
|
|
@ -124,7 +124,39 @@ FLOOR(expr)
|
|||
```
|
||||
|
||||
**Function Description**: Gets the floor of the specified field.
|
||||
Other usage notes see CEIL function description.
|
||||
For other usage notes, see the [CEIL](#ceil) function description.
|
||||
|
||||
#### GREATEST
|
||||
```sql
|
||||
GREATEST(expr1, expr2[, expr]...)
|
||||
```
|
||||
|
||||
**Function Description**: Get the maximum value of all input parameters. The minimum number of parameters for this function is 2.
|
||||
|
||||
**Version**: ver-3.3.6.0
|
||||
|
||||
**Return Type**: Determined by the comparison rules; the comparison type is the final return type.
|
||||
|
||||
**Applicable Data Types**:
|
||||
- Numeric types: timestamp, bool, integer, and floating-point types
|
||||
- String types: nchar and varchar types.
|
||||
|
||||
**Comparison rules**: The following rules describe the conversion method of the comparison operation:
|
||||
- If any parameter is NULL, the comparison result is NULL.
|
||||
- If all parameters in the comparison operation are string types, compare them as string types.
|
||||
- If all parameters are numeric types, compare them as numeric types.
|
||||
- If there are both string types and numeric types in the parameters, according to the `compareAsStrInGreatest` configuration item, they are uniformly compared as strings or numeric values. By default, they are compared as strings.
|
||||
- In all cases, when different types are compared, the comparison type will choose the type with a larger range for comparison. For example, when comparing integer types, if there is a BIGINT type, BIGINT will definitely be selected as the comparison type.
|
||||
|
||||
**Related configuration items**: Client configuration parameter `compareAsStrInGreatest`. 1 means that when both string and numeric types are present, all parameters are converted to strings for comparison; 0 means they are converted to numeric types. The default is 1.
|
||||
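The following is a minimal sketch of these rules, assuming a hypothetical table `t1` with an INT column `i`, a BIGINT column `b`, and a VARCHAR column `s`:

```sql
-- All-numeric parameters are compared numerically; with INT and BIGINT
-- inputs, the wider type (BIGINT) becomes the comparison and return type.
SELECT GREATEST(i, b) FROM t1;

-- Mixed numeric and string parameters: with the default
-- compareAsStrInGreatest = 1, all parameters are compared as strings.
SELECT GREATEST(i, s) FROM t1;

-- If any parameter is NULL, the result is NULL.
SELECT GREATEST(i, NULL) FROM t1;
```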
|
||||
|
||||
#### LEAST
|
||||
```sql
|
||||
LEAST(expr1, expr2[, expr]...)
|
||||
```
|
||||
|
||||
**Function Description**: Get the minimum value of all input parameters. The rest of the description is the same as the [GREATEST](#greatest) function.
|
||||
|
||||
#### LOG
|
||||
|
||||
|
|
|
@ -127,10 +127,11 @@ Displays created indexes.
|
|||
## SHOW LOCAL VARIABLES
|
||||
|
||||
```sql
|
||||
SHOW LOCAL VARIABLES;
|
||||
SHOW LOCAL VARIABLES [like pattern];
|
||||
```
|
||||
|
||||
Displays the runtime values of configuration parameters for the current client.
|
||||
You can use the like pattern to filter by name.
|
||||
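For example (a sketch; the exact parameter names depend on your client version):

```sql
-- Show only local parameters whose names start with "debug".
SHOW LOCAL VARIABLES LIKE 'debug%';
```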
|
||||
## SHOW MNODES
|
||||
|
||||
|
@ -320,11 +321,11 @@ Displays information about all users in the current system, including user-defin
|
|||
## SHOW CLUSTER VARIABLES (before version 3.0.1.6 it was SHOW VARIABLES)
|
||||
|
||||
```sql
|
||||
SHOW CLUSTER VARIABLES;
|
||||
SHOW DNODE dnode_id VARIABLES;
|
||||
SHOW CLUSTER VARIABLES [like pattern];
|
||||
SHOW DNODE dnode_id VARIABLES [like pattern];
|
||||
```
|
||||
|
||||
Displays the runtime values of configuration parameters that need to be the same across nodes in the current system, or you can specify a DNODE to view its configuration parameters.
|
||||
Displays the runtime values of configuration parameters that need to be the same across nodes in the current system, or you can specify a DNODE to view its configuration parameters. You can also use the like pattern to filter by name.
|
||||
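For example (a sketch, assuming a dnode with ID 1 exists):

```sql
-- Filter cluster-wide parameters by name.
SHOW CLUSTER VARIABLES LIKE 'timezone%';

-- Filter the parameters of a specific dnode by name.
SHOW DNODE 1 VARIABLES LIKE 'debug%';
```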
|
||||
## SHOW VGROUPS
|
||||
|
||||
|
|
|
@ -485,10 +485,10 @@ This document details the server error codes that may be encountered when using
|
|||
| Error Code | Description | Possible Scenarios or Reasons | Recommended Actions |
|
||||
| ---------- | ---------------------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
|
||||
| 0x80002901 | udf is stopping | udf call received when dnode exits | Stop executing udf queries |
|
||||
| 0x80002902 | udf pipe read error | Error occurred when taosd reads from udfd pipe | udfd unexpectedly exits, 1) C udf crash 2) udfd crash |
|
||||
| 0x80002903 | udf pipe connect error | Error establishing pipe connection to udfd in taosd | 1) Corresponding udfd not started in taosd. Restart taosd |
|
||||
| 0x80002904 | udf pipe not exist | Connection error occurs between two phases of udf setup, call, and teardown, causing the connection to disappear, subsequent phases continue | udfd unexpectedly exits, 1) C udf crash 2) udfd crash |
|
||||
| 0x80002905 | udf load failure | Error loading udf in udfd | 1) udf does not exist in mnode 2) Error in udf loading. Check logs |
|
||||
| 0x80002902 | udf pipe read error | Error occurred when taosd reads from taosudf pipe | taosudf unexpectedly exits, 1) C udf crash 2) taosudf crash |
|
||||
| 0x80002903 | udf pipe connect error | Error establishing pipe connection to taosudf in taosd | 1) Corresponding taosudf not started in taosd. Restart taosd |
|
||||
| 0x80002904 | udf pipe not exist | Connection error occurs between two phases of udf setup, call, and teardown, causing the connection to disappear, subsequent phases continue | taosudf unexpectedly exits, 1) C udf crash 2) taosudf crash |
|
||||
| 0x80002905 | udf load failure | Error loading udf in taosudf | 1) udf does not exist in mnode 2) Error in udf loading. Check logs |
|
||||
| 0x80002906 | udf invalid function input | udf input check | udf function does not accept input, such as wrong column type |
|
||||
| 0x80002907 | udf invalid bufsize | Intermediate result in udf aggregation function exceeds specified bufsize | Increase bufsize, or reduce intermediate result size |
|
||||
| 0x80002908 | udf invalid output type | udf output type differs from the type specified when creating udf | Modify udf, or the type when creating udf, to match the result |
|
||||
|
|
|
@ -94,7 +94,7 @@ The sink task is responsible for receiving the output results from the agg task
|
|||
The above three types of tasks each play their roles in the stream computing architecture, distributed at different levels. Clearly, the number of source tasks directly depends on the number of vnodes, with each source task independently handling the data in its vnode without interference from other source tasks, and there are no sequential constraints. However, it is worth noting that if the final stream computing results converge to one table, then only one sink task will be deployed on the vnode where that table is located. The collaborative relationship between these three types of tasks is shown in the following diagram, together forming the complete execution process of stream computing tasks.
|
||||
|
||||
<figure>
|
||||
<Image img={imgStep02} alt="Relationships between tasks"/>
|
||||
<Image img={imgStep03} alt="Relationships between tasks"/>
|
||||
<figcaption>Figure 3. Relationships between tasks</figcaption>
|
||||
</figure>
|
||||
|
||||
|
|
|
@ -0,0 +1,293 @@
|
|||
---
|
||||
title: DST (Daylight Saving Time) Usage
|
||||
description: Explanation and suggestions for using DST (Daylight Saving Time) in TDengine
|
||||
---
|
||||
|
||||
## Background
|
||||
|
||||
When working with time-series databases, you will sometimes encounter Daylight Saving Time (DST). Here we analyze and explain the use and issues of DST in TDengine to help you use TDengine more smoothly.
|
||||
|
||||
## Definitions
|
||||
|
||||
### Time Zone
|
||||
|
||||
A time zone is a region on Earth that uses the same standard time. Due to the Earth's rotation, to ensure that the time in each place is coordinated with the local sunrise and sunset, the world is divided into multiple time zones.
|
||||
|
||||
### IANA Time Zone
|
||||
|
||||
The IANA (Internet Assigned Numbers Authority) time zone database, also known as the tz database, provides a standard reference for global time zone information. It is the basis for modern systems and software to handle time zone-related operations.
|
||||
|
||||
IANA uses the "Region/City" format (e.g., Europe/Berlin) to clearly identify time zones.
|
||||
|
||||
TDengine supports the use of IANA time zones in different components (except for the time zone settings in Windows taos.cfg).
|
||||
|
||||
### Standard Time and Local Time
|
||||
|
||||
Standard time is the time determined based on a fixed meridian on Earth. It provides a unified reference point for each time zone.
|
||||
|
||||
- Greenwich Mean Time (GMT): Historically used reference time, located at the 0° meridian.
|
||||
- Coordinated Universal Time (UTC): The modern time standard, similar to GMT but more precise.
|
||||
|
||||
The relationship between standard time and time zones is as follows:
|
||||
|
||||
- Reference: Standard time (e.g., UTC) is the reference point for setting time zones.
|
||||
- Offset: Different time zones are defined by their offset from standard time. For example, UTC+1 means 1 hour ahead of UTC.
|
||||
- Regional Division: The world is divided into multiple time zones, each using one or more standard times.
|
||||
|
||||
Relative to standard time, each region sets its local time based on its time zone:
|
||||
|
||||
- Time Zone Offset: Local time equals standard time plus the offset of the time zone. For example, UTC+2 means 2 hours ahead of UTC.
|
||||
- Daylight Saving Time (DST): Some regions adjust their local time during specific periods, such as moving the clock forward by one hour. See the next section for details.
|
||||
|
||||
### Daylight Saving Time
|
||||
|
||||
Daylight Saving Time (DST) is a system that advances the time by one hour to make better use of daylight and save energy. It usually starts in spring and ends in autumn. The specific start and end times of DST vary by region. The following explanation uses Berlin time as an example to illustrate DST and its effects.
|
||||
|
||||

|
||||
|
||||
According to this rule, you can see:
|
||||
|
||||
- The time between 02:00:00 and 03:00:00 (excluding 03:00:00) on March 31, 2024, in Berlin local time does not exist (the clock jumps forward).
|
||||
- The time between 02:00:00 and 03:00:00 (excluding 03:00:00) on October 27, 2024, in Berlin local time appears twice.
|
||||
|
||||
#### DST and the IANA Time Zone Database
|
||||
|
||||
- Recording Rules: The IANA time zone database records detailed DST rules for each region, including the start and end dates and times.
|
||||
- Automatic Adjustment: Many operating systems and software use the IANA database to automatically handle DST adjustments.
|
||||
- Historical Changes: The IANA database also tracks historical DST changes to ensure accuracy.
|
||||
|
||||
#### DST and Timestamp Conversion
|
||||
|
||||
- Converting a timestamp to local time is deterministic. For example, 1729990654 is Berlin time DST 2024-10-27 02:57:34, and 1729994254 is Berlin time standard time 2024-10-27 02:57:34 (these two local times are the same except for the time offset).
|
||||
- Without specifying the time offset, converting local time to a timestamp is indeterminate. Time skipped at the start of DST does not exist and cannot be converted to a timestamp: for example, Berlin time 2024-03-31 02:34:56 does not exist. Time repeated at the end of DST cannot be resolved to a single timestamp: for example, 2024-10-27 02:57:34 without a time offset could be either 1729990654 or 1729994254. Specifying the time offset makes the timestamp deterministic: for example, 2024-10-27 02:57:34 CEST (+02:00) identifies the DST timestamp 1729990654.
|
||||
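As a sketch of the deterministic direction (the timestamps below are the millisecond equivalents of the values above; the second argument of `to_iso8601` is the display offset):

```sql
-- One timestamp always maps to exactly one local time.
SELECT to_iso8601(1729990654000, '+02:00');  -- 2024-10-27T02:57:34.000+02:00 (Berlin DST)
SELECT to_iso8601(1729994254000, '+01:00');  -- 2024-10-27T02:57:34.000+01:00 (Berlin standard time)
```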
|
||||
### RFC3339 Time Format
|
||||
|
||||
RFC 3339 is an internet time format standard used to represent dates and times. It is based on the ISO 8601 standard but defines some format details more strictly.
|
||||
|
||||
The format is as follows:
|
||||
|
||||
- Basic Format: `YYYY-MM-DDTHH:MM:SSZ`
|
||||
- Time Zone Representation:
|
||||
- Z represents Coordinated Universal Time (UTC).
|
||||
- Offset format, such as +02:00, represents the time difference from UTC.
|
||||
|
||||
With explicit time zone offsets, the RFC 3339 format can accurately parse and compare times globally.
|
||||
|
||||
The advantages of RFC 3339 include:
|
||||
|
||||
- Standardization: Provides a unified format for easy cross-system data exchange.
|
||||
- Clarity: Clearly indicates time zone information, avoiding time misunderstandings.
|
||||
|
||||
TDengine uses the RFC3339 format for display in the REST API and Explorer UI. In SQL statements, you can use the RFC3339 format to write timestamp data:
|
||||
|
||||
```sql
|
||||
insert into t1 values('2024-10-27T01:59:59.000Z', 0);
|
||||
select * from t1 where ts >= '2024-10-27T01:59:59.000Z';
|
||||
```
|
||||
|
||||
### Undefined Behavior
|
||||
|
||||
Undefined behavior refers to code or operations whose result is not clearly defined and for which no compatibility guarantee is given. TDengine may change the current behavior in a future version without notifying users. Therefore, users should not rely on the current undefined behavior when making judgments or building applications on TDengine.
|
||||
|
||||
## Writing and Querying DST in TDengine
|
||||
|
||||
We use the following table to show the impact of DST on writing and querying.
|
||||
|
||||

|
||||
|
||||
### Table Explanation
|
||||
|
||||
- **TIMESTAMP**: TDengine uses a 64-bit integer to store raw timestamps.
|
||||
- **UTC**: The UTC time representation corresponding to the timestamp.
|
||||
- **Europe/Berlin**: The RFC3339 format time corresponding to the Europe/Berlin time zone.
|
||||
- **Local**: The local time corresponding to the Europe/Berlin time zone (without time zone).
|
||||
|
||||
### Table Analysis
|
||||
|
||||
- At the **start of DST** (Berlin time March 31, 02:00), the time jumps directly from 02:00 to 03:00 (one hour forward).
|
||||
- Light green is the timestamp one hour before the start of DST;
|
||||
- Dark green is the timestamp one hour after the start of DST;
|
||||
- Red indicates that the nonexistent local time was inserted into the TDengine database:
|
||||
- Using SQL `INSERT INTO t1 values('2024-03-31 02:59:59',..)` to insert data from `2024-03-31 02:00:00` to `2024-03-31 02:59:59` will be automatically adjusted to -1000 (in TDengine this is undefined behavior; currently the value depends on the database precision: -1000 for a millisecond database, -1000000 for microsecond, and -1000000000 for nanosecond), because that moment does not exist in local time;
|
||||
- At the **end of DST** (Berlin time October 27, 03:00), the time jumps from 03:00 to 02:00 (one hour back).
|
||||
- Light blue indicates the timestamp one hour before the clock jump;
|
||||
- Dark blue indicates the timestamp within one hour after the clock jump; its local time without a time zone is the same as in the previous hour.
|
||||
- Purple indicates the timestamp one hour after the clock jump;
|
||||
- **Local Time Changes**: It can be seen that due to the adjustment of DST, local time changes, which may cause some time periods to appear repeated or missing.
|
||||
- **UTC Time Unchanged**: UTC time remains unchanged, ensuring the consistency and order of time.
|
||||
- **RFC3339**: The RFC3339 format time shows the change in time offset, changing to +02:00 after the start of DST and to +01:00 after the end of DST.
|
||||
- **Conditional Query**:
|
||||
- At the **start of DST**, the skipped time (`[03-31 02:00:00,03-31 03:00:00)`) does not exist, so using that time for queries results in undefined behavior: `SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59'` (the nonexistent local timestamp is converted to `-1000`):
|
||||
|
||||
```sql
|
||||
taos> SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59';
|
||||
ts |
|
||||
=================
|
||||
-1000 |
|
||||
Query OK, 1 row(s) in set (0.003635s)
|
||||
```
|
||||
|
||||
When a nonexistent timestamp is used together with an existing timestamp, the result is also not as expected, as shown below where the starting local time does not exist:
|
||||
|
||||
```sql
|
||||
taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 03:59:59';
|
||||
ts | to_iso8601(ts,'Z') |
|
||||
==================================================
|
||||
-1000 | 1969-12-31T23:59:59.000Z |
|
||||
1711843200000 | 2024-03-31T00:00:00.000Z |
|
||||
1711846799000 | 2024-03-31T00:59:59.000Z |
|
||||
1711846800000 | 2024-03-31T01:00:00.000Z |
|
||||
1711846801000 | 2024-03-31T01:00:01.000Z |
|
||||
Query OK, 5 row(s) in set (0.003339s)
|
||||
```
|
||||
|
||||
In the following statements, the end time of the first SQL query does not exist, while that of the second does. The result of the first query is not as expected:
|
||||
|
||||
```sql
|
||||
taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 02:00:00';
|
||||
Query OK, 0 row(s) in set (0.000930s)
|
||||
|
||||
taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 01:59:59';
|
||||
ts | to_iso8601(ts,'Z') |
|
||||
==================================================
|
||||
1711843200000 | 2024-03-31T00:00:00.000Z |
|
||||
1711846799000 | 2024-03-31T00:59:59.000Z |
|
||||
Query OK, 2 row(s) in set (0.001227s)
|
||||
```
|
||||
|
||||
- At the end of DST, the repeated time (`[10-27 02:00:00,10-27 03:00:00)` excluding `10-27 03:00:00`) appears twice, and using that time range for queries in TDengine is also undefined behavior.
|
||||
- Querying the data between `[2024-10-27 02:00:00, 2024-10-27 03:00:00]` includes the repeated timestamps and the data at `2024-10-27 03:00:00`:
|
||||
|
||||
```sql
|
||||
taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts BETWEEN '2024-10-27 02:00:00' AND '2024-10-27 03:00:00';
|
||||
ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
|
||||
=======================================================================================
|
||||
1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
|
||||
1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
|
||||
1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
|
||||
1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27 02:59:59 |
|
||||
1729994400000 | 2024-10-27T02:00:00.000Z | 2024-10-27 03:00:00 |
|
||||
Query OK, 5 row(s) in set (0.001370s)
|
||||
```
|
||||
|
||||
- However, the following query for the range [2024-10-27 02:00:00.000, 2024-10-27 02:57:00.999] can only find the data at the first 2024-10-27 02:00:00 time point:
|
||||
|
||||
```sql
|
||||
taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:00.999';
|
||||
ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
|
||||
=======================================================================================
|
||||
1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
|
||||
Query OK, 1 row(s) in set (0.004480s)
|
||||
```
|
||||
|
||||
- The following query for the range [2024-10-27 02:00:00, 2024-10-27 02:57:35] finds 3 rows of data (including one row of local time data at 02:59:59):
|
||||
|
||||
```sql
|
||||
taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:35';
|
||||
ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
|
||||
===============================================================================================
|
||||
2024-10-27 02:00:00.000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
|
||||
2024-10-27 02:59:59.000 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
|
||||
2024-10-27 02:00:00.000 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
|
||||
Query OK, 3 row(s) in set (0.004428s)
|
||||
```
|
||||
|
||||
## Summary and Suggestions
|
||||
|
||||
### Summary
|
||||
|
||||
This explanation only addresses the impact of using local time; using UNIX timestamps or the RFC3339 format is unaffected.
|
||||
|
||||
- Writing:
|
||||
- It is not possible to write data for nonexistent times during the DST transition.
|
||||
- Writing data for repeated times during the DST transition is undefined behavior.
|
||||
- Querying:
|
||||
- Querying with conditions that specify the skipped time during the start of DST results in undefined behavior.
|
||||
- Querying with conditions that specify the repeated time during the end of DST results in undefined behavior.
|
||||
- Display:
|
||||
- Displaying with time zones is not affected.
|
||||
- Displaying local time is accurate, but repeated times during the end of DST cannot be distinguished.
|
||||
- Users should be cautious when using time without time zones for display and application.
|
||||
|
||||
### Suggestions
|
||||
|
||||
To avoid unnecessary impacts of DST on querying and writing in TDengine, it is recommended to use explicit time offsets for writing and querying.
|
||||
|
||||
- Use UNIX Timestamps: Using UNIX timestamps can avoid time zone issues.
|
||||
|
||||
| TIMESTAMP | UTC | Europe/Berlin | Local |
|
||||
| ------------: | :----------------------: | :---------------------------: | :-----------------: |
|
||||
| 1711846799000 | 2024-03-31T00:59:59.000Z | 2024-03-31T01:59:59.000+01:00 | 2024-03-31 01:59:59 |
|
||||
| 1711846800000 | 2024-03-31T01:00:00.000Z | 2024-03-31T03:00:00.000+02:00 | 2024-03-31 03:00:00 |
|
||||
|
||||
```sql
|
||||
taos> insert into t1 values(1711846799000, 1)(1711846800000, 2);
|
||||
Insert OK, 2 row(s) affected (0.001434s)
|
||||
|
||||
taos> select * from t1 where ts between 1711846799000 and 1711846800000;
|
||||
ts | v1 |
|
||||
===============================
|
||||
1711846799000 | 1 |
|
||||
1711846800000 | 2 |
|
||||
Query OK, 2 row(s) in set (0.003503s)
|
||||
```
|
||||
|
||||
- Use RFC3339 Time Format: The RFC3339 time format with time zone offsets can effectively avoid the uncertainty of DST.
|
||||
|
||||
| TIMESTAMP | UTC | Europe/Berlin | Local |
|
||||
| ------------: | :----------------------: | :---------------------------: | :-----------------: |
|
||||
| 1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27T02:00:00.000+02:00 | 2024-10-27 02:00:00 |
|
||||
| 1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27T02:59:59.000+02:00 | 2024-10-27 02:59:59 |
|
||||
| 1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27T02:00:00.000+01:00 | 2024-10-27 02:00:00 |
|
||||
| 1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27T02:59:59.000+01:00 | 2024-10-27 02:59:59 |
|
||||
|
||||
```sql
|
||||
taos> insert into t1 values ('2024-10-27T02:00:00.000+02:00', 1)
|
||||
('2024-10-27T02:59:59.000+02:00', 2)
|
||||
('2024-10-27T02:00:00.000+01:00', 3)
|
||||
('2024-10-27T02:59:59.000+01:00', 4);
|
||||
Insert OK, 4 row(s) affected (0.001514s)
|
||||
|
||||
taos> SELECT *,
|
||||
to_iso8601(ts,'Z'),
|
||||
to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1
|
||||
WHERE ts >= '2024-10-27T02:00:00.000+02:00'
|
||||
AND ts <= '2024-10-27T02:59:59.000+01:00';
|
||||
ts | v1 | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
|
||||
=====================================================================================================
|
||||
1729987200000 | 1 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
|
||||
1729990799000 | 2 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
|
||||
1729990800000 | 3 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
|
||||
1729994399000 | 4 | 2024-10-27T01:59:59.000Z | 2024-10-27 02:59:59 |
|
||||
Query OK, 4 row(s) in set (0.004275s)
|
||||
|
||||
taos> SELECT *,
|
||||
to_iso8601(ts,'Z'),
|
||||
to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1
|
||||
WHERE ts >= '2024-10-27T02:00:00.000+02:00'
|
||||
AND ts <= '2024-10-27T02:59:59.000+02:00';
|
||||
ts | v1 | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
|
||||
=====================================================================================================
|
||||
1729987200000 | 1 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
|
||||
1729990799000 | 2 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
|
||||
Query OK, 2 row(s) in set (0.004275s)
|
||||
```
|
||||
|
||||
- Pay Attention to Time Zone Settings When Querying: When querying and displaying, if local time is needed, be sure to consider the impact of DST.
|
||||
- taosAdapter: When using the REST API, it supports setting the IANA time zone, and the result is returned in RFC3339 format.
|
||||
|
||||
```shell
|
||||
$ curl -uroot:taosdata 'localhost:6041/rest/sql?tz=Europe/Berlin'\
|
||||
-d "select ts from tz1.t1"
|
||||
{"code":0,"column_meta":[["ts","TIMESTAMP",8]],"data":[["1970-01-01T00:59:59.000+01:00"],["2024-03-31T01:00:00.000+01:00"],["2024-03-31T01:59:59.000+01:00"],["2024-03-31T03:00:00.000+02:00"],["2024-03-31T03:00:01.000+02:00"],["2024-10-27T02:00:00.000+02:00"],["2024-10-27T02:59:59.000+02:00"],["2024-10-27T02:00:00.000+01:00"],["2024-10-27T02:59:59.000+01:00"],["2024-10-27T03:00:00.000+01:00"]],"rows":10}
|
||||
```
|
||||
|
||||
- Explorer: When using the Explorer page for SQL queries, users can configure the client time zone to display in RFC3339 format.
|
||||
|
||||

|
||||
|
||||
## Reference Documents
|
||||
|
||||
- IANA Time Zone Database: [https://www.iana.org/time-zones](https://www.iana.org/time-zones)
|
||||
- RFC3339: [https://datatracker.ietf.org/doc/html/rfc3339](https://datatracker.ietf.org/doc/html/rfc3339)
|
After Width: | Height: | Size: 234 KiB |
After Width: | Height: | Size: 80 KiB |
After Width: | Height: | Size: 72 KiB |
Before Width: | Height: | Size: 52 KiB After Width: | Height: | Size: 268 KiB |
Before Width: | Height: | Size: 44 KiB After Width: | Height: | Size: 75 KiB |
Before Width: | Height: | Size: 64 KiB After Width: | Height: | Size: 216 KiB |
Before Width: | Height: | Size: 51 KiB After Width: | Height: | Size: 184 KiB |
Before Width: | Height: | Size: 243 KiB After Width: | Height: | Size: 334 KiB |
Before Width: | Height: | Size: 24 KiB After Width: | Height: | Size: 103 KiB |
Before Width: | Height: | Size: 22 KiB After Width: | Height: | Size: 47 KiB |
Before Width: | Height: | Size: 164 KiB After Width: | Height: | Size: 95 KiB |
Before Width: | Height: | Size: 18 KiB After Width: | Height: | Size: 162 KiB |
Before Width: | Height: | Size: 48 KiB After Width: | Height: | Size: 293 KiB |
Before Width: | Height: | Size: 15 KiB After Width: | Height: | Size: 90 KiB |
Before Width: | Height: | Size: 103 KiB After Width: | Height: | Size: 263 KiB |
Before Width: | Height: | Size: 282 KiB After Width: | Height: | Size: 162 KiB |
Before Width: | Height: | Size: 40 KiB After Width: | Height: | Size: 51 KiB |
Before Width: | Height: | Size: 52 KiB After Width: | Height: | Size: 90 KiB |
Before Width: | Height: | Size: 64 KiB After Width: | Height: | Size: 57 KiB |
Before Width: | Height: | Size: 338 KiB After Width: | Height: | Size: 264 KiB |
Before Width: | Height: | Size: 501 KiB After Width: | Height: | Size: 342 KiB |
Before Width: | Height: | Size: 93 KiB After Width: | Height: | Size: 63 KiB |
Before Width: | Height: | Size: 140 KiB After Width: | Height: | Size: 91 KiB |
Before Width: | Height: | Size: 96 KiB After Width: | Height: | Size: 68 KiB |
Before Width: | Height: | Size: 149 KiB After Width: | Height: | Size: 140 KiB |
Before Width: | Height: | Size: 131 KiB After Width: | Height: | Size: 109 KiB |
Before Width: | Height: | Size: 117 KiB After Width: | Height: | Size: 137 KiB |
Before Width: | Height: | Size: 133 KiB After Width: | Height: | Size: 82 KiB |
Before Width: | Height: | Size: 141 KiB After Width: | Height: | Size: 200 KiB |
Before Width: | Height: | Size: 70 KiB After Width: | Height: | Size: 107 KiB |
Before Width: | Height: | Size: 65 KiB After Width: | Height: | Size: 57 KiB |
Before Width: | Height: | Size: 178 KiB After Width: | Height: | Size: 135 KiB |
Before Width: | Height: | Size: 190 KiB After Width: | Height: | Size: 283 KiB |
Before Width: | Height: | Size: 26 KiB After Width: | Height: | Size: 68 KiB |
Before Width: | Height: | Size: 61 KiB After Width: | Height: | Size: 80 KiB |
|
@ -4,6 +4,10 @@ title: TDengine Data Querying
|
|||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
import win from './window.png';
|
||||
import swin from './session-window.png';
|
||||
import ewin from './event-window.png';
|
||||
|
||||
Compared with many other time-series and real-time databases, a unique advantage of TDengine is that it has supported standard SQL queries since its very first release, which greatly reduces the learning curve. This chapter uses the smart meter data model as an example to introduce how to use SQL queries in TDengine to process time-series data. For further details on SQL syntax and features, refer to the official TDengine documentation. Through this chapter, you will be able to master TDengine's SQL query techniques and efficiently operate on and analyze time-series data.
|
||||
|
||||
## Basic Queries
|
||||
|
@ -136,16 +140,15 @@ Query OK, 10 row(s) in set (2.415961s)
|
|||
|
||||
In TDengine, you can use window clauses to query aggregation results segmented by time window. This query method is especially suitable for analyzing large volumes of time-series data, for example when smart meters collect data every 10 s but the average temperature must be queried at 1 min intervals.
|
||||
|
||||
The window clause allows you to partition the queried data set by window and aggregate the data within each window, including:
|
||||
- Time window
|
||||
- Status window
|
||||
- Session window
|
||||
- Event window
|
||||
- Count window
|
||||
The window clause allows you to partition the queried data set by window and aggregate the data within each window. The window partitioning logic is shown in the figure below.
|
||||
|
||||
The window partitioning logic is shown in the figure below:
|
||||
<img src={win} width="500" alt="Common window partitioning logic" />
|
||||
|
||||

|
||||
- Time window: partitions data by time interval, supporting sliding and tumbling time windows; suitable for aggregating data over fixed time periods.
|
||||
- Status window: partitions windows based on changes in a device status value; data with the same status value belongs to one window, and the window closes when the status value changes.
|
||||
- Session window: partitions sessions based on the gaps between record timestamps; records whose timestamp gap is smaller than a preset value belong to the same session.
|
||||
- Event window: partitions windows dynamically based on start and end conditions; a window opens when the start condition is met and closes when the end condition is met.
|
||||
- Count window: partitions windows by the number of data rows; each time the specified number of rows is reached, a window is formed and aggregation is performed.
|
||||
|
||||
The window clause syntax is as follows:
|
||||
|
||||
|
@ -408,7 +411,8 @@ Query OK, 22 row(s) in set (0.153403s)
|
|||
|
||||
A session window determines whether records belong to the same session based on the value of the timestamp primary key. As shown in the figure below, if the maximum gap between consecutive timestamps is set to 12 seconds, the following 6 records form 2 session windows: [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30], because the gap between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the continuous interval (12 seconds).
|
||||
|
||||

|
||||
<img src={swin} width="320" alt="Session window diagram" />
|
||||
|
||||
|
||||
Results within the tol_value time interval are considered to belong to the same window; if the time between two consecutive records exceeds tol_val, the next window opens automatically.
|
||||
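A minimal sketch, assuming a table `meters` whose timestamp primary key is `ts`:

```sql
-- Records whose consecutive timestamps differ by no more than 12 seconds
-- fall into the same session window.
SELECT _wstart, _wend, COUNT(*)
FROM meters
SESSION(ts, 12s);
```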
|
||||
|
@ -461,7 +465,7 @@ Query OK, 10 row(s) in set (0.043489s)
|
|||
select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c2 < 10
|
||||
```
|
||||
|
||||

|
||||
<img src={ewin} width="350" alt="Event window diagram" />
|
||||
|
||||
Example SQL:
|
||||
|
||||
|
|
Before Width: | Height: | Size: 188 KiB After Width: | Height: | Size: 95 KiB |
Before Width: | Height: | Size: 22 KiB After Width: | Height: | Size: 47 KiB |
Before Width: | Height: | Size: 188 KiB After Width: | Height: | Size: 97 KiB |
|
@ -3,7 +3,12 @@ title: "FAQ"
|
|||
sidebar_label: "FAQ"
|
||||
---
|
||||
|
||||
<b>1. Creating an anode fails with "the specified service cannot be accessed"</b>
|
||||
### 1. How to handle uWSGI compilation failure during installation
|
||||
TDgpt needs to compile uWSGI locally during installation. With some Python environments (e.g., Anaconda), installing uWSGI causes conflicts that make the compilation fail, so the installation cannot continue. In this case, you can try skipping the uWSGI installation during the install process.
|
||||
Because the uWSGI installation was skipped, the taosanode service must later be started manually with `python3.10 /usr/local/taos/taosanode/lib/taosanalytics/app.py`. When running this command, make sure to use the Python interpreter from the virtual environment so the dependent libraries can be loaded.
|
||||
|
||||
|
||||
### 2. Creating an anode fails with "the specified service cannot be accessed"
|
||||
|
||||
```bash
|
||||
taos> create anode '127.0.0.1:6090';
|
||||
|
@ -26,7 +31,7 @@ curl: (7) Failed to connect to 127.0.0.1 port 6090: Connection refused
|
|||
|
||||
> Do not use `systemctl status taosanode` to check whether taosanode is running properly.
|
||||
|
||||
<b>2. Service is normal, but queries return "service unavailable"</b>
|
||||
### 3. Service is normal, but queries return "service unavailable"
|
||||
```bash
|
||||
taos> select _frowts,forecast(current, 'algo=arima, alpha=95, wncheck=0, rows=20') from d1 where ts<='2017-07-14 10:40:09.999';
|
||||
|
||||
|
@ -34,7 +39,7 @@ DB error: Analysis service can't access[0x80000441] (60.195613s)
|
|||
```
|
||||
The default timeout for data analysis is 60 s. This problem occurs when the analysis of the input data exceeds the maximum waiting time. Try reducing the input data size by limiting its range, or switch to another analysis algorithm and try again.
|
||||
|
||||
<b>3. The result returns an invalid JSON format error (Invalid json format)</b>
|
||||
### 4. The result returns an invalid JSON format error (Invalid json format)
|
||||
|
||||
The analysis result returned from the anode to TDengine is malformed. Check the anode run log `/var/log/taos/taosanode/taosanode.app.log` for detailed error information.
|
||||
|
||||
|
|
|
@ -472,10 +472,10 @@ taos> select myfun(v1, v2) from t;
|
|||
DB error: udf function execution failure (0.011088s)
|
||||
```
|
||||
|
||||
Unfortunately, the execution failed. Why? Check the log of the udfd process.
|
||||
Unfortunately, the execution failed. Why? Check the log of the taosudf process.
|
||||
|
||||
```shell
|
||||
tail -10 /var/log/taos/udfd.log
|
||||
tail -10 /var/log/taos/taosudf.log
|
||||
```
|
||||
|
||||
The following error message is found.
|
||||
|
|
|
@ -127,8 +127,8 @@ s3migrate database <db_name>;
|
|||
|
||||
When TSDB time-series data exceeds the time specified by the `s3_keeplocal` parameter, the related data files are split into multiple blocks; the default size of each block is 512 MB (`s3_chunkpages * tsdb_pagesize`). Except for the last block, which remains on the local file system, the remaining blocks are uploaded to the object storage service.
|
||||
|
||||
```math
|
||||
Number of uploads = data file size / (s3_chunkpages * tsdb_pagesize) - 1
|
||||
```text
|
||||
Number of uploads = data file size / (s3_chunkpages * tsdb_pagesize) - 1
|
||||
```
|
||||
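As a worked illustration (hypothetical sizes): with the default block size of 512 MB, a 2048 MB data file is split into 4 blocks; the last one stays local, so:

```text
Number of uploads = 2048 MB / 512 MB - 1 = 3
```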
|
||||
When creating a database, you can adjust the size of each file block with the `s3_chunkpages` parameter, thereby controlling the number of uploads per data file.
|
||||
|
@ -141,7 +141,7 @@ s3migrate database <db_name>;
|
|||
|
||||
Multiple adjacent data pages are downloaded from object storage as one data block, to reduce the number of downloads. The size of each data page is specified at database creation with the `tsdb_pagesize` parameter; the default is 4 KB.
|
||||
|
||||
```math
|
||||
```text
|
||||
Number of downloads = number of data blocks required by the query - number of data blocks already cached
|
||||
```
|
||||
|
||||
|
|
|
@ -26,18 +26,18 @@ Tableau is a well-known business intelligence tool that supports multiple data sources
|
|||
|
||||
**Step 3**, Click the `DSN` radio button, then select the configured data source (MyTDengine), and click the `Connect` button. After the connection succeeds, delete the content appended to the connection string, and finally click the `Sign In` button.
|
||||
|
||||

|
||||

|
||||
|
||||
## Data Analysis
|
||||
|
||||
**Step 1**, On the workbook page, select the connected data source. Click the database drop-down list to display the databases available for analysis. Then click the search button in the table option to display all tables in that database. Drag the table to be analyzed to the area on the right to display the table structure.
|
||||
|
||||

|
||||

|
||||
|
||||
**Step 2**, Click the "Update Now" button at the bottom to display the data in the table.
|
||||
|
||||

|
||||

|
||||
|
||||
**Step 3**, Click "Worksheet" at the bottom of the window to open the data analysis window, which shows all fields of the analyzed table. Drag fields to rows and columns to display charts.
|
||||
|
||||

|
||||

|
|
@ -19,22 +19,22 @@ title: Integration with Excel
|
|||
|
||||
**Step 2**, Start Excel in a Windows environment, then choose [Data] -> [Get Data] -> [From Other Sources] -> [From ODBC].
|
||||
|
||||

|
||||

|
||||
|
||||
**Step 3**, In the pop-up window, select the data source to connect to from the [Data source name (DSN)] drop-down list, then click the [OK] button.
|
||||
|
||||

|
||||

|
||||
|
||||
**Step 4**, Enter the TDengine username and password.
|
||||
|
||||

|
||||

|
||||
|
||||
**Step 5**, In the [Navigator] dialog box that appears, select the tables to load and click [Load] to complete the data loading.
|
||||
|
||||

|
||||

|
||||
|
||||
## Data Analysis
|
||||
|
||||
Select the imported data, choose a bar chart on the [Insert] tab, and configure the data fields in the [PivotChart] pane on the right.
|
||||
|
||||

|
||||

|
||||
|
|
|
@ -0,0 +1,83 @@
|
|||
---
|
||||
sidebar_label: FineBI
|
||||
title: Integration with FineBI
|
||||
---
|
||||
|
||||
FanRuan is a technology company specializing in business intelligence and data analytics. With its two self-developed core products, FineBI and FineReport, it holds an important position in the industry. FanRuan's BI tools are widely used in all kinds of enterprises, helping users achieve data visualization and analysis, report generation, and data-driven decision support.
|
||||
|
||||
By using the `TDengine Java connector`, FineBI can quickly access TDengine data. Users can connect directly to the TDengine database in FineBI to obtain time-series data for analysis and build visual reports, without writing any code in the entire process.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Prepare the following environment:
|
||||
|
||||
- A TDengine cluster of version 3.3.4.0 or above is deployed and running normally (both Enterprise and Community editions are supported).
|
||||
- taosAdapter is running normally; for details, see the [taosAdapter reference manual](../../../reference/components/taosadapter).
|
||||
- FineBI is installed (if not, download and install it from the [FineBI download page](https://www.finebi.com/product/download)).
|
||||
- Download the `fine_conf_entity` plugin, which enables adding JDBC drivers, from the [download address](https://market.fanruan.com/plugin/1052a471-0239-4cd8-b832-045d53182c5d).
|
||||
- Install the JDBC driver. Download the `TDengine JDBC` connector file `taos-jdbcdriver-3.4.0-dist.jar` or a later version from `maven.org`.
|
||||
|
||||
## Configuring the Data Source
|
||||
|
||||
**Step 1**, In the FineBI server configuration file `db.script`, find the `SystemConfig.driverUpload` configuration item and change it to `true`.
|
||||
|
||||
- Windows: the configuration file path is `webapps/webroot/WEB-INF/embed/finedb/db.script` under the installation directory.
|
||||
- Linux/Mac: the configuration file path is `/usr/local/FineBI6.1/webapps/webroot/WEB-INF/embed/finedb/db.script`.
|
||||
|
||||
**Step 2**, Start the FineBI service and open `http://ip:37799/webroot/decision` in a browser, where ip is the IP address of the FineBI server.
|
||||
|
||||
**Step 3**, After opening the FineBI web page and logging in, click [Management System] -> [Plugin Management], and in the [App Store] on the right, click [Install from Local] and select the downloaded `fine_conf_entity` plugin to install it.
|
||||
|
||||

|
||||
|
||||
**Step 4**, Click [Management System] -> [Data Connection] -> [Data Connection Management], click the [Driver Management] button on the right page to open the configuration page, then click the [New Driver] button, enter a name in the pop-up window (e.g., `tdengine-websocket`), and configure the JDBC driver.
|
||||
|
||||

|
||||
|
||||
**Step 5**, On the driver configuration page, click the [Upload File] button and select the downloaded `TDengine Java Connector` (e.g., `taos-jdbcdriver-3.4.0-dist.jar`) to upload. After the upload completes, select `com.taosdata.jdbc.ws.WebSocketDriver` from the [Driver] drop-down list and click [Save].
|
||||
|
||||

|
||||
|
||||
**Step 6**, On the "Data Connection Management" page, click the [New Data Connection] button, then click "Others", and on the right page click "Other JDBC" to configure the connection.
|
||||
|
||||

|
||||
|
||||
**Step 7**, On the configuration page, first enter the data connection name, then select "Custom" in the [Driver] option and choose the configured driver from the drop-down list (e.g., `com.taosdata.jdbc.ws.WebSocketDriver (tdengine-websocket)`), then configure the "Data Connection URL" (e.g., `jdbc:TAOS-WS://localhost:6041/power?user=root&password=taosdata&fineBIDialect=mysql`). After the settings are completed, click [Test Connection] in the top-right corner; once verification succeeds, click [Save] to finish the configuration.
|
||||
|
||||
:::tip
|
||||
The `fineBIDialect=mysql` setting means that FineBI adopts the SQL dialect rules of the MySQL database. Simply put, it tells FineBI to parse and execute queries and operations in the specific way that MySQL handles SQL statements.
|
||||
:::
|
||||
|
||||

|
||||
|
||||
## Data Analysis
|
||||
|
||||
### Data Preparation
|
||||
|
||||
**Step 1**, Click [Public Data]. On the right-hand page, click [New Folder] to create a folder (e.g., `TDengine`). Then, click the [+] button on the right side of the folder to create a "Database Table" dataset or an "SQL Dataset".
|
||||
|
||||

|
||||
|
||||
**Step 2**, Click "Database Table" to open the table selection page. Select the previously created connection in "Data Connection" on the left; all tables in the database of the current connection are then displayed on the right. Select the table to load (e.g., `meters`) and click [OK] to display the data in the `meters` table.
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
**Step 3**, Click "SQL Dataset" to open the SQL dataset configuration page. First enter the table name (used for display on the FineBI page), then select the previously created connection from the "Data from Data Connection" drop-down list, enter the SQL statement, and click Preview to see the query results. Finally, click [OK] to create the SQL dataset.
|
||||
|
||||

|
||||
|
||||
### Smart Meter Example
|
||||
|
||||
**Step 1**, Click [My Analysis]. On the right-hand page, click [New Folder] to create a folder (e.g., `TDengine`). Then, click the [+] button on the right side of the folder to create an "Analysis Subject".
|
||||
|
||||

|
||||
|
||||
**Step 2**, On the analysis subject page, select the dataset (e.g., `meters`) and click the [OK] button to complete the dataset association.
|
||||
|
||||

|
||||
|
||||
**Step 3**, Click the [Component] tab at the bottom of the analysis subject page to open the chart configuration page. Drag fields to the horizontal or vertical axis to display the chart.
|
||||
|
||||

|
Before Width: | Height: | Size: 305 KiB |
After Width: | Height: | Size: 481 KiB |