Merge branch '3.0' of https://github.com/taosdata/TDengine into enh/TS-5445-3.0
@ -6,6 +6,7 @@ on:
      - 'main'
      - '3.0'
      - '3.1'
      - '3.3.6'
      - 'enh/cmake-TD-33848'
    paths-ignore:
@ -9,17 +9,12 @@ on:
    paths-ignore:
      - 'packaging/**'
      - 'docs/**'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}-TDengine
  cancel-in-progress: true

env:
  WKC: '/var/lib/jenkins/workspace/TDinternal/community'

jobs:
@ -28,430 +23,71 @@ jobs:
  fetch-parameters:
    runs-on:
      group: CI
      labels: [self-hosted, Linux, X64, testing]
    outputs:
      run_function_test: ${{ steps.parameters.outputs.run_function_test }}
      run_tdgpt_test: ${{ steps.parameters.outputs.run_tdgpt_test }}
    steps:
      - name: Determine trigger source and fetch parameters
        id: parameters
        run: |
          set -euo pipefail
          target_branch=${{ github.event.pull_request.base.ref }}

          # Check whether to run tdgpt test cases
          cd ${{ env.WKC }}
          changed_files_non_doc=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | grep -v "^docs/en/" | grep -v "^docs/zh/" | grep -v ".md$" | tr '\n' ' ' || :)
          echo "changed files exclude doc: ${changed_files_non_doc}"

          if [[ -n "$changed_files_non_doc" && "$changed_files_non_doc" =~ (forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt) ]]; then
            run_tdgpt_test="true"
          else
            run_tdgpt_test="false"
          fi
          echo "run tdgpt test: ${run_tdgpt_test}"

          # Check whether to run function test cases
          changed_files_non_tdgpt=$(git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD $target_branch) | \
            grep -v "^docs/en/" | \
            grep -v "^docs/zh/" | \
            grep -v ".md$" | \
            grep -Ev "forecastoperator\.c|anomalywindowoperator\.c|tanalytics\.h|tanalytics\.c|tdgpt_cases\.task|analytics|tdgpt" | \
            tr '\n' ' ' || :)
          echo "changed files exclude tdgpt: ${changed_files_non_tdgpt}"

          if [ -n "$changed_files_non_tdgpt" ]; then
            run_function_test="true"
          else
            run_function_test="false"
          fi
          echo "run function test: ${run_function_test}"

          # Output the results for GitHub Actions
          echo "run_function_test=$run_function_test" >> $GITHUB_OUTPUT
          echo "run_tdgpt_test=$run_tdgpt_test" >> $GITHUB_OUTPUT
          echo ${{ github.event.pull_request.head.ref }}
          echo ${{ github.event.pull_request.base.ref }}
          echo ${{ github.event.pull_request.number }}

  run-tests-on-linux:
    uses: taosdata/.github/.github/workflows/run-tests-on-linux.yml@main
    needs: fetch-parameters
    if: ${{ needs.fetch-parameters.outputs.run_tdgpt_test == 'true' || needs.fetch-parameters.outputs.run_function_test == 'true' }}
    with:
      tdinternal: false
      run_function_test: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
      run_tdgpt_test: ${{ needs.fetch-parameters.outputs.run_tdgpt_test == 'true' }}

  run-tests-on-mac:
    uses: taosdata/.github/.github/workflows/run-tests-on-macos.yml@main
    needs: fetch-parameters
    if: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
    with:
      tdinternal: false

  run-tests-on-windows:
    uses: taosdata/.github/.github/workflows/run-tests-on-windows.yml@main
    needs: fetch-parameters
    if: ${{ needs.fetch-parameters.outputs.run_function_test == 'true' }}
    with:
      tdinternal: false
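The change-detection logic above can also be exercised outside CI. A minimal sketch, assuming a local checkout whose upstream target branch is `3.0` (the branch name here is hypothetical):

```sh
# Sketch: list non-doc changed files relative to the merge base with the
# target branch, mirroring the workflow's filter.
target_branch=3.0
git fetch origin "$target_branch"
git --no-pager diff --name-only HEAD "$(git merge-base HEAD "origin/$target_branch")" \
  | grep -v "^docs/en/" | grep -v "^docs/zh/" | grep -v ".md$" || :
```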
@ -0,0 +1,51 @@
|
||||||
|
name: TDgpt CI
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- '3.0'
|
||||||
|
paths:
|
||||||
|
- 'tools/tdgpt/**'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
python-version: ["3.10"]
|
||||||
|
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
working-directory: ${{ github.workspace }}/tools/tdgpt
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Python ${{ matrix.python-version }}
|
||||||
|
uses: actions/setup-python@v3
|
||||||
|
with:
|
||||||
|
python-version: ${{ matrix.python-version }}
|
||||||
|
cache: 'pip'
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip
|
||||||
|
python -m pip install flake8 pytest pylint
|
||||||
|
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
|
||||||
|
|
||||||
|
- name: Checking the code with pylint
|
||||||
|
run: |
|
||||||
|
pylint $(git ls-files '*.py') --exit-zero
|
||||||
|
|
||||||
|
- name: Checking the code with flake8
|
||||||
|
run: |
|
||||||
|
# stop the build if there are Python syntax errors or undefined names
|
||||||
|
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
|
||||||
|
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
|
||||||
|
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
|
||||||
|
|
||||||
|
- name: Run test cases with pytest
|
||||||
|
run: |
|
||||||
|
pytest
|
|
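The same checks can be reproduced locally before pushing; a minimal sketch, run from `tools/tdgpt`:

```sh
# Install the tools the workflow uses, then run the same lint and test steps.
python -m pip install flake8 pytest pylint
pylint $(git ls-files '*.py') --exit-zero
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
pytest
```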
@ -0,0 +1,41 @@
|
||||||
|
name: TDgpt Update Service
|
||||||
|
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
- cron: '30 00 * * *'
|
||||||
|
|
||||||
|
env:
|
||||||
|
WKC: "/root/TDengine"
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
update-service:
|
||||||
|
runs-on:
|
||||||
|
group: CI
|
||||||
|
labels: [self-hosted, Linux, X64, tdgpt-anode-service]
|
||||||
|
steps:
|
||||||
|
- name: Update TDengine codes
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
cd ${{ env.WKC }}
|
||||||
|
git checkout 3.0
|
||||||
|
|
||||||
|
- name: Package the TDGpt Anode Service
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
cd ${{ env.WKC }}/tools/tdgpt/script && ./release.sh
|
||||||
|
|
||||||
|
- name: Reinstall and restart the TDGpt Anode Service
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
cd ${{ env.WKC }}/tools/tdgpt/release
|
||||||
|
if [[ -f "TDengine-enterprise-anode-1.0.1.tar.gz" ]]; then
|
||||||
|
tar -xzf TDengine-enterprise-anode-1.0.1.tar.gz
|
||||||
|
cd TDengine-enterprise-anode-1.0.1
|
||||||
|
./install.sh
|
||||||
|
fi
|
||||||
|
systemctl restart taosanoded
|
||||||
|
|
||||||
|
- name: Clean up
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
if [[ -f ${{ env.WKC }}/tools/tdgpt/release/TDengine-enterprise-anode-1.0.1 ]] then rm -rf ${{ env.WKC }}/tools/tdgpt/release/TDengine-enterprise-anode-1.0.1; fi
|
|
@ -145,20 +145,19 @@ Query OK, 10 row(s) in set (2.415961s)

In TDengine, you can use the window clause to perform aggregation queries by time window partitioning, which is particularly suitable for scenarios requiring analysis of large amounts of time-series data, such as smart meters collecting data every 10s but needing to query the average temperature every 1min.

The window clause allows you to partition the queried data set by windows and aggregate the data within each window. The logic of window partitioning is shown in the following image:

<figure>
<Image img={windowModel} alt="Windowing description"/>
<figcaption>Figure 1. Windowing logic</figcaption>
</figure>

- Time Window: Data is divided based on time intervals, supporting sliding and tumbling time windows, suitable for data aggregation over fixed time periods.
- Status Window: Windows are divided based on changes in device status values, with data of the same status value grouped into one window, which closes when the status value changes.
- Session Window: Sessions are divided based on the differences in record timestamps, with records having a timestamp interval less than the predefined value belonging to the same session.
- Event Window: Windows are dynamically divided based on the start and end conditions of events, opening when the start condition is met and closing when the end condition is met.
- Count Window: Windows are divided based on the number of data rows, with each window consisting of a specified number of rows for aggregation calculations.
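For instance, under the smart-meter scenario above, a minimal time-window query might look like the following sketch (the `meters` supertable and its `current` column are assumptions, matching TDengine's usual example schema):

```sql
-- Average current per 1-minute tumbling window over one day of data.
SELECT _wstart, AVG(current)
FROM meters
WHERE ts >= '2024-01-01 00:00:00' AND ts < '2024-01-02 00:00:00'
INTERVAL(1m);
```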
The syntax for the window clause is as follows:

```sql
@ -86,9 +86,15 @@ The keep alive interval is the time interval negotiated between the client and t

In **Clean Session**, choose whether to clear the session. The default value is true.

In **Topics Qos Config**, fill in the topic names and QoS levels to subscribe to, using the format `{topic_name}::{qos}` (e.g., `my_topic::0`). The MQTT 5.0 protocol supports shared subscriptions, which allow multiple clients to subscribe to the same topic for load balancing; the format is `$share/{group_name}/{topic_name}::{qos}`, where `$share` is a fixed prefix indicating that shared subscription is enabled, and `group_name` is the client group name, similar to Kafka's consumer group.
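Combining the plain and shared forms, the subscription field might contain the following (topic and group names are hypothetical):

```text
sensor/temperature::0,$share/group1/sensor/humidity::1
```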
In **Topic Analysis**, fill in the MQTT topic parsing rules. The format is the same as the MQTT topic itself: each level of the topic is parsed into a corresponding variable name, and `_` indicates that the level is ignored during parsing. For example, if the MQTT topic `a/+/c` corresponds to the parsing rule `v1/v2/_`, the first level `a` is assigned to the variable `v1`, the value of the second level (where the wildcard `+` matches any value) is assigned to the variable `v2`, and the value of the third level `c` is ignored and not assigned to any variable. In the `payload parsing` step below, the variables obtained from topic parsing can also participate in various transformations and calculations.

In **Compression**, configure the message body compression algorithm. After receiving a message, taosX uses the corresponding compression algorithm to decompress the message body and obtain the original data. Options are none (no compression), gzip, snappy, lz4, and zstd; the default is none.

In **Char Encoding**, configure the message body encoding format. After receiving a message, taosX uses the corresponding encoding format to decode the message body and obtain the original data. Options are UTF_8, GBK, GB18030, and BIG5; the default is UTF_8.

Click the **Check Connection** button to check if the data source is available.

<figure>
<Image img={imgStep05} alt=""/>
@ -339,7 +339,7 @@ Helm operates Kubernetes using kubectl and kubeconfig configurations, which can

The TDengine Chart has not yet been released to the Helm repository; it can currently be downloaded directly from GitHub:

```shell
wget https://github.com/taosdata/TDengine-Operator/raw/refs/heads/3.0/helm/tdengine-enterprise-3.5.0.tgz
```

Note that it's for the enterprise edition, and the community edition is not yet available.
@ -43,7 +43,7 @@ After modifying configuration file parameters, you need to restart the *taosd* s

|resolveFQDNRetryTime | Cancelled after 3.x |Not supported |Number of retries when FQDN resolution fails|
|timeToGetAvailableConn | Cancelled after 3.3.4.x |Not supported |Maximum waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|maxShellConns | Cancelled after 3.x |Supported, effective after restart |Maximum number of connections allowed|
|maxRetryWaitTime | |Supported, effective after restart |Maximum timeout for reconnection, calculated from the time of retry, range is 3000-86400000, in milliseconds, default value 10000|
|shareConnLimit |Added in 3.3.4.0 |Supported, effective after restart |Number of requests a connection can share, range 1-512, default value 10|
|readTimeout |Added in 3.3.4.0 |Supported, effective after restart |Minimum timeout for a single request, range 64-604800, in seconds, default value 900|
@ -44,6 +44,7 @@ The TDengine client driver provides all the APIs needed for application programm

|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages|
|minSlidingTime | |Supported, effective immediately |Internal parameter, minimum allowable value for sliding|
|minIntervalTime | |Supported, effective immediately |Internal parameter, minimum allowable value for interval|
|compareAsStrInGreatest | v3.3.6.0 |Supported, effective immediately |Controls type conversion when the greatest and least functions take both numeric and string parameters. Integer value: 1 means all parameters are converted to strings for comparison; 0 means all parameters are converted to numeric types. Default is 1.|

### Writing Related
@ -188,9 +188,12 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)

The parameters listed in this section apply to all functional modes.

- **filetype**: The function to test, possible values are `insert`, `query`, `subscribe`, and `csvfile`, corresponding to the insert, query, subscribe, and CSV file generation functions. Only one can be specified in each configuration file.

- **cfgdir**: Directory where the TDengine client configuration file is located, default path is /etc/taos.

- **output_dir**: The directory for output files. When the functional mode is csvfile, it refers to the directory where the generated CSV files will be saved. The default value is ./output/.

- **host**: Specifies the FQDN of the TDengine server to connect to, default value is localhost.

- **port**: The port number of the TDengine server to connect to, default value is 6030.
@ -283,6 +286,27 @@ Parameters related to supertable creation are configured in the `super_tables` s

- **repeat_ts_max** : Numeric type, when composite primary key is enabled, specifies the maximum number of records with the same timestamp to be generated
- **sqls** : Array of strings type, specifies the array of SQL statements to be executed after the supertable is successfully created; the table name specified in the SQL must be prefixed with the database name, otherwise an unspecified database error will occur

- **csv_file_prefix**: String type, sets the prefix for the names of the generated CSV files. Default value is "data".

- **csv_ts_format**: String type, sets the format of the time string in the names of the generated CSV files, following the `strftime` format standard. If not set, files will not be split by time intervals. Supported patterns include:
  - %Y: Year as a four-digit number (e.g., 2025)
  - %m: Month as a two-digit number (01 to 12)
  - %d: Day of the month as a two-digit number (01 to 31)
  - %H: Hour in 24-hour format as a two-digit number (00 to 23)
  - %M: Minute as a two-digit number (00 to 59)
  - %S: Second as a two-digit number (00 to 59)

- **csv_ts_interval**: String type, sets the time interval for splitting generated CSV file names. Supports daily, hourly, minute, and second intervals such as 1d/2h/30m/40s. The default value is "1d".

- **csv_output_header**: String type, sets whether the generated CSV files should contain column header descriptions. The default value is "yes".

- **csv_tbname_alias**: String type, sets the alias for the tbname field in the column header descriptions of CSV files. The default value is "device_id".

- **csv_compress_level**: String type, sets the compression level used when generating CSV-encoded data and automatically compressing it into a gzip file. This process directly encodes and compresses the data, rather than first generating a CSV file and then compressing it. Possible values are:
  - none: No compression
  - fast: gzip level 1 compression
  - balance: gzip level 6 compression
  - best: gzip level 9 compression
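To illustrate how these fields fit together, here is a rough configuration sketch. Only the `csv_*` keys and `filetype`/`output_dir` come from the descriptions above; the database and supertable keys are the usual insert-mode layout and are illustrative assumptions — see the shipped csv-export.json example later in this page for the authoritative version:

```json
{
  "filetype": "csvfile",
  "output_dir": "./csv/",
  "databases": [
    {
      "dbinfo": { "name": "csvdb" },
      "super_tables": [
        {
          "name": "meters",
          "childtable_count": 2,
          "insert_rows": 100,
          "csv_file_prefix": "data",
          "csv_ts_format": "%Y%m%d",
          "csv_ts_interval": "1d",
          "csv_output_header": "yes",
          "csv_tbname_alias": "device_id",
          "csv_compress_level": "none"
        }
      ]
    }
  ]
}
```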
#### Tag and Data Columns
@ -478,6 +502,17 @@ Note: Data types in the taosBenchmark configuration file must be in lowercase to

</details>

### Export CSV File Example

<details>
<summary>csv-export.json</summary>

```json
{{#include /TDengine/tools/taos-tools/example/csv-export.json}}
```

</details>
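Once written, such a configuration is run the same way as any other taosBenchmark configuration file, e.g.:

```shell
taosBenchmark -f csv-export.json
```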
For other JSON examples, see [here](https://github.com/taosdata/TDengine/tree/main/tools/taos-tools/example).

## Output Performance Indicators
@ -124,7 +124,39 @@ FLOOR(expr)
```

**Function Description**: Gets the floor of the specified field.
For other usage notes, see the [CEIL](#ceil) function description.

#### GREATEST

```sql
GREATEST(expr1, expr2[, expr]...)
```

**Function Description**: Gets the maximum value of all input parameters. This function takes a minimum of 2 parameters.

**Version**: ver-3.3.6.0

**Return Type**: Refer to the comparison rules; the comparison type is also the final return type.

**Applicable Data Types**:

- Numeric types: timestamp, bool, integer, and floating point types
- String types: nchar and varchar types

**Comparison rules**: The following rules describe how the comparison converts types:

- If any parameter is NULL, the comparison result is NULL.
- If all parameters are string types, they are compared as strings.
- If all parameters are numeric types, they are compared as numeric values.
- If the parameters mix string and numeric types, the `compareAsStrInGreatest` configuration item determines whether they are uniformly compared as strings or as numeric values. By default, they are compared as strings.
- In all cases, when different types are compared, the comparison type is the type with the larger range. For example, when integer types are compared and a BIGINT value is present, BIGINT is selected as the comparison type.

**Related configuration items**: Client configuration; when compareAsStrInGreatest is 1, both string and numeric parameters are converted to strings for comparison, and when it is 0 they are converted to numeric types. The default is 1.
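Based on these rules, a few illustrative queries (the literal values are hypothetical; the results assume the default `compareAsStrInGreatest = 1`):

```sql
SELECT GREATEST(1, 2.5, 3);  -- all numeric: compared as numbers, returns 3
SELECT GREATEST('2', '10');  -- all strings: compared as strings, returns '2'
SELECT GREATEST(2, '10');    -- mixed types: compared as strings by default, returns '2'
```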
#### LEAST

```sql
LEAST(expr1, expr2[, expr]...)
```

**Function Description**: Gets the minimum value of all input parameters. The rest of the description is the same as for the [GREATEST](#greatest) function.
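For symmetry with the GREATEST examples above (same hypothetical values and default configuration):

```sql
SELECT LEAST(2, '10');  -- compared as strings by default, returns '10'
```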
#### LOG
@ -94,7 +94,7 @@ The sink task is responsible for receiving the output results from the agg task

The above three types of tasks each play their roles in the stream computing architecture, distributed at different levels. Clearly, the number of source tasks directly depends on the number of vnodes, with each source task independently handling the data in its vnode without interference from other source tasks, and there are no sequential constraints. However, it is worth noting that if the final stream computing results converge to one table, then only one sink task will be deployed on the vnode where that table is located. The collaborative relationship between these three types of tasks is shown in the following diagram, together forming the complete execution process of stream computing tasks.

<figure>
<Image img={imgStep03} alt="Relationships between tasks"/>
<figcaption>Figure 3. Relationships between tasks</figcaption>
</figure>
@ -0,0 +1,293 @@
---
title: DST(Daylight Saving Time) Usage
description: Explanation and suggestions for using DST(Daylight Saving Time) in TDengine
---

## Background

In the use of time-series databases, there are times when Daylight Saving Time (DST) is encountered. We analyze and explain the use and issues of DST in TDengine to help you use TDengine more smoothly.

## Definitions

### Time Zone

A time zone is a region on Earth that uses the same standard time. Due to the Earth's rotation, to ensure that the time in each place is coordinated with the local sunrise and sunset, the world is divided into multiple time zones.

### IANA Time Zone

The IANA (Internet Assigned Numbers Authority) time zone database, also known as the tz database, provides a standard reference for global time zone information. It is the basis for modern systems and software to handle time zone-related operations.

IANA uses the "Region/City" format (e.g., Europe/Berlin) to clearly identify time zones.

TDengine supports the use of IANA time zones in different components (except for the time zone settings in Windows taos.cfg).

### Standard Time and Local Time

Standard time is the time determined based on a fixed meridian on Earth. It provides a unified reference point for each time zone.

- Greenwich Mean Time (GMT): Historically used reference time, located at the 0° meridian.
- Coordinated Universal Time (UTC): The modern time standard, similar to GMT but more precise.

The relationship between standard time and time zones is as follows:

- Reference: Standard time (e.g., UTC) is the reference point for setting time zones.
- Offset: Different time zones are defined by their offset from standard time. For example, UTC+1 means 1 hour ahead of UTC.
- Regional Division: The world is divided into multiple time zones, each using one or more standard times.

Relative to standard time, each region sets its local time based on its time zone:

- Time Zone Offset: Local time equals standard time plus the offset of the time zone. For example, UTC+2 means 2 hours ahead of UTC.
- Daylight Saving Time (DST): Some regions adjust their local time during specific periods, such as moving the clock forward by one hour. See the next section for details.

### Daylight Saving Time

Daylight Saving Time (DST) is a system that advances the time by one hour to make better use of daylight and save energy. It usually starts in spring and ends in autumn. The specific start and end times of DST vary by region. The following explanation uses Berlin time as an example to illustrate DST and its effects.

![DST Berlin](./02-dst/dst-berlin.png)

According to this rule, you can see:

- The time between 02:00:00 and 03:00:00 (excluding 03:00:00) on March 31, 2024, in Berlin local time does not exist (it is skipped).
- The time between 02:00:00 and 03:00:00 (excluding 03:00:00) on October 27, 2024, in Berlin local time appears twice.

#### DST and the IANA Time Zone Database

- Recording Rules: The IANA time zone database records detailed DST rules for each region, including the start and end dates and times.
- Automatic Adjustment: Many operating systems and software use the IANA database to automatically handle DST adjustments.
- Historical Changes: The IANA database also tracks historical DST changes to ensure accuracy.

#### DST and Timestamp Conversion

- Converting a timestamp to local time is deterministic. For example, 1729990654 is Berlin time DST 2024-10-27 02:57:34, and 1729994254 is Berlin time standard time 2024-10-27 02:57:34 (these two local times are the same except for the time offset).
- Without specifying the time offset, converting local time to a timestamp is indeterminate. The time skipped during the DST transition does not exist and cannot be converted to a timestamp: Berlin time 2024-03-31 02:34:56, for example, does not exist. The repeated time at the end of DST cannot be resolved to a single timestamp: without a time offset, 2024-10-27 02:57:34 could be either 1729990654 or 1729994254. Specifying the time offset removes the ambiguity: 2024-10-27 02:57:34 CEST (+02:00) is the DST instant with timestamp 1729990654.
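The two interpretations of the repeated wall-clock time can be made visible with explicit offsets; a small sketch using TDengine's `TO_ISO8601` function (millisecond timestamps assumed):

```sql
-- The same local time 02:57:34 corresponds to two different instants:
SELECT TO_ISO8601(1729990654000, '+02:00');  -- 2024-10-27T02:57:34.000+02:00 (during DST)
SELECT TO_ISO8601(1729994254000, '+01:00');  -- 2024-10-27T02:57:34.000+01:00 (after DST ends)
```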
||||||
|
### RFC3339 Time Format
|
||||||
|
|
||||||
|
RFC 3339 is an internet time format standard used to represent dates and times. It is based on the ISO 8601 standard but specifies some format details more specifically.
|
||||||
|
|
||||||
|
The format is as follows:
|
||||||
|
|
||||||
|
- Basic Format: `YYYY-MM-DDTHH:MM:SSZ`
|
||||||
|
- Time Zone Representation:
|
||||||
|
- Z represents Coordinated Universal Time (UTC).
|
||||||
|
- Offset format, such as +02:00, represents the time difference from UTC.
|
||||||
|
|
||||||
|
With explicit time zone offsets, the RFC 3339 format can accurately parse and compare times globally.
|
||||||
|
|
||||||
|
The advantages of RFC 3339 include:
|
||||||
|
|
||||||
|
- Standardization: Provides a unified format for easy cross-system data exchange.
|
||||||
|
- Clarity: Clearly indicates time zone information, avoiding time misunderstandings.
|
||||||
|
|
||||||
|
TDengine uses the RFC3339 format for display in REST API and Explorer UI. In SQL statements, you can use the RFC3339 format to write timestamp data:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
insert into t1 values('2024-10-27T01:59:59.000Z', 0);
|
||||||
|
select * from t1 where ts >= '2024-10-27T01:59:59.000Z';
|
||||||
|
```
|
||||||
|
|
||||||
|
### Undefined Behavior
|
||||||
|
|
||||||
|
Undefined behavior refers to specific code or operations that do not have a clearly defined result and do not guarantee compatibility with that result. TDengine may modify the current behavior in a future version without notifying users. Therefore, users should not rely on the current undefined behavior for judgment or application in TDengine.
|
||||||
|
|
||||||
|
## Writing and Querying DST in TDengine
|
||||||
|
|
||||||
|
We use the following table to show the impact of DST on writing and querying.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
### Table Explanation
|
||||||
|
|
||||||
|
- **TIMESTAMP**: TDengine uses a 64-bit integer to store raw timestamps.
|
||||||
|
- **UTC**: The UTC time representation corresponding to the timestamp.
|
||||||
|
- **Europe/Berlin**: The RFC3339 format time corresponding to the Europe/Berlin time zone.
|
||||||
|
- **Local**: The local time corresponding to the Europe/Berlin time zone (without time zone).
|
||||||
|
|
||||||
|
### Table Analysis
|
||||||
|
|
||||||
|
- At the **start of DST** (Berlin time March 31, 02:00), the time jumps directly from 02:00 to 03:00 (one hour forward).
|
||||||
|
- Light green is the timestamp one hour before the start of DST;
|
||||||
|
- Dark green is the timestamp one hour after the start of DST;
|
||||||
|
- Red indicates that the nonexistent local time was inserted into the TDengine database:
|
||||||
|
- Using SQL `INSERT INTO t1 values('2024-03-31 02:59:59',..)` to insert data from `2024-03-31 02:00:00` to `2024-03-31 02:59:59` will be automatically adjusted to -1000 (in TDengine, this is undefined behavior, currently this value is related to the database precision, millisecond database is -1000, microsecond database is -1000000, nanosecond database is -1000000000), because that moment does not exist in local time;
|
||||||
|
- At the **end of DST** (Berlin time October 27, 03:00), the time jumps from 03:00 back to 02:00 (one hour back).
  - Light blue indicates the timestamps in the hour before the clock jump;
  - Dark blue indicates the timestamps in the hour after the clock jump, whose local time without a time zone is the same as in the previous hour;
  - Purple indicates the timestamps from one hour after the clock jump onwards.
- **Local Time Changes**: As shown, the DST adjustment changes local time, which can make some time periods appear duplicated or missing.
- **UTC Time Unchanged**: UTC time remains unchanged, preserving the consistency and ordering of time.
- **RFC3339**: The RFC3339 format shows the change in time offset, to +02:00 after the start of DST and to +01:00 after the end of DST.
- **Conditional Query**:
  - At the **start of DST**, the skipped hour (`[03-31 02:00:00, 03-31 03:00:00)`) does not exist, so using that time in queries results in undefined behavior: `SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59'` (the nonexistent local timestamp is converted to `-1000`):

    ```sql
    taos> SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59';
             ts      |
    =================
              -1000 |
    Query OK, 1 row(s) in set (0.003635s)
    ```

    When a nonexistent timestamp is combined with an existing timestamp, the result is also not as expected; below, the start local time does not exist:

    ```sql
    taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 03:59:59';
             ts      |      to_iso8601(ts,'Z')       |
    ==================================================
              -1000 | 1969-12-31T23:59:59.000Z |
      1711843200000 | 2024-03-31T00:00:00.000Z |
      1711846799000 | 2024-03-31T00:59:59.000Z |
      1711846800000 | 2024-03-31T01:00:00.000Z |
      1711846801000 | 2024-03-31T01:00:01.000Z |
    Query OK, 5 row(s) in set (0.003339s)
    ```

    In the following statements, the end time of the first query does not exist while the end time of the second one does; the first query's result is not as expected:

    ```sql
    taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 02:00:00';
    Query OK, 0 row(s) in set (0.000930s)

    taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 01:59:59';
             ts      |      to_iso8601(ts,'Z')       |
    ==================================================
      1711843200000 | 2024-03-31T00:00:00.000Z |
      1711846799000 | 2024-03-31T00:59:59.000Z |
    Query OK, 2 row(s) in set (0.001227s)
    ```

  - At the end of DST, the repeated hour (`[10-27 02:00:00, 10-27 03:00:00)`, excluding `10-27 03:00:00`) occurs twice, so using that time range for queries in TDengine is also undefined behavior.
    - Querying the data between `[2024-10-27 02:00:00, 2024-10-27 03:00:00]` returns both occurrences of the repeated timestamps as well as the data at `2024-10-27 03:00:00`:

      ```sql
      taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts BETWEEN '2024-10-27 02:00:00' AND '2024-10-27 03:00:00';
               ts      |      to_iso8601(ts,'Z')       | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
      =======================================================================================
        1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
        1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
        1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
        1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27 02:59:59 |
        1729994400000 | 2024-10-27T02:00:00.000Z | 2024-10-27 03:00:00 |
      Query OK, 5 row(s) in set (0.001370s)
      ```

    - However, the following query for the range [2024-10-27 02:00:00.000, 2024-10-27 02:57:00.999] finds only the data at the first `2024-10-27 02:00:00` time point:

      ```sql
      taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:00.999';
               ts      |      to_iso8601(ts,'Z')       | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
      =======================================================================================
        1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
      Query OK, 1 row(s) in set (0.004480s)
      ```

    - While the following query for the range [2024-10-27 02:00:00, 2024-10-27 02:57:35] finds 3 rows of data (including one row with local time 02:59:59):

      ```sql
      taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:35';
                       ts            |      to_iso8601(ts,'Z')       | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
      ===============================================================================================
        2024-10-27 02:00:00.000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
        2024-10-27 02:59:59.000 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
        2024-10-27 02:00:00.000 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
      Query OK, 3 row(s) in set (0.004428s)
      ```
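
One way to address exactly one of the two repeated hours is to bound the query with epoch values rather than local time. A minimal sketch, using the epoch values from the results above to select only the second, post-transition (+01:00) occurrence:

```sql
SELECT ts, to_iso8601(ts,'Z') FROM t1
WHERE ts >= 1729990800000 AND ts <= 1729994399000;
```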

## Summary and Suggestions

### Summary

This explanation only addresses the impact of using local time; using UNIX timestamps or RFC3339 has no such impact.

- Writing:
  - It is not possible to write data for the nonexistent times of the DST transition.
  - Writing data for the repeated times of the DST transition is undefined behavior.
- Querying:
  - Query conditions that specify the skipped time at the start of DST yield undefined behavior.
  - Query conditions that specify the repeated time at the end of DST yield undefined behavior.
- Display:
  - Display with a time zone is not affected.
  - Displayed local time is accurate, but the repeated times at the end of DST cannot be distinguished.
  - Users should be cautious when using times without time zones for display and in applications.

### Suggestions

To avoid unnecessary DST effects on querying and writing in TDengine, it is recommended to use explicit time offsets for both writing and querying.

- Use UNIX timestamps: UNIX timestamps avoid time zone issues altogether.

  | TIMESTAMP | UTC | Europe/Berlin | Local |
  | ------------: | :----------------------: | :---------------------------: | :-----------------: |
  | 1711846799000 | 2024-03-31T00:59:59.000Z | 2024-03-31T01:59:59.000+01:00 | 2024-03-31 01:59:59 |
  | 1711846800000 | 2024-03-31T01:00:00.000Z | 2024-03-31T03:00:00.000+02:00 | 2024-03-31 03:00:00 |

  ```sql
  taos> insert into t1 values(1711846799000, 1)(1711846800000, 2);
  Insert OK, 2 row(s) affected (0.001434s)

  taos> select * from t1 where ts between 1711846799000 and 1711846800000;
           ts      |     v1      |
  ===============================
    1711846799000 |           1 |
    1711846800000 |           2 |
  Query OK, 2 row(s) in set (0.003503s)
  ```
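
  If you have a wall-clock reading and need the corresponding epoch value, casting an RFC3339 literal keeps the offset explicit. A minimal sketch (assuming a millisecond-precision database, so the expected result matches the table above, 1711846800000):

  ```sql
  SELECT CAST('2024-03-31T03:00:00.000+02:00' AS TIMESTAMP);
  ```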

- Use the RFC3339 time format: the RFC3339 time format with an explicit time zone offset effectively avoids the uncertainty of DST.

  | TIMESTAMP | UTC | Europe/Berlin | Local |
  | ------------: | :----------------------: | :---------------------------: | :-----------------: |
  | 1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27T02:00:00.000+02:00 | 2024-10-27 02:00:00 |
  | 1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27T02:59:59.000+02:00 | 2024-10-27 02:59:59 |
  | 1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27T02:00:00.000+01:00 | 2024-10-27 02:00:00 |
  | 1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27T02:59:59.000+01:00 | 2024-10-27 02:59:59 |

  ```sql
  taos> insert into t1 values ('2024-10-27T02:00:00.000+02:00', 1)
        ('2024-10-27T02:59:59.000+02:00', 2)
        ('2024-10-27T02:00:00.000+01:00', 3)
        ('2024-10-27T02:59:59.000+01:00', 4);
  Insert OK, 4 row(s) affected (0.001514s)

  taos> SELECT *,
        to_iso8601(ts,'Z'),
        to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1
        WHERE ts >= '2024-10-27T02:00:00.000+02:00'
        AND ts <= '2024-10-27T02:59:59.000+01:00';
           ts      | v1 |      to_iso8601(ts,'Z')       | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
  =====================================================================================================
    1729987200000 |  1 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
    1729990799000 |  2 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
    1729990800000 |  3 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
    1729994399000 |  4 | 2024-10-27T01:59:59.000Z | 2024-10-27 02:59:59 |
  Query OK, 4 row(s) in set (0.004275s)

  taos> SELECT *,
        to_iso8601(ts,'Z'),
        to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1
        WHERE ts >= '2024-10-27T02:00:00.000+02:00'
        AND ts <= '2024-10-27T02:59:59.000+02:00';
           ts      | v1 |      to_iso8601(ts,'Z')       | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
  =====================================================================================================
    1729987200000 |  1 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
    1729990799000 |  2 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
  Query OK, 2 row(s) in set (0.004275s)
  ```

- Pay attention to time zone settings when querying: when local time is needed for querying and display, be sure to account for the impact of DST.
  - taosAdapter: when using the REST API, an IANA time zone can be set, and results are returned in RFC3339 format.

    ```shell
    $ curl -uroot:taosdata 'localhost:6041/rest/sql?tz=Europe/Berlin' \
      -d "select ts from tz1.t1"
    {"code":0,"column_meta":[["ts","TIMESTAMP",8]],"data":[["1970-01-01T00:59:59.000+01:00"],["2024-03-31T01:00:00.000+01:00"],["2024-03-31T01:59:59.000+01:00"],["2024-03-31T03:00:00.000+02:00"],["2024-03-31T03:00:01.000+02:00"],["2024-10-27T02:00:00.000+02:00"],["2024-10-27T02:59:59.000+02:00"],["2024-10-27T02:00:00.000+01:00"],["2024-10-27T02:59:59.000+01:00"],["2024-10-27T03:00:00.000+01:00"]],"rows":10}
    ```

  - Explorer: when using the Explorer page for SQL queries, users can configure the client time zone to display results in RFC3339 format.

    

## Reference Documents

- IANA Time Zone Database: [https://www.iana.org/time-zones](https://www.iana.org/time-zones)
- RFC3339: [https://datatracker.ietf.org/doc/html/rfc3339](https://datatracker.ietf.org/doc/html/rfc3339)

@ -4,6 +4,10 @@ title: TDengine 数据查询
 toc_max_heading_level: 4
 ---

+import win from './window.png';
+import swin from './session-window.png';
+import ewin from './event-window.png';
+
 Compared with many other time-series and real-time databases, a unique advantage of TDengine is that it has supported standard SQL queries since its very first release, which greatly lowers the learning curve. This chapter uses the smart-meter data model as an example to introduce how to process time-series data in TDengine with SQL queries. For further details of SQL syntax and features, refer to the official TDengine documentation. Through this chapter you will be able to master TDengine's SQL query techniques and operate on and analyze time-series data efficiently.

 ## Basic Queries

@ -136,16 +140,15 @@ Query OK, 10 row(s) in set (2.415961s)

 In TDengine, you can use a window clause to query aggregates over time-partitioned windows. This is especially suitable for analyzing large volumes of time-series data, for example when a smart meter collects data every 10s but the temperature average per 1min is needed (a query sketch follows the window-type list below).

-The window clause lets you partition the queried data set by window and aggregate the data within each window, including:
-
-- time window
-- status window
-- session window
-- event window
-- count window
-
-The window partitioning logic is shown in the figure below:
-
+The window clause lets you partition the queried data set by window and aggregate the data within each window. The window partitioning logic is shown in the figure below.
+
+<img src={win} width="500" alt="Window partitioning logic" />
+
+- Time window: partitions data by time interval; supports sliding and tumbling windows, suitable for aggregation over fixed time periods.
+- Status window: partitions windows by changes in a device status value; rows with the same status value fall into one window, and the window closes when the status value changes.
+- Session window: partitions sessions by timestamp gaps; records whose timestamp gap is smaller than a preset value belong to the same session.
+- Event window: partitions windows dynamically by start and end conditions; a window opens when the start condition is met and closes when the end condition is met.
+- Count window: partitions windows by row count; each time the specified number of rows is reached, one window is formed and aggregated.
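
For instance, the per-minute aggregation mentioned above maps directly onto a time window. A minimal sketch, assuming the smart-meter supertable `meters` from this chapter's data model and averaging its `current` column:

```sql
SELECT _wstart, AVG(current) FROM meters INTERVAL(1m);
```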

 The window clause syntax is as follows:

@ -408,7 +411,8 @@ Query OK, 22 row(s) in set (0.153403s)

 A session window determines whether records belong to the same session based on the value of the timestamp primary key. As shown in the figure below, if the maximum gap between consecutive timestamps is set to 12 seconds or less, the following 6 records form 2 session windows: [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30], because the gap between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the session gap (12 seconds).

-
+<img src={swin} width="320" alt="Session window" />

 Results within the tol_value gap are all considered to belong to the same window; if the time between two consecutive records exceeds tol_val, the next window opens automatically.
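
The 12-second session example above corresponds to a query of the following shape. A minimal sketch (table and column names assumed; `ts` is the timestamp primary key):

```sql
SELECT _wstart, COUNT(*) FROM t SESSION(ts, 12s);
```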

@ -461,7 +465,7 @@ Query OK, 10 row(s) in set (0.043489s)
 select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c2 < 10
 ```

-
+<img src={ewin} width="350" alt="Event window" />

 Sample SQL:
@ -3,7 +3,12 @@ title: "常见问题"
 sidebar_label: "常见问题"
 ---

-<b>1. Failed to create an anode: the specified service is unreachable</b>
+### 1. What to do when compiling uWSGI fails during installation
+
+The TDgpt installation compiles uWSGI locally. With some Python environments (for example, anaconda), installing uWSGI runs into conflicts and the build fails, so the installation cannot continue. In that case you can skip the uWSGI installation step during install.
+
+Because uWSGI was skipped, the taosanode service must then be started manually with `python3.10 /usr/local/taos/taosanode/lib/taosanalytics/app.py`. When running this command, make sure the Python interpreter from the virtual environment is used so that the dependency libraries can be loaded.
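
For example (a sketch: the virtual-environment path below is the usual taosanode install location and may differ on your system):

```shell
source /usr/local/taos/taosanode/venv/bin/activate
python3.10 /usr/local/taos/taosanode/lib/taosanalytics/app.py
```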
+
+### 2. Failed to create an anode: the specified service is unreachable

 ```bash
 taos> create anode '127.0.0.1:6090';

@ -26,7 +31,7 @@ curl: (7) Failed to connect to 127.0.0.1 port 6090: Connection refused

 > Do not use systemctl status taosanode to check whether taosanode is running properly.

-<b>2. The service is healthy, but queries return "analysis service unavailable"</b>
+### 3. The service is healthy, but queries return "analysis service unavailable"
 ```bash
 taos> select _frowts,forecast(current, 'algo=arima, alpha=95, wncheck=0, rows=20') from d1 where ts<='2017-07-14 10:40:09.999';

@ -34,7 +39,7 @@ DB error: Analysis service can't access[0x80000441] (60.195613s)
 ```
 The default analysis timeout is 60s. This error occurs when the analysis takes longer than the maximum wait time. Try reducing the amount of input data by restricting the input range, or switch to another analysis algorithm and try again.

-<b>3. The result returns an "Invalid json format" error</b>
+### 4. The result returns an "Invalid json format" error

 The analysis result returned from the anode to TDengine is malformed. Check the anode run log `/var/log/taos/taosanode/taosanode.app.log` for the detailed error message.
@ -147,7 +147,7 @@ taosd 命令行参数如下
 - Type: integer
 - Unit: milliseconds
 - Default: 10000
-- Minimum: 0
+- Minimum: 3000
 - Maximum: 86400000
 - Dynamic modification: supported via SQL, takes effect after restart
 - Supported versions: introduced in v3.3.4.0

@ -221,6 +221,12 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API,并且在
 - Dynamic modification: not supported
 - Supported versions: introduced in v3.0.0.0

+#### compareAsStrInGreatest
+
+- Description: determines the type-conversion rule when the arguments of the greatest and least functions mix numeric and string types.
+- Type: integer; 1: convert all arguments to strings for comparison, 0: convert all arguments to numeric types for comparison.
+- Dynamic modification: supported via SQL, takes effect immediately
+- Supported versions: introduced in v3.3.6.0
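
A sketch of the effect this switch describes (changing a client parameter through SQL is assumed to use the usual `ALTER LOCAL` form):

```sql
ALTER LOCAL 'compareAsStrInGreatest' '1';
SELECT GREATEST(10, '9');  -- all arguments compared as strings: '9' > '10'
ALTER LOCAL 'compareAsStrInGreatest' '0';
SELECT GREATEST(10, '9');  -- all arguments compared as numbers: 10 > 9
```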

 ### Write-related parameters

 #### smlChildTableName
@ -93,14 +93,17 @@ taosBenchmark -f <json file>

 The parameters in this section apply to all functional modes.

-- **filetype**: the function category; one of `insert`, `query`, and `subscribe`, corresponding to insertion, query, and subscription. Only one of them may be specified per configuration file.
+- **filetype**: the function category; one of `insert`, `query`, `subscribe`, and `csvfile`, corresponding to insertion, query, subscription, and CSV file generation. Only one of them may be specified per configuration file.
 - **cfgdir**: directory of the TDengine client configuration file; the default path is /etc/taos.
+- **output_dir**: the directory for output files. When the function category is `csvfile`, this is the directory where the generated CSV files are saved; default value is ./output/.
 - **host**: FQDN of the TDengine server to connect to; default value is localhost.
 - **port**: port number of the TDengine server to connect to; default value is 6030.
-- **user**: user name for connecting to the TDengine server; defaults to root.
+- **user**: user name for connecting to the TDengine server; default value is root.
 - **password**: password for connecting to the TDengine server; default value is taosdata.

@ -184,10 +187,34 @@ taosBenchmark -f <json file>

 - **tags_file**: effective only when insert_mode is taosc or rest. The final tag values are related to childtable_count: if the CSV file has fewer tag rows than the requested number of child tables, the file is read in a loop until childtable_count child tables have been generated; otherwise only childtable_count rows of tag data are read. That is, the final number of child tables is the smaller of the two.
 - **primary_key**: whether the supertable has a composite primary key; values are 1 and 0. The composite primary key column can only be the second column of the supertable. When enabled, make sure the second column matches the data type required for a composite primary key, otherwise an error is reported.
 - **repeat_ts_min**: numeric; with a composite primary key enabled, the minimum number of records generated with the same timestamp. The count is a random value in the range [repeat_ts_min, repeat_ts_max]; when the minimum equals the maximum, the count is fixed.
 - **repeat_ts_max**: numeric; with a composite primary key enabled, the maximum number of records generated with the same timestamp.
 - **sqls**: array of strings; SQL statements executed after the supertable is created successfully. Table names in the SQL must be prefixed with the database name, otherwise an "unspecified database" error is reported.
+- **csv_file_prefix**: string; prefix of the generated CSV file names; default value is data.
+- **csv_ts_format**: string; format of the time string in the generated CSV file names, following the `strftime` format standard. If unset, files are not split by time period. Supported patterns:
+  - %Y: year as four digits (for example, 2025)
+  - %m: month as two digits (01 to 12)
+  - %d: day of the month as two digits (01 to 31)
+  - %H: hour in 24-hour format as two digits (00 to 23)
+  - %M: minute as two digits (00 to 59)
+  - %S: second as two digits (00 to 59)
+- **csv_ts_interval**: string; the length of the time period in the generated CSV file names. Day, hour, minute, and second intervals are supported, e.g. 1d/2h/30m/40s; default value is 1d.
+- **csv_output_header**: string; whether the generated CSV files contain a header line; default value is yes.
+- **csv_tbname_alias**: string; alias of the tbname field in the CSV header; default value is device_id.
+- **csv_compress_level**: string; compression level used when encoding CSV data and compressing it into a gzip file on the fly (the data is encoded and compressed directly, rather than writing a CSV file first and compressing it afterwards). Options:
+  - none: no compression
+  - fast: gzip level 1
+  - balance: gzip level 6
+  - best: gzip level 9

 #### Tag Columns and Data Columns

@ -383,6 +410,17 @@ interval 控制休眠时间,避免持续查询慢查询消耗 CPU,单位为

 </details>

+### JSON example for generating CSV files
+
+<details>
+<summary>csv-export.json</summary>
+
+```json
+{{#include /TDengine/tools/taos-tools/example/csv-export.json}}
+```
+
+</details>
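
Once such a configuration file is in place, CSV generation is launched the same way as the other modes:

```shell
taosBenchmark -f csv-export.json
```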

 For more JSON configuration file examples, [click here](https://github.com/taosdata/TDengine/tree/main/tools/taos-tools/example).

 ## Output Performance Metrics
@ -0,0 +1,291 @@
---
title: Daylight Saving Time (DST) Usage Guide
description: Explanations and suggestions for issues around daylight saving time in TDengine
---

## Background

When using a time-series database, you will sometimes run into daylight saving time. Below we analyze and explain how DST behaves in TDengine and the problems it can cause, so that your use of TDengine goes more smoothly.

## Definitions

### Time Zone

A time zone is a region of the earth that uses the same standard time. Because of the earth's rotation, and to keep clocks everywhere aligned with local sunrise and sunset, the world is divided into multiple time zones.

### IANA Time Zones

The IANA (Internet Assigned Numbers Authority) time zone database, also known as the tz database, is the standard reference for global time zone information. It is the foundation that modern systems and software rely on for time zone handling.

IANA identifies time zones unambiguously using the "Area/City" format (such as Europe/Berlin).

TDengine supports IANA time zones in all of its components (except for the time zone setting in taos.cfg on Windows).
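
For example, on Linux and macOS the time zone can be set in taos.cfg with an IANA name (a sketch; per the note above, the Windows taos.cfg does not accept IANA names):

```
# taos.cfg
timezone Europe/Berlin
```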

### Standard Time and Local Time

Standard time is determined by a fixed meridian on the earth. It provides a uniform reference point for the time zones.

- Greenwich Mean Time (GMT): the historical reference time, based on the 0° meridian.
- Coordinated Universal Time (UTC): the modern time standard, similar to GMT but more precise.

Standard time relates to time zones as follows:

- Baseline: standard time (such as UTC) is the baseline from which time zones are defined.
- Offset: each time zone is defined by its offset from standard time. For example, UTC+1 is 1 hour ahead of UTC.
- Regional division: the world is divided into multiple time zones, and each zone uses one or more standard times.

Relative to standard time, each region sets its local time according to its time zone:

- Time zone offset: local time equals standard time plus the zone's offset. For example, UTC+2 is 2 hours ahead of UTC.
- Daylight saving time (DST): some regions adjust their local time during part of the year, for example by moving clocks forward one hour. See the next section.

### Daylight Saving Time

Daylight saving time (DST) is the practice of moving clocks forward one hour to make better use of daylight and save energy. It usually starts in spring and ends in autumn. The exact start and end of DST vary by region. Below, Berlin time is used throughout to explain DST and its effects.

Under this rule, you can see that:

- Berlin local time between 02:00:00 and 03:00:00 (excluding 03:00:00) on March 31, 2024 does not exist (the clock jumps forward).
- Berlin local time between 02:00:00 and 03:00:00 (excluding 03:00:00) on October 27, 2024 occurs twice.

#### DST and the IANA Time Zone Database

- Recorded rules: the IANA time zone database records each region's DST rules in detail, including start and end dates and times.
- Automatic adjustment: many operating systems and software packages use the IANA database to handle DST adjustments automatically.
- Historical changes: the IANA database also tracks historical DST changes to ensure accuracy.

#### DST and Timestamp Conversion

- Converting a timestamp to local time is deterministic. For example, 1729990654 is Berlin **summer (DST) time** `2024-10-27 02:57:34`, and 1729994254 is Berlin **winter (standard) time** `2024-10-27 02:57:34` (apart from the time offset, the two local times look identical).
- Without a time offset, converting local time to a timestamp is not deterministic. Local times skipped by DST do not exist and cannot be converted to a timestamp: **Berlin time** `2024-03-31 02:34:56` does not exist and therefore cannot be converted. Local times repeated at the end of DST cannot be resolved to a single timestamp: without an offset, `2024-10-27 02:57:34` could be either 1729990654 or 1729994254. Only with an explicit offset is the timestamp determined; for example, `2024-10-27 02:57:34 CEST (+02:00)` selects the DST reading of `2024-10-27 02:57:34`, timestamp 1729990654.
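
To make the ambiguity concrete: both literals below spell the local time 2024-10-27 02:57:34, but the explicit offsets select different instants. A minimal sketch, assuming a millisecond-precision table `t1(ts TIMESTAMP, v1 INT)`:

```sql
INSERT INTO t1 VALUES ('2024-10-27T02:57:34.000+02:00', 1);  -- epoch 1729990654000 (CEST)
INSERT INTO t1 VALUES ('2024-10-27T02:57:34.000+01:00', 2);  -- epoch 1729994254000 (CET)
```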

### RFC3339 Time Format

RFC 3339 is an internet time format standard for representing dates and times. It is based on ISO 8601 but pins down some formatting details more precisely.

Its format is:

- Basic form: `YYYY-MM-DDTHH:MM:SSZ`
- Time zone notation:
  - Z denotes Coordinated Universal Time (UTC).
  - An offset such as +02:00 denotes the difference from UTC.

With explicit time zone offsets, times in RFC 3339 format can be parsed and compared accurately anywhere in the world.

The advantages of RFC 3339 include:

- Standardization: a uniform format that simplifies data exchange across systems.
- Clarity: explicit time zone information avoids misinterpretation of times.

TDengine uses the RFC3339 format for display in both the REST API and the Explorer UI. In SQL statements, timestamp data can be written in RFC3339 format:

```sql
insert into t1 values('2024-10-27T01:59:59.000Z', 0);
select * from t1 where ts >= '2024-10-27T01:59:59.000Z';
```

### Undefined Behavior

Undefined behavior means that the result of a particular operation is not specified and carries no compatibility guarantee; TDengine may change the current behavior in a future version without notifying users. Users therefore must not rely on currently undefined behavior in TDengine for decisions or applications.

## Writing and Querying with DST in TDengine

The table below shows the effects of DST on writing and querying.

*(figure: table of DST effects on writing and querying, Berlin time)*

### Table Description

- **TIMESTAMP**: TDengine stores the raw timestamp as a 64-bit integer.
- **UTC**: the UTC representation of the timestamp.
- **Europe/Berlin**: the RFC3339-format time in the Europe/Berlin time zone.
- **Local**: the local time in Europe/Berlin (without a time zone).

### Table Analysis

- At the **start of DST** (Berlin time March 31, 02:00), the clock jumps directly from 02:00 to 03:00 (one hour forward).
  - Light green marks the timestamps in the hour before the start of DST;
  - Dark green marks the timestamps in the hour after the start of DST;
  - Red marks a nonexistent local time inserted into the TDengine database:
    - Using SQL such as `INSERT INTO t1 values('2024-03-31 02:59:59',..)` to write data between `2024-03-31 02:00:00` and `2024-03-31 02:59:59` is automatically adjusted to -1000 (undefined behavior in TDengine; the current value depends on the database precision: -1000 for a millisecond database, -1000000 for microsecond, -1000000000 for nanosecond), because that moment does not exist in local time.
- At the **end of DST** (Berlin time October 27, 03:00), the clock jumps from 03:00 back to 02:00 (one hour back).
  - Light blue marks the timestamps in the hour before the clock jump;
  - Dark blue marks the timestamps in the hour after the clock jump, whose local time without a time zone is the same as in the previous hour;
  - Purple marks the timestamps from one hour after the clock jump onwards.
- **Local time changes**: as shown, the DST adjustment changes local time, which can make some time periods appear duplicated or missing.
- **UTC unchanged**: UTC time is unaffected, preserving the consistency and ordering of time.
- **RFC3339**: the RFC3339 format shows the change in time offset, to +02:00 after the start of DST and to +01:00 after its end.
- **Conditional queries**:
  - At the **start of DST**, the skipped hour (`[03-31 02:00:00, 03-31 03:00:00)`) does not exist, so using that time in queries yields undefined behavior: `SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59'` (the nonexistent local timestamp is converted to `-1000`):

    ```sql
    taos> SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59';
             ts      |
    =================
              -1000 |
    Query OK, 1 row(s) in set (0.003635s)
    ```

    When a nonexistent timestamp is combined with an existing timestamp, the result is also not as expected; below, the start local time does not exist:

    ```sql
    taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 03:59:59';
             ts      |      to_iso8601(ts,'Z')       |
    ==================================================
              -1000 | 1969-12-31T23:59:59.000Z |
      1711843200000 | 2024-03-31T00:00:00.000Z |
      1711846799000 | 2024-03-31T00:59:59.000Z |
      1711846800000 | 2024-03-31T01:00:00.000Z |
      1711846801000 | 2024-03-31T01:00:01.000Z |
    Query OK, 5 row(s) in set (0.003339s)
    ```

    In the following statements, the end time of the first query does not exist while the end time of the second one does; the first query's result is not as expected:

    ```sql
    taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 02:00:00';
    Query OK, 0 row(s) in set (0.000930s)

    taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 01:59:59';
             ts      |      to_iso8601(ts,'Z')       |
    ==================================================
      1711843200000 | 2024-03-31T00:00:00.000Z |
      1711846799000 | 2024-03-31T00:59:59.000Z |
    Query OK, 2 row(s) in set (0.001227s)
    ```

  - At the end of DST, the repeated hour (`[10-27 02:00:00, 10-27 03:00:00)`, excluding `10-27 03:00:00`) occurs twice, so using timestamps in that range for queries in TDengine is also undefined behavior.
    - Querying the data between `[2024-10-27 02:00:00, 2024-10-27 03:00:00]` returns both occurrences of the repeated timestamps as well as the data at `2024-10-27 03:00:00`:

      ```sql
      taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts BETWEEN '2024-10-27 02:00:00' AND '2024-10-27 03:00:00';
               ts      |      to_iso8601(ts,'Z')       | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
      =======================================================================================
        1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
        1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
        1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
        1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27 02:59:59 |
        1729994400000 | 2024-10-27T02:00:00.000Z | 2024-10-27 03:00:00 |
      Query OK, 5 row(s) in set (0.001370s)
      ```

    - However, the following query for the range [2024-10-27 02:00:00.000, 2024-10-27 02:57:00.999] finds only the data at the first `2024-10-27 02:00:00` time point:

      ```sql
      taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:00.999';
               ts      |      to_iso8601(ts,'Z')       | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
      =======================================================================================
        1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
      Query OK, 1 row(s) in set (0.004480s)
      ```

    - While the following query for the range `[2024-10-27 02:00:00, 2024-10-27 02:57:35]` finds 3 rows of data (including one row with local time 02:59:59):

      ```sql
      taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:35';
                       ts            |      to_iso8601(ts,'Z')       | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
      ================================================================================================
        2024-10-27 02:00:00.000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
        2024-10-27 02:59:59.000 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
        2024-10-27 02:00:00.000 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
      Query OK, 3 row(s) in set (0.004428s)
      ```

## Summary and Suggestions

### Summary

This explanation only addresses the impact of using local time; using UNIX timestamps or RFC3339 has no such impact.

- Writing:
  - It is not possible to write data for the nonexistent times of the DST transition.
  - Writing data for the repeated times of the DST transition is undefined behavior.
- Querying:
  - Query conditions that specify the skipped time at the start of DST yield undefined behavior.
  - Query conditions that specify the repeated time at the end of DST yield undefined behavior.
- Display:
  - Display with a time zone is not affected.
  - Displayed local time is accurate, but the repeated times at the end of DST cannot be distinguished.
  - Users should be cautious when using times without time zones for display and in applications.

### Suggestions

To avoid unnecessary DST effects on querying and writing in TDengine, it is recommended to use explicit time offsets for both writing and querying.

- Use UNIX timestamps: UNIX timestamps avoid time zone issues altogether.

  | TIMESTAMP | UTC | Europe/Berlin | Local |
  | ------------: | :----------------------: | :---------------------------: | :-----------------: |
  | 1711846799000 | 2024-03-31T00:59:59.000Z | 2024-03-31T01:59:59.000+01:00 | 2024-03-31 01:59:59 |
  | 1711846800000 | 2024-03-31T01:00:00.000Z | 2024-03-31T03:00:00.000+02:00 | 2024-03-31 03:00:00 |

  ```sql
  taos> insert into t1 values(1711846799000, 1)(1711846800000, 2);
  Insert OK, 2 row(s) affected (0.001434s)

  taos> select * from t1 where ts between 1711846799000 and 1711846800000;
           ts      |     v1      |
  ===============================
    1711846799000 |           1 |
    1711846800000 |           2 |
  Query OK, 2 row(s) in set (0.003503s)
  ```

- Use the RFC3339 time format: the RFC3339 time format with an explicit time zone offset effectively avoids the uncertainty of DST.

  | TIMESTAMP | UTC | Europe/Berlin | Local |
  | ------------: | :----------------------: | :---------------------------: | :-----------------: |
  | 1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27T02:00:00.000+02:00 | 2024-10-27 02:00:00 |
  | 1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27T02:59:59.000+02:00 | 2024-10-27 02:59:59 |
  | 1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27T02:00:00.000+01:00 | 2024-10-27 02:00:00 |
  | 1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27T02:59:59.000+01:00 | 2024-10-27 02:59:59 |

  ```sql
  taos> insert into t1 values ('2024-10-27T02:00:00.000+02:00', 1)
        ('2024-10-27T02:59:59.000+02:00', 2)
        ('2024-10-27T02:00:00.000+01:00', 3)
        ('2024-10-27T02:59:59.000+01:00', 4);
  Insert OK, 4 row(s) affected (0.001514s)

  taos> SELECT *,
        to_iso8601(ts,'Z'),
        to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1
        WHERE ts >= '2024-10-27T02:00:00.000+02:00'
        AND ts <= '2024-10-27T02:59:59.000+01:00';
           ts      | v1 |      to_iso8601(ts,'Z')       | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
  =====================================================================================================
    1729987200000 |  1 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
    1729990799000 |  2 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
    1729990800000 |  3 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
    1729994399000 |  4 | 2024-10-27T01:59:59.000Z | 2024-10-27 02:59:59 |
  Query OK, 4 row(s) in set (0.004275s)

  taos> SELECT *,
        to_iso8601(ts,'Z'),
        to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1
        WHERE ts >= '2024-10-27T02:00:00.000+02:00'
        AND ts <= '2024-10-27T02:59:59.000+02:00';
           ts      | v1 |      to_iso8601(ts,'Z')       | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
  =====================================================================================================
    1729987200000 |  1 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
    1729990799000 |  2 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
  Query OK, 2 row(s) in set (0.004275s)
  ```

- Pay attention to time zone settings when querying: when local time is needed for querying and display, be sure to account for the impact of DST.
  - taosAdapter: when using the REST API, an IANA time zone can be set, and results are returned in RFC3339 format.

    ```shell
    $ curl -uroot:taosdata 'localhost:6041/rest/sql?tz=Europe/Berlin' \
      -d "select ts from tz1.t1"
    {"code":0,"column_meta":[["ts","TIMESTAMP",8]],"data":[["1970-01-01T00:59:59.000+01:00"],["2024-03-31T01:00:00.000+01:00"],["2024-03-31T01:59:59.000+01:00"],["2024-03-31T03:00:00.000+02:00"],["2024-03-31T03:00:01.000+02:00"],["2024-10-27T02:00:00.000+02:00"],["2024-10-27T02:59:59.000+02:00"],["2024-10-27T02:00:00.000+01:00"],["2024-10-27T02:59:59.000+01:00"],["2024-10-27T03:00:00.000+01:00"]],"rows":10}
    ```

  - Explorer: when using the Explorer page for SQL queries, users can configure the client time zone to display results in RFC3339 format.

    *(figure: configuring the client time zone in Explorer)*

## Reference Documents

- IANA Time Zone Database: [https://www.iana.org/time-zones](https://www.iana.org/time-zones)
- RFC3339: [https://datatracker.ietf.org/doc/html/rfc3339](https://datatracker.ietf.org/doc/html/rfc3339)
@ -28,9 +28,10 @@ extern "C" {
 #define ANALY_FORECAST_DEFAULT_ROWS 10
 #define ANALY_FORECAST_DEFAULT_CONF 95
 #define ANALY_FORECAST_DEFAULT_WNCHECK 1
-#define ANALY_FORECAST_MAX_HISTORY_ROWS 40000
-#define ANALY_MAX_FC_ROWS 1024
+#define ANALY_FORECAST_MAX_ROWS 40000
 #define ANALY_ANOMALY_WINDOW_MAX_ROWS 40000
+#define ANALY_DEFAULT_TIMEOUT 60
+#define ANALY_MAX_TIMEOUT 600

 typedef struct {
   EAnalAlgoType type;

@ -48,7 +49,7 @@ typedef enum {
 typedef enum {
   ANALYTICS_HTTP_TYPE_GET = 0,
   ANALYTICS_HTTP_TYPE_POST,
-} EAnalHttpType;
+} EAnalyHttpType;

 typedef struct {
   TdFilePtr filePtr;

@ -66,7 +67,7 @@ typedef struct {
 int32_t taosAnalyticsInit();
 void    taosAnalyticsCleanup();
-SJson  *taosAnalySendReqRetJson(const char *url, EAnalHttpType type, SAnalyticBuf *pBuf);
+SJson  *taosAnalySendReqRetJson(const char *url, EAnalyHttpType type, SAnalyticBuf *pBuf, int64_t timeout);

 int32_t taosAnalyGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, int32_t urlLen);
 bool    taosAnalyGetOptStr(const char *option, const char *optName, char *optValue, int32_t optMaxLen);
@ -299,6 +299,7 @@ extern bool tsStreamCoverage;
 extern int8_t  tsS3EpNum;
 extern int32_t tsStreamNotifyMessageSize;
 extern int32_t tsStreamNotifyFrameSize;
+extern bool    tsCompareAsStrInGreatest;

 extern bool tsExperimental;
 // #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
@ -40,6 +40,7 @@ typedef enum {
   ARB_QUEUE,
   STREAM_CTRL_QUEUE,
   STREAM_LONG_EXEC_QUEUE,
+  STREAM_CHKPT_QUEUE,
   QUEUE_MAX,
 } EQueueType;
@ -355,6 +355,7 @@
   TD_DEF_MSG_TYPE(TDMT_STREAM_DROP, "stream-drop", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE_TRIGGER, "stream-retri-trigger", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_CONSEN_CHKPT, "stream-consen-chkpt", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_STREAM_CHKPT_EXEC, "stream-exec-chkpt", NULL, NULL)
   TD_CLOSE_MSG_SEG(TDMT_STREAM_MSG)

   TD_NEW_MSG_SEG(TDMT_MON_MSG) //5 << 8
@ -276,6 +276,9 @@ typedef struct {
 #define IS_STR_DATA_TYPE(t) \
   (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR))

+#define IS_COMPARE_STR_DATA_TYPE(t) \
+  (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
+
 #define IS_VALID_TINYINT(_t)  ((_t) >= INT8_MIN && (_t) <= INT8_MAX)
 #define IS_VALID_SMALLINT(_t) ((_t) >= INT16_MIN && (_t) <= INT16_MAX)
 #define IS_VALID_INT(_t)      ((_t) >= INT32_MIN && (_t) <= INT32_MAX)
@ -90,6 +90,8 @@ typedef enum EFunctionType {
   FUNCTION_TYPE_DEGREES,
   FUNCTION_TYPE_RADIANS,
   FUNCTION_TYPE_TRUNCATE,
+  FUNCTION_TYPE_GREATEST,
+  FUNCTION_TYPE_LEAST,

   // string function
   FUNCTION_TYPE_LENGTH = 1500,
@ -66,6 +66,8 @@ int32_t filterPartitionCond(SNode **pCondition, SNode **pPrimaryKeyCond, SNode *
                             SNode **pOtherCond);
 int32_t        filterIsMultiTableColsCond(SNode *pCond, bool *res);
 EConditionType filterClassifyCondition(SNode *pNode);
+int32_t        filterGetCompFunc(__compar_fn_t *func, int32_t type, int32_t optr);
+bool           filterDoCompare(__compar_fn_t func, uint8_t optr, void *left, void *right);

 #ifdef __cplusplus
 }
@ -44,6 +44,7 @@ int32_t scalarGenerateSetFromList(void **data, void *pNode, uint32_t type, int8_

 int32_t vectorGetConvertType(int32_t type1, int32_t type2);
 int32_t vectorConvertSingleColImpl(const SScalarParam *pIn, SScalarParam *pOut, int32_t *overflow, int32_t startIndex, int32_t numOfRows);
+int32_t vectorConvertSingleCol(SScalarParam *input, SScalarParam *output, int32_t type, int32_t startIndex, int32_t numOfRows);

 /* Math functions */
 int32_t absFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);

@ -71,6 +72,8 @@ int32_t signFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp
 int32_t degreesFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
 int32_t radiansFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
 int32_t randFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
+int32_t greatestFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
+int32_t leastFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);

 /* String functions */
 int32_t lengthFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
@ -702,8 +702,8 @@ int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t clearRelHalt);

 int32_t streamExecTask(SStreamTask* pTask);
 int32_t streamResumeTask(SStreamTask* pTask);
-int32_t streamTrySchedExec(SStreamTask* pTask);
+int32_t streamTrySchedExec(SStreamTask* pTask, bool chkptExec);
-int32_t streamTaskSchedTask(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int32_t taskId, int32_t execType);
+int32_t streamTaskSchedTask(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int32_t taskId, int32_t execType, bool chkptExec);
 void    streamTaskResumeInFuture(SStreamTask* pTask);
 void    streamTaskClearSchedIdleInfo(SStreamTask* pTask);
 void    streamTaskSetIdleInfo(SStreamTask* pTask, int32_t idleTime);
@ -27,6 +27,7 @@
 #include "scheduler.h"
 #include "tcache.h"
 #include "tcompare.h"
+#include "tconv.h"
 #include "tglobal.h"
 #include "thttp.h"
 #include "tmsg.h"

@ -36,7 +37,6 @@
 #include "tsched.h"
 #include "ttime.h"
 #include "tversion.h"
-#include "tconv.h"

 #include "cus_name.h"

@ -69,7 +69,7 @@ int64_t lastClusterId = 0;
 int32_t clientReqRefPool = -1;
 int32_t clientConnRefPool = -1;
 int32_t clientStop = -1;
-SHashObj* pTimezoneMap = NULL;
+SHashObj *pTimezoneMap = NULL;

 int32_t timestampDeltaLimit = 900;  // s

@ -964,7 +964,7 @@ void taos_init_imp(void) {
   ENV_ERR_RET(taosInitCfg(configDir, NULL, NULL, NULL, NULL, 1), "failed to init cfg");

   initQueryModuleMsgHandle();
-  if ((tsCharsetCxt = taosConvInit(tsCharset)) == NULL){
+  if ((tsCharsetCxt = taosConvInit(tsCharset)) == NULL) {
     tscInitRes = terrno;
     tscError("failed to init conv");
     return;
@ -276,7 +276,7 @@ _OVER:
   return code;
 }

-static int32_t taosCurlPostRequest(const char *url, SCurlResp *pRsp, const char *buf, int32_t bufLen) {
+static int32_t taosCurlPostRequest(const char *url, SCurlResp *pRsp, const char *buf, int32_t bufLen, int32_t timeout) {
   struct curl_slist *headers = NULL;
   CURL              *curl = NULL;
   CURLcode           code = 0;

@ -292,7 +292,7 @@ static int32_t taosCurlPostRequest(const char *url, SCurlResp *pRsp, const char
   if (curl_easy_setopt(curl, CURLOPT_URL, url) != 0) goto _OVER;
   if (curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, taosCurlWriteData) != 0) goto _OVER;
   if (curl_easy_setopt(curl, CURLOPT_WRITEDATA, pRsp) != 0) goto _OVER;
-  if (curl_easy_setopt(curl, CURLOPT_TIMEOUT_MS, 60000) != 0) goto _OVER;
+  if (curl_easy_setopt(curl, CURLOPT_TIMEOUT_MS, timeout) != 0) goto _OVER;
   if (curl_easy_setopt(curl, CURLOPT_POST, 1) != 0) goto _OVER;
   if (curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, bufLen) != 0) goto _OVER;
   if (curl_easy_setopt(curl, CURLOPT_POSTFIELDS, buf) != 0) goto _OVER;

@ -311,7 +311,7 @@ _OVER:
   return code;
 }

-SJson *taosAnalySendReqRetJson(const char *url, EAnalHttpType type, SAnalyticBuf *pBuf) {
+SJson *taosAnalySendReqRetJson(const char *url, EAnalyHttpType type, SAnalyticBuf *pBuf, int64_t timeout) {
   int32_t code = -1;
   char   *pCont = NULL;
   int64_t contentLen;

@ -329,7 +329,7 @@ SJson *taosAnalySendReqRetJson(const char *url, EAnalHttpType type, SAnalyticBuf
     terrno = code;
     goto _OVER;
   }
-  if (taosCurlPostRequest(url, &curlRsp, pCont, contentLen) != 0) {
+  if (taosCurlPostRequest(url, &curlRsp, pCont, contentLen, timeout) != 0) {
     terrno = TSDB_CODE_ANA_URL_CANT_ACCESS;
     goto _OVER;
   }

@ -767,7 +767,7 @@ static int32_t taosAnalyBufGetCont(SAnalyticBuf *pBuf, char **ppCont, int64_t *p
 int32_t taosAnalyticsInit() { return 0; }
 void    taosAnalyticsCleanup() {}
-SJson  *taosAnalySendReqRetJson(const char *url, EAnalHttpType type, SAnalyticBuf *pBuf) { return NULL; }
+SJson  *taosAnalySendReqRetJson(const char *url, EAnalyHttpType type, SAnalyticBuf *pBuf, int64_t timeout) { return NULL; }

 int32_t taosAnalyGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, int32_t urlLen) { return 0; }
 bool    taosAnalyGetOptStr(const char *option, const char *optName, char *optValue, int32_t optMaxLen) { return true; }
@ -14,12 +14,12 @@
  */

 #define _DEFAULT_SOURCE
+#include "tglobal.h"
 #include "cJSON.h"
 #include "defines.h"
 #include "os.h"
 #include "osString.h"
 #include "tconfig.h"
-#include "tglobal.h"
 #include "tgrant.h"
 #include "tjson.h"
 #include "tlog.h"

@ -28,7 +28,6 @@
 #include "tutil.h"

-
 #define CONFIG_PATH_LEN (TSDB_FILENAME_LEN + 12)
 #define CONFIG_FILE_LEN (CONFIG_PATH_LEN + 32)

@ -117,9 +116,9 @@ bool tsMndSkipGrant = false;
 bool tsEnableWhiteList = false;  // ip white list cfg

 // arbitrator
-int32_t tsArbHeartBeatIntervalSec = 5;
+int32_t tsArbHeartBeatIntervalSec = 2;
-int32_t tsArbCheckSyncIntervalSec = 10;
+int32_t tsArbCheckSyncIntervalSec = 3;
-int32_t tsArbSetAssignedTimeoutSec = 30;
+int32_t tsArbSetAssignedTimeoutSec = 6;

 // dnode
 int64_t tsDndStart = 0;

@ -131,6 +130,8 @@ uint32_t tsEncryptionKeyChksum = 0;
 int8_t tsEncryptionKeyStat = ENCRYPT_KEY_STAT_UNSET;
 int8_t tsGrant = 1;

+bool tsCompareAsStrInGreatest = true;
+
 // monitor
 bool    tsEnableMonitor = true;
 int32_t tsMonitorInterval = 30;

@ -501,9 +502,7 @@ int32_t taosSetS3Cfg(SConfig *pCfg) {
   TAOS_RETURN(TSDB_CODE_SUCCESS);
 }

-struct SConfig *taosGetCfg() {
-  return tsCfg;
-}
+struct SConfig *taosGetCfg() { return tsCfg; }

 static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile,
                            char *apolloUrl) {

@ -692,7 +691,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
                                 CFG_DYN_CLIENT, CFG_CATEGORY_LOCAL));
   TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "maxInsertBatchRows", tsMaxInsertBatchRows, 1, INT32_MAX, CFG_SCOPE_CLIENT,
                                 CFG_DYN_CLIENT, CFG_CATEGORY_LOCAL) != 0);
-  TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, CFG_SCOPE_SERVER,
+  TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 3000, 86400000, CFG_SCOPE_SERVER,
                                 CFG_DYN_BOTH_LAZY, CFG_CATEGORY_GLOBAL));
   TAOS_CHECK_RETURN(cfgAddBool(pCfg, "useAdapter", tsUseAdapter, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT, CFG_CATEGORY_LOCAL));
   TAOS_CHECK_RETURN(

@ -750,6 +749,8 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
   TAOS_CHECK_RETURN(
       cfgAddBool(pCfg, "streamCoverage", tsStreamCoverage, CFG_DYN_CLIENT, CFG_DYN_CLIENT, CFG_CATEGORY_LOCAL));

+  TAOS_CHECK_RETURN(cfgAddBool(pCfg, "compareAsStrInGreatest", tsCompareAsStrInGreatest, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT, CFG_CATEGORY_LOCAL));
+
   TAOS_RETURN(TSDB_CODE_SUCCESS);
 }

@ -1483,6 +1484,9 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
   TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "streamCoverage");
   tsStreamCoverage = pItem->bval;

+  TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "compareAsStrInGreatest");
+  tsCompareAsStrInGreatest = pItem->bval;
+
   TAOS_RETURN(TSDB_CODE_SUCCESS);
 }

@ -2786,7 +2790,8 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) {
       {"numOfRpcSessions", &tsNumOfRpcSessions},
       {"bypassFlag", &tsBypassFlag},
       {"safetyCheckLevel", &tsSafetyCheckLevel},
-      {"streamCoverage", &tsStreamCoverage}};
+      {"streamCoverage", &tsStreamCoverage},
+      {"compareAsStrInGreatest", &tsCompareAsStrInGreatest}};

   if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) {
     code = taosCfgSetOption(options, tListLen(options), pItem, false);
@@ -47,7 +47,7 @@ int32_t smPutMsgToQueue(SSnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pMsg);
 int32_t smPutNodeMsgToMgmtQueue(SSnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t smPutNodeMsgToWriteQueue(SSnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t smPutNodeMsgToStreamQueue(SSnodeMgmt *pMgmt, SRpcMsg *pMsg);
-void    sndEnqueueStreamDispatch(SSnode *pSnode, SRpcMsg *pMsg);
+int32_t smPutNodeMsgToChkptQueue(SSnodeMgmt *pMgmt, SRpcMsg *pMsg);
 
 #ifdef __cplusplus
 }

@@ -102,6 +102,8 @@ SArray *smGetMsgHandles() {
   if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_REQ_CHKPT_RSP, smPutNodeMsgToStreamQueue, 1) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_CHKPT_REPORT_RSP, smPutNodeMsgToStreamQueue, 1) == NULL) goto _OVER;
 
+  if (dmSetMgmtHandle(pArray, TDMT_STREAM_CHKPT_EXEC, smPutNodeMsgToStreamQueue, 0) == NULL) goto _OVER;
+
   code = 0;
 _OVER:
   if (code != 0) {

@@ -162,6 +162,9 @@ int32_t smPutMsgToQueue(SSnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
     case WRITE_QUEUE:
       code = smPutNodeMsgToWriteQueue(pMgmt, pMsg);
       break;
+    case STREAM_CHKPT_QUEUE:
+      code = smPutNodeMsgToStreamQueue(pMgmt, pMsg);
+      break;
     default:
       code = TSDB_CODE_INVALID_PARA;
       rpcFreeCont(pMsg->pCont);

@@ -172,7 +175,6 @@ int32_t smPutMsgToQueue(SSnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
 }
 
 int32_t smPutNodeMsgToMgmtQueue(SSnodeMgmt *pMgmt, SRpcMsg *pMsg) {
-  int32_t code = 0;
   SMultiWorker *pWorker = taosArrayGetP(pMgmt->writeWroker, 0);
   if (pWorker == NULL) {
     return TSDB_CODE_INVALID_MSG;

@@ -198,3 +200,10 @@ int32_t smPutNodeMsgToStreamQueue(SSnodeMgmt *pMgmt, SRpcMsg *pMsg) {
   dTrace("msg:%p, put into worker %s", pMsg, pWorker->name);
   return taosWriteQitem(pWorker->queue, pMsg);
 }
+
+//int32_t smPutNodeMsgToChkptQueue(SSnodeMgmt *pMgmt, SRpcMsg *pMsg) {
+//  SSingleWorker *pWorker = &pMgmt->chkptWorker;
+//
+//  dTrace("msg:%p, put into worker %s", pMsg, pWorker->name);
+//  return taosWriteQitem(pWorker->queue, pMsg);
+//}
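
Note on the snode hunks above: the dedicated checkpoint worker stays commented out, so both TDMT_STREAM_CHKPT_EXEC and the STREAM_CHKPT_QUEUE case reuse the existing stream queue. A reduced sketch of that fallback routing, with stand-in types (routeMsg is a hypothetical wrapper, not the real smPutMsgToQueue):

    // Queue kinds involved (subset; values illustrative).
    typedef enum { WRITE_QUEUE, STREAM_QUEUE, STREAM_CHKPT_QUEUE } EQueue;

    typedef struct Mgmt Mgmt;  // stand-in for SSnodeMgmt
    typedef struct Msg  Msg;   // stand-in for SRpcMsg

    static int putToWriteQueue(Mgmt *m, Msg *msg)  { (void)m; (void)msg; return 0; }
    static int putToStreamQueue(Mgmt *m, Msg *msg) { (void)m; (void)msg; return 0; }

    // Until a dedicated checkpoint worker exists, checkpoint traffic is
    // routed to the stream queue; only genuinely unknown kinds are rejected.
    static int routeMsg(Mgmt *m, EQueue q, Msg *msg) {
      switch (q) {
        case WRITE_QUEUE:        return putToWriteQueue(m, msg);
        case STREAM_CHKPT_QUEUE: // fall through: shares the stream worker
        case STREAM_QUEUE:       return putToStreamQueue(m, msg);
        default:                 return -1;  // invalid parameter
      }
    }
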
@@ -34,6 +34,7 @@ typedef struct SVnodeMgmt {
   SAutoQWorkerPool streamPool;
   SAutoQWorkerPool streamLongExecPool;
   SWWorkerPool     streamCtrlPool;
+  SWWorkerPool     streamChkPool;
   SWWorkerPool     fetchPool;
   SSingleWorker    mgmtWorker;
   SSingleWorker    mgmtMultiWorker;

@@ -77,6 +78,7 @@ typedef struct {
   STaosQueue *pStreamQ;
   STaosQueue *pStreamCtrlQ;
   STaosQueue *pStreamLongExecQ;
+  STaosQueue *pStreamChkQ;
   STaosQueue *pFetchQ;
   STaosQueue *pMultiMgmQ;
 } SVnodeObj;

@@ -141,6 +143,7 @@ int32_t vmPutMsgToStreamQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t vmPutMsgToStreamCtrlQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t vmPutMsgToStreamLongExecQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
 
+int32_t vmPutMsgToStreamChkQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t vmPutMsgToMergeQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t vmPutMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t vmPutMsgToMultiMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
@@ -1022,6 +1022,7 @@ SArray *vmGetMsgHandles() {
   if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_CHKPT_REPORT_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_SCAN_HISTORY, vmPutMsgToStreamLongExecQueue, 0) == NULL) goto _OVER;
 
+  if (dmSetMgmtHandle(pArray, TDMT_STREAM_CHKPT_EXEC, vmPutMsgToStreamChkQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_GET_STREAM_PROGRESS, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;

@@ -407,6 +407,9 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal,
          pVnode->pStreamLongExecQ, taosQueueItemSize(pVnode->pStreamLongExecQ));
   while (!taosQueueEmpty(pVnode->pStreamLongExecQ)) taosMsleep(50);
 
+  dInfo("vgId:%d, wait for vnode stream chkpt queue:%p is empty", pVnode->vgId, pVnode->pStreamChkQ);
+  while (!taosQueueEmpty(pVnode->pStreamChkQ)) taosMsleep(10);
+
   dInfo("vgId:%d, all vnode queues is empty", pVnode->vgId);
 
   dInfo("vgId:%d, post close", pVnode->vgId);
@@ -165,6 +165,34 @@ static void vmProcessStreamCtrlQueue(SQueueInfo *pInfo, STaosQall* pQall, int32_
   }
 }
 
+static void vmProcessStreamChkptQueue(SQueueInfo *pInfo, STaosQall* pQall, int32_t numOfItems) {
+  SVnodeObj *pVnode = pInfo->ahandle;
+  void      *pItem = NULL;
+  int32_t    code = 0;
+
+  while (1) {
+    if (taosGetQitem(pQall, &pItem) == 0) {
+      break;
+    }
+
+    SRpcMsg        *pMsg = pItem;
+    const STraceId *trace = &pMsg->info.traceId;
+
+    dGTrace("vgId:%d, msg:%p get from vnode-stream-chkpt queue", pVnode->vgId, pMsg);
+    code = vnodeProcessStreamChkptMsg(pVnode->pImpl, pMsg, pInfo);
+    if (code != 0) {
+      terrno = code;
+      dGError("vgId:%d, msg:%p failed to process stream chkpt msg %s since %s", pVnode->vgId, pMsg,
+              TMSG_INFO(pMsg->msgType), tstrerror(code));
+      vmSendRsp(pMsg, code);
+    }
+
+    dGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code);
+    rpcFreeCont(pMsg->pCont);
+    taosFreeQitem(pMsg);
+  }
+}
+
 static void vmProcessStreamLongExecQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
   SVnodeObj      *pVnode = pInfo->ahandle;
   const STraceId *trace = &pMsg->info.traceId;

@@ -301,6 +329,10 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
       dGTrace("vgId:%d, msg:%p put into vnode-stream-long-exec queue", pVnode->vgId, pMsg);
       code = taosWriteQitem(pVnode->pStreamLongExecQ, pMsg);
       break;
+    case STREAM_CHKPT_QUEUE:
+      dGTrace("vgId:%d, msg:%p put into vnode-stream-chkpt queue", pVnode->vgId, pMsg);
+      code = taosWriteQitem(pVnode->pStreamChkQ, pMsg);
+      break;
     case FETCH_QUEUE:
       dGTrace("vgId:%d, msg:%p put into vnode-fetch queue", pVnode->vgId, pMsg);
       code = taosWriteQitem(pVnode->pFetchQ, pMsg);

@@ -361,6 +393,8 @@ int32_t vmPutMsgToStreamCtrlQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmP
 
 int32_t vmPutMsgToStreamLongExecQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, STREAM_LONG_EXEC_QUEUE); }
 
+int32_t vmPutMsgToStreamChkQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, STREAM_CHKPT_QUEUE); }
+
 int32_t vmPutMsgToMultiMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
   const STraceId *trace = &pMsg->info.traceId;
   dGTrace("msg:%p, put into vnode-multi-mgmt queue", pMsg);

@@ -439,6 +473,8 @@ int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
     case STREAM_LONG_EXEC_QUEUE:
       size = taosQueueItemSize(pVnode->pStreamLongExecQ);
       break;
+    case STREAM_CHKPT_QUEUE:
+      size = taosQueueItemSize(pVnode->pStreamChkQ);
     default:
       break;
   }

@@ -487,10 +523,11 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
   pVnode->pStreamQ = tAutoQWorkerAllocQueue(&pMgmt->streamPool, pVnode, (FItem)vmProcessStreamQueue, 2);
   pVnode->pStreamCtrlQ = tWWorkerAllocQueue(&pMgmt->streamCtrlPool, pVnode, (FItems)vmProcessStreamCtrlQueue);
   pVnode->pStreamLongExecQ = tAutoQWorkerAllocQueue(&pMgmt->streamLongExecPool, pVnode, (FItem)vmProcessStreamLongExecQueue, 1);
+  pVnode->pStreamChkQ = tWWorkerAllocQueue(&pMgmt->streamChkPool, pVnode, (FItems)vmProcessStreamChkptQueue);
 
   if (pVnode->pWriteW.queue == NULL || pVnode->pSyncW.queue == NULL || pVnode->pSyncRdW.queue == NULL ||
       pVnode->pApplyW.queue == NULL || pVnode->pQueryQ == NULL || pVnode->pStreamQ == NULL || pVnode->pFetchQ == NULL
-      || pVnode->pStreamCtrlQ == NULL || pVnode->pStreamLongExecQ == NULL) {
+      || pVnode->pStreamCtrlQ == NULL || pVnode->pStreamLongExecQ == NULL || pVnode->pStreamChkQ == NULL) {
     return TSDB_CODE_OUT_OF_MEMORY;
   }

@@ -509,6 +546,8 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
   dInfo("vgId:%d, stream-long-exec-queue:%p is alloced", pVnode->vgId, pVnode->pStreamLongExecQ);
   dInfo("vgId:%d, stream-ctrl-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pStreamCtrlQ,
         taosQueueGetThreadId(pVnode->pStreamCtrlQ));
+  dInfo("vgId:%d, stream-chk-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pStreamChkQ,
+        taosQueueGetThreadId(pVnode->pStreamChkQ));
   return 0;
 }

@@ -517,6 +556,7 @@ void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
   tAutoQWorkerFreeQueue(&pMgmt->streamPool, pVnode->pStreamQ);
   tAutoQWorkerFreeQueue(&pMgmt->streamLongExecPool, pVnode->pStreamLongExecQ);
   tWWorkerFreeQueue(&pMgmt->streamCtrlPool, pVnode->pStreamCtrlQ);
+  tWWorkerFreeQueue(&pMgmt->streamChkPool, pVnode->pStreamChkQ);
   tWWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ);
   pVnode->pQueryQ = NULL;
   pVnode->pFetchQ = NULL;

@@ -525,6 +565,8 @@ void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
   pVnode->pStreamCtrlQ = NULL;
   pVnode->pStreamLongExecQ = NULL;
+
+  pVnode->pStreamChkQ = NULL;
   pVnode->pFetchQ = NULL;
   dDebug("vgId:%d, queue is freed", pVnode->vgId);
 }

@@ -554,6 +596,11 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
   pStreamCtrlPool->max = 1;
   if ((code = tWWorkerInit(pStreamCtrlPool)) != 0) return code;
 
+  SWWorkerPool *pStreamChkPool = &pMgmt->streamChkPool;
+  pStreamChkPool->name = "vnode-stream-chkpt";
+  pStreamChkPool->max = 1;
+  if ((code = tWWorkerInit(pStreamChkPool)) != 0) return code;
+
   SWWorkerPool *pFPool = &pMgmt->fetchPool;
   pFPool->name = "vnode-fetch";
   pFPool->max = tsNumOfVnodeFetchThreads;

@@ -587,6 +634,7 @@ void vmStopWorker(SVnodeMgmt *pMgmt) {
   tAutoQWorkerCleanup(&pMgmt->streamPool);
   tAutoQWorkerCleanup(&pMgmt->streamLongExecPool);
   tWWorkerCleanup(&pMgmt->streamCtrlPool);
+  tWWorkerCleanup(&pMgmt->streamChkPool);
   tWWorkerCleanup(&pMgmt->fetchPool);
   dDebug("vnode workers are closed");
 }
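
The new vnode-stream-chkpt queue is a tWWorker (batch) queue: the pool hands the callback everything it dequeued in one pass, and the callback owns each item until it frees it, exactly as vmProcessStreamChkptQueue does above. A self-contained sketch of that drain-and-free contract; the queue types here are stand-ins for STaosQall/SRpcMsg:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int payload; } Item;               // stand-in for SRpcMsg
    typedef struct { Item **items; int n, pos; } Qall;  // stand-in for STaosQall

    static int qallNext(Qall *q, Item **out) {  // returns 0 once drained
      if (q->pos >= q->n) return 0;
      *out = q->items[q->pos++];
      return 1;
    }

    // Batch worker contract: process and free every item before returning,
    // reporting per-item failures without aborting the drain.
    static void drainAll(Qall *q) {
      Item *it = NULL;
      while (qallNext(q, &it)) {
        if (it->payload < 0) fprintf(stderr, "item failed\n");  // per-item error path
        free(it);
      }
    }

    int main(void) {
      Item *a = malloc(sizeof(Item)), *b = malloc(sizeof(Item));
      a->payload = 1; b->payload = -1;
      Item *arr[] = {a, b};
      Qall  q = {arr, 2, 0};
      drainAll(&q);
      return 0;
    }
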
@@ -789,7 +789,7 @@ static int32_t mndGetAnodeAlgoList(const char *url, SAnodeObj *pObj) {
   char anodeUrl[TSDB_ANALYTIC_ANODE_URL_LEN + 1] = {0};
   snprintf(anodeUrl, TSDB_ANALYTIC_ANODE_URL_LEN, "%s/%s", url, "list");
 
-  SJson *pJson = taosAnalySendReqRetJson(anodeUrl, ANALYTICS_HTTP_TYPE_GET, NULL);
+  SJson *pJson = taosAnalySendReqRetJson(anodeUrl, ANALYTICS_HTTP_TYPE_GET, NULL, 0);
   if (pJson == NULL) return terrno;
 
   int32_t code = mndDecodeAlgoList(pJson, pObj);

@@ -805,7 +805,7 @@ static int32_t mndGetAnodeStatus(SAnodeObj *pObj, char *status, int32_t statusLe
   char anodeUrl[TSDB_ANALYTIC_ANODE_URL_LEN + 1] = {0};
   snprintf(anodeUrl, TSDB_ANALYTIC_ANODE_URL_LEN, "%s/%s", pObj->url, "status");
 
-  SJson *pJson = taosAnalySendReqRetJson(anodeUrl, ANALYTICS_HTTP_TYPE_GET, NULL);
+  SJson *pJson = taosAnalySendReqRetJson(anodeUrl, ANALYTICS_HTTP_TYPE_GET, NULL, 0);
   if (pJson == NULL) return terrno;
 
   code = tjsonGetDoubleValue(pJson, "protocol", &tmp);
@@ -92,7 +92,7 @@ FAIL:
 }
 
 int32_t sndInit(SSnode *pSnode) {
-  if (streamTaskSchedTask(&pSnode->msgCb, pSnode->pMeta->vgId, 0, 0, STREAM_EXEC_T_START_ALL_TASKS) != 0) {
+  if (streamTaskSchedTask(&pSnode->msgCb, pSnode->pMeta->vgId, 0, 0, STREAM_EXEC_T_START_ALL_TASKS, false) != 0) {
     sndError("failed to start all tasks");
   }
   return 0;

@@ -138,6 +138,8 @@ int32_t sndProcessStreamMsg(SSnode *pSnode, SRpcMsg *pMsg) {
       return tqStreamTaskProcessRetrieveTriggerReq(pSnode->pMeta, pMsg);
     case TDMT_STREAM_RETRIEVE_TRIGGER_RSP:
       return tqStreamTaskProcessRetrieveTriggerRsp(pSnode->pMeta, pMsg);
+    case TDMT_STREAM_CHKPT_EXEC:
+      return tqStreamTaskProcessRunReq(pSnode->pMeta, pMsg, true);
     default:
       sndError("invalid snode msg:%d", pMsg->msgType);
       return TSDB_CODE_INVALID_MSG;
@@ -114,6 +114,7 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
 int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
 int32_t vnodeProcessStreamCtrlMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
 int32_t vnodeProcessStreamLongExecMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
+int32_t vnodeProcessStreamChkptMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
 void    vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs);
 void    vnodeApplyWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs);
 void    vnodeProposeCommitOnNeed(SVnode *pVnode, bool atExit);
@@ -14,10 +14,19 @@
  */
 
 #include "meta.h"
+#include "vnd.h"
 
 static FORCE_INLINE void *metaMalloc(void *pPool, size_t size) {
+  SVBufPool *pool = (SVBufPool *)pPool;
+  SVnode    *pVnode = pool->pVnode;
+
+  if (pVnode->inUse && pVnode->inUse->size > pVnode->inUse->node.size) {
+    return NULL;
+  }
+
   return vnodeBufPoolMallocAligned((SVBufPool *)pPool, size);
 }
 
 static FORCE_INLINE void metaFree(void *pPool, void *p) { vnodeBufPoolFree((SVBufPool *)pPool, p); }
 
 // begin a meta txn
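
The metaMalloc change adds a back-pressure guard: once the in-use vnode buffer pool has grown past its nominal node size, the allocator reports failure instead of growing further, and the caller must handle NULL. The same guard in isolation; SimplePool and its fields are assumptions for illustration:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
      size_t used;   // bytes currently handed out
      size_t quota;  // nominal capacity of one pool node
    } SimplePool;

    // Refuse to allocate once the pool is over quota, so the caller has to
    // flush/commit and retry instead of letting the pool grow without bound.
    static void *poolMalloc(SimplePool *p, size_t size) {
      if (p->used > p->quota) {
        return NULL;  // "pool exhausted": mirrors metaMalloc returning NULL
      }
      p->used += size;
      return malloc(size);
    }

    int main(void) {
      SimplePool pool = {.used = 0, .quota = 64};
      void *a = poolMalloc(&pool, 64);   // fits
      pool.used = 128;                   // simulate an oversized in-use node
      void *b = poolMalloc(&pool, 8);    // refused
      printf("a=%p b=%p\n", a, b);       // b is NULL
      free(a);
      return 0;
    }
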
@@ -928,12 +928,6 @@ static void doStartFillhistoryStep2(SStreamTask* pTask, SStreamTask* pStreamTask
 
     // now the fill-history task starts to scan data from wal files.
     code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_SCANHIST_DONE);
-    // if (code == TSDB_CODE_SUCCESS) {
-    //   code = tqScanWalAsync(pTq, false);
-    //   if (code) {
-    //     tqError("vgId:%d failed to start scan wal file, code:%s", vgId, tstrerror(code));
-    //   }
-    // }
   }
 }
@@ -148,6 +148,7 @@ static void doStartScanWal(void* param, void* tmrId) {
     return;
   }
 
+  // failed to lock, try 500ms later
   code = streamMetaTryRlock(pMeta);
   if (code == 0) {
     numOfTasks = taosArrayGetSize(pMeta->pTaskList);

@@ -156,25 +157,23 @@ static void doStartScanWal(void* param, void* tmrId) {
     numOfTasks = 0;
   }
 
-  if (numOfTasks == 0) {
-    goto _end;
-  }
-
+  if (numOfTasks > 0) {
     tqDebug("vgId:%d create msg to start wal scan, numOfTasks:%d", vgId, numOfTasks);
 
 #if 0
     // wait for the vnode is freed, and invalid read may occur.
     taosMsleep(10000);
 #endif
 
-  code = streamTaskSchedTask(&pParam->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA);
+    code = streamTaskSchedTask(&pParam->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA, false);
     if (code) {
       tqError("vgId:%d failed sched task to scan wal, code:%s", vgId, tstrerror(code));
     }
+  }
 
 _end:
   streamTmrStart(doStartScanWal, SCAN_WAL_IDLE_DURATION, pParam, pTimer, &pMeta->scanInfo.scanTimer, vgId, "scan-wal");
-  tqDebug("vgId:%d scan-wal will start in %dms", vgId, SCAN_WAL_IDLE_DURATION*SCAN_WAL_WAIT_COUNT);
+  tqDebug("vgId:%d try scan-wal will start in %dms", vgId, SCAN_WAL_IDLE_DURATION*SCAN_WAL_WAIT_COUNT);
 
   code = taosReleaseRef(streamMetaRefPool, pParam->metaId);
   if (code) {

@@ -192,7 +191,7 @@ void tqScanWalAsync(STQ* pTq) {
 
   // 1. the vnode should be the leader.
   // 2. the stream isn't disabled
-  if ((pMeta->role == NODE_ROLE_FOLLOWER) || tsDisableStream) {
+  if ((pMeta->role != NODE_ROLE_LEADER) || tsDisableStream) {
     tqInfo("vgId:%d follower node or stream disabled, not scan wal", vgId);
     return;
   }

@@ -217,7 +216,7 @@ void tqScanWalAsync(STQ* pTq) {
 }
 
 int32_t tqStopStreamAllTasksAsync(SStreamMeta* pMeta, SMsgCb* pMsgCb) {
-  return streamTaskSchedTask(pMsgCb, pMeta->vgId, 0, 0, STREAM_EXEC_T_STOP_ALL_TASKS);
+  return streamTaskSchedTask(pMsgCb, pMeta->vgId, 0, 0, STREAM_EXEC_T_STOP_ALL_TASKS, false);
 }
 
 int32_t setWalReaderStartOffset(SStreamTask* pTask, int32_t vgId) {

@@ -323,7 +322,7 @@ bool taskReadyForDataFromWal(SStreamTask* pTask) {
   // check whether input queue is full or not
   if (streamQueueIsFull(pTask->inputq.queue)) {
     tqTrace("s-task:%s input queue is full, launch task without scanning wal", pTask->id.idStr);
-    int32_t code = streamTrySchedExec(pTask);
+    int32_t code = streamTrySchedExec(pTask, false);
     if (code) {
       tqError("s-task:%s failed to start task while inputQ is full", pTask->id.idStr);
     }

@@ -462,7 +461,7 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, int32_t* pNumOfTasks) {
     streamMutexUnlock(&pTask->lock);
 
     if ((numOfItems > 0) || hasNewData) {
-      code = streamTrySchedExec(pTask);
+      code = streamTrySchedExec(pTask, false);
       if (code != TSDB_CODE_SUCCESS) {
         streamMetaReleaseTask(pStreamMeta, pTask);
         taosArrayDestroy(pTaskList);
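
The doStartScanWal rework inverts the early `goto _end` into an `if (numOfTasks > 0)` block so the `_end` label always re-arms the timer: the scan callback is periodic and must reschedule itself on every exit path. A reduced, compilable sketch of that self-rearming shape; timerStart and the helpers are stand-ins for streamTmrStart and the real scheduling calls:

    #include <stdio.h>

    typedef void (*timer_cb)(void *arg);

    // Stub for streamTmrStart: in the real code this re-arms a timer wheel.
    static void timerStart(timer_cb cb, int ms, void *arg) {
      (void)cb; (void)arg;
      printf("re-armed for %dms\n", ms);
    }

    static int  countReadyTasks(void *meta) { (void)meta; return 0; }  // assumed helper
    static void scheduleScan(void *meta)    { (void)meta; }            // assumed helper

    #define SCAN_IDLE_MS 500  // assumed idle period

    static void scanTick(void *meta) {
      if (countReadyTasks(meta) > 0) {  // work only when there is work to do
        scheduleScan(meta);
      }
      // Unconditionally re-arm: every path must keep the periodic loop
      // alive, which is why the diff funnels both branches into _end.
      timerStart(scanTick, SCAN_IDLE_MS, meta);
    }

    int main(void) {
      scanTick(NULL);
      return 0;
    }
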
@@ -131,7 +131,7 @@ int32_t tqStreamTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, bool restart) {
   tqDebug("vgId:%d start all %d stream task(s) async", vgId, numOfTasks);
 
   int32_t type = restart ? STREAM_EXEC_T_RESTART_ALL_TASKS : STREAM_EXEC_T_START_ALL_TASKS;
-  return streamTaskSchedTask(cb, vgId, 0, 0, type);
+  return streamTaskSchedTask(cb, vgId, 0, 0, type, false);
 }
 
 int32_t tqStreamStartOneTaskAsync(SStreamMeta* pMeta, SMsgCb* cb, int64_t streamId, int32_t taskId) {

@@ -143,7 +143,7 @@ int32_t tqStreamStartOneTaskAsync(SStreamMeta* pMeta, SMsgCb* cb, int64_t stream
   }
 
   tqDebug("vgId:%d start task:0x%x async", vgId, taskId);
-  return streamTaskSchedTask(cb, vgId, streamId, taskId, STREAM_EXEC_T_START_ONE_TASK);
+  return streamTaskSchedTask(cb, vgId, streamId, taskId, STREAM_EXEC_T_START_ONE_TASK, false);
 }
 
 // this is to process request from transaction, always return true.

@@ -960,11 +960,6 @@ int32_t tqStartTaskCompleteCallback(SStreamMeta* pMeta) {
 
   streamMetaWUnLock(pMeta);
 
-  // if (scanWal && (vgId != SNODE_HANDLE)) {
-  //   tqDebug("vgId:%d start scan wal for executing tasks", vgId);
-  //   code = tqScanWalAsync(pMeta->ahandle, true);
-  // }
-
   return code;
 }

@@ -1227,7 +1222,7 @@ static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t
     } else if (level == TASK_LEVEL__SOURCE && (streamQueueGetNumOfItems(pTask->inputq.queue) == 0)) {
       // code = tqScanWalAsync((STQ*)handle, false);
     } else {
      code = streamTrySchedExec(pTask, false);
     }
   }
@@ -610,11 +610,12 @@ static int32_t tsdbTryAcquireReader(STsdbReader* pReader) {
 
   code = taosThreadMutexTryLock(&pReader->readerMutex);
   if (code != TSDB_CODE_SUCCESS) {
-    tsdbError("tsdb/read: %p, post-trytake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);
+    // Failing to acquire the lock is reasonable, not an error
+    tsdbWarn("tsdb/read: %p, post-trytake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);
   } else {
     tsdbTrace("tsdb/read: %p, post-trytask read mutex: %p", pReader, &pReader->readerMutex);
   }
-  TSDB_CHECK_CODE(code, lino, _end);
+  return code;
 
 _end:
   if (code != TSDB_CODE_SUCCESS) {
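
The tsdbTryAcquireReader change downgrades a failed trylock from error to warning and returns the code directly instead of routing it through TSDB_CHECK_CODE: with a trylock, "busy" is an expected outcome, not a fault. The standard semantics, for reference, in plain pthreads:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    int main(void) {
      pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

      pthread_mutex_lock(&m);              // hold the lock
      int rc = pthread_mutex_trylock(&m);  // second acquire must not block
      if (rc == EBUSY) {
        // Expected: the lock is simply taken; callers retry or back off.
        printf("trylock: busy (expected)\n");
      } else if (rc == 0) {
        pthread_mutex_unlock(&m);
      }
      pthread_mutex_unlock(&m);
      return 0;
    }
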
@@ -940,6 +940,8 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
 
 int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
   vTrace("vgId:%d, msg:%p in stream queue is processing", pVnode->config.vgId, pMsg);
 
+  // todo: NOTE: some command needs to run on follower, such as, stop_all_tasks
   if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META || pMsg->msgType == TDMT_VND_TABLE_CFG ||
        pMsg->msgType == TDMT_VND_BATCH_META) &&
       !syncIsReadyForRead(pVnode->sync)) {

@@ -1016,6 +1018,24 @@ int32_t vnodeProcessStreamLongExecMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo
   }
 }
 
+int32_t vnodeProcessStreamChkptMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
+  vTrace("vgId:%d, msg:%p in stream chkpt queue is processing", pVnode->config.vgId, pMsg);
+  if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META || pMsg->msgType == TDMT_VND_TABLE_CFG ||
+       pMsg->msgType == TDMT_VND_BATCH_META) &&
+      !syncIsReadyForRead(pVnode->sync)) {
+    vnodeRedirectRpcMsg(pVnode, pMsg, terrno);
+    return 0;
+  }
+
+  switch (pMsg->msgType) {
+    case TDMT_STREAM_CHKPT_EXEC:
+      return tqProcessTaskRunReq(pVnode->pTq, pMsg);
+    default:
+      vError("unknown msg type:%d in stream chkpt queue", pMsg->msgType);
+      return TSDB_CODE_APP_ERROR;
+  }
+}
+
 void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) {
   int32_t code = tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char *)data);
   if (code) {
@@ -169,8 +169,9 @@ void vnodeProposeCommitOnNeed(SVnode *pVnode, bool atExit) {
     rpcFreeCont(rpcMsg.pCont);
     rpcMsg.pCont = NULL;
   } else {
-    if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &rpcMsg) < 0) {
-      vTrace("vgId:%d, failed to put vnode commit to queue since %s", pVnode->config.vgId, terrstr());
+    int32_t code = 0;
+    if ((code = tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &rpcMsg)) < 0) {
+      vError("vgId:%d, failed to put vnode commit to write_queue since %s", pVnode->config.vgId, tstrerror(code));
     }
   }
 }

@@ -449,7 +450,9 @@ static int32_t vnodeSyncApplyMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsm
           pVnode->config.vgId, pFsm, pMeta->index, pMeta->term, pMsg->info.conn.applyIndex, pMeta->isWeak, pMeta->code,
           pMeta->state, syncStr(pMeta->state), TMSG_INFO(pMsg->msgType), pMsg->code);
 
-  return tmsgPutToQueue(&pVnode->msgCb, APPLY_QUEUE, pMsg);
+  int32_t code = tmsgPutToQueue(&pVnode->msgCb, APPLY_QUEUE, pMsg);
+  if (code < 0) vError("vgId:%d, failed to put into apply_queue since %s", pVnode->config.vgId, tstrerror(code));
+  return code;
 }
 
 static int32_t vnodeSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {

@@ -594,7 +597,7 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx)
     streamMetaWUnLock(pMeta);
 
     tqInfo("vgId:%d stream task already loaded, start them", vgId);
-    int32_t code = streamTaskSchedTask(&pVnode->msgCb, TD_VID(pVnode), 0, 0, STREAM_EXEC_T_START_ALL_TASKS);
+    int32_t code = streamTaskSchedTask(&pVnode->msgCb, TD_VID(pVnode), 0, 0, STREAM_EXEC_T_START_ALL_TASKS, false);
    if (code != 0) {
      tqError("vgId:%d failed to sched stream task, code:%s", vgId, tstrerror(code));
    }
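
Both vnodeProposeCommitOnNeed and vnodeSyncApplyMsg now capture the tmsgPutToQueue return value, log it with tstrerror at error level, and (on the apply path) propagate it, instead of discarding it or reporting a stale terrstr(). The general pattern, with stand-ins mirroring tmsgPutToQueue/tstrerror:

    #include <stdio.h>

    // Stand-ins: a queue-put that returns a negative code on failure and a
    // code-to-string helper (assumed shapes, for illustration only).
    static int queuePut(void *q, void *msg) { (void)q; (void)msg; return -1; }
    static const char *codeStr(int code)    { (void)code; return "queue is full"; }

    static int submit(void *q, void *msg) {
      int code = queuePut(q, msg);
      if (code < 0) {
        // Log the captured code rather than a global errno-style value, and
        // at error level: a lost commit/apply message is not trace noise.
        fprintf(stderr, "enqueue failed since %s\n", codeStr(code));
      }
      return code;  // propagate; never swallow the failure
    }

    int main(void) { return submit(NULL, NULL) < 0 ? 1 : 0; }
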
@@ -47,6 +47,7 @@ typedef struct {
   char               algoName[TSDB_ANALYTIC_ALGO_NAME_LEN];
   char               algoUrl[TSDB_ANALYTIC_ALGO_URL_LEN];
   char               anomalyOpt[TSDB_ANALYTIC_ALGO_OPTION_LEN];
+  int64_t            timeout;
   SAnomalyWindowSupp anomalySup;
   SWindowRowsSup     anomalyWinRowSup;
   SColumn            anomalyCol;

@@ -89,6 +90,20 @@ int32_t createAnomalywindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* p
     goto _error;
   }
 
+  bool hasTimeout = taosAnalyGetOptInt(pAnomalyNode->anomalyOpt, "timeout", &pInfo->timeout);
+  if (!hasTimeout) {
+    qDebug("not set the timeout val, set default:%d", ANALY_DEFAULT_TIMEOUT);
+    pInfo->timeout = ANALY_DEFAULT_TIMEOUT;
+  } else {
+    if (pInfo->timeout <= 0 || pInfo->timeout > ANALY_MAX_TIMEOUT) {
+      qDebug("timeout val:%" PRId64 "s is invalid (greater than 10min or less than 1s), use default:%dms",
+             pInfo->timeout, ANALY_DEFAULT_TIMEOUT);
+      pInfo->timeout = ANALY_DEFAULT_TIMEOUT;
+    } else {
+      qDebug("timeout val is set to: %" PRId64 "s", pInfo->timeout);
+    }
+  }
+
   pOperator->exprSupp.hasWindowOrGroup = true;
   pInfo->tsSlotId = ((SColumnNode*)pAnomalyNode->window.pTspk)->slotId;
   tstrncpy(pInfo->anomalyOpt, pAnomalyNode->anomalyOpt, sizeof(pInfo->anomalyOpt));

@@ -451,7 +466,7 @@ static int32_t anomalyAnalysisWindow(SOperatorInfo* pOperator) {
   code = taosAnalyBufClose(&analyBuf);
   QUERY_CHECK_CODE(code, lino, _OVER);
 
-  pJson = taosAnalySendReqRetJson(pInfo->algoUrl, ANALYTICS_HTTP_TYPE_POST, &analyBuf);
+  pJson = taosAnalySendReqRetJson(pInfo->algoUrl, ANALYTICS_HTTP_TYPE_POST, &analyBuf, pInfo->timeout * 1000);
   if (pJson == NULL) {
     code = terrno;
     goto _OVER;
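
With the timeout parsed from the window options and forwarded (in milliseconds) to taosAnalySendReqRetJson, an analysis request now fails fast instead of hanging on a slow anode. A hypothetical client-side usage sketch; the database/table names are placeholders, and whether the option string accepts timeout in exactly this form is an assumption based on the parsing code above:

    #include <stdio.h>
    #include "taos.h"  // TDengine C client

    int main(void) {
      TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
      if (conn == NULL) return 1;

      // 'timeout=60' (seconds) is read by taosAnalyGetOptInt in the operator;
      // out-of-range values fall back to the default.
      TAOS_RES *res = taos_query(conn,
          "SELECT _wstart, COUNT(*) FROM test.d0 "
          "ANOMALY_WINDOW(val, 'algo=iqr,timeout=60')");
      if (taos_errno(res) != 0) {
        fprintf(stderr, "query failed: %s\n", taos_errstr(res));
      }
      taos_free_result(res);
      taos_close(conn);
      return 0;
    }
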
@@ -38,6 +38,7 @@ typedef struct {
   int64_t optRows;
   int64_t cachedRows;
   int32_t numOfBlocks;
+  int64_t timeout;
   int16_t resTsSlot;
   int16_t resValSlot;
   int16_t resLowSlot;

@@ -76,10 +77,10 @@ static int32_t forecastCacheBlock(SForecastSupp* pSupp, SSDataBlock* pBlock, con
   int32_t       lino = 0;
   SAnalyticBuf* pBuf = &pSupp->analyBuf;
 
-  if (pSupp->cachedRows > ANALY_FORECAST_MAX_HISTORY_ROWS) {
+  if (pSupp->cachedRows > ANALY_FORECAST_MAX_ROWS) {
     code = TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS;
     qError("%s rows:%" PRId64 " for forecast cache, error happens, code:%s, upper limit:%d", id, pSupp->cachedRows,
-           tstrerror(code), ANALY_FORECAST_MAX_HISTORY_ROWS);
+           tstrerror(code), ANALY_FORECAST_MAX_ROWS);
     return code;
   }

@@ -157,8 +158,8 @@ static int32_t forecastCloseBuf(SForecastSupp* pSupp, const char* id) {
     qDebug("%s forecast rows not found from %s, use default:%" PRId64, id, pSupp->algoOpt, pSupp->optRows);
   }
 
-  if (pSupp->optRows > ANALY_MAX_FC_ROWS) {
-    qError("%s required too many forecast rows, max allowed:%d, required:%" PRId64, id, ANALY_MAX_FC_ROWS,
+  if (pSupp->optRows > ANALY_FORECAST_MAX_ROWS) {
+    qError("%s required too many forecast rows, max allowed:%d, required:%" PRId64, id, ANALY_FORECAST_MAX_ROWS,
            pSupp->optRows);
     return TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS;
   }

@@ -198,12 +199,12 @@ static int32_t forecastCloseBuf(SForecastSupp* pSupp, const char* id) {
 static int32_t forecastAnalysis(SForecastSupp* pSupp, SSDataBlock* pBlock, const char* pId) {
   SAnalyticBuf* pBuf = &pSupp->analyBuf;
   int32_t       resCurRow = pBlock->info.rows;
-  int8_t        tmpI8;
+  int8_t        tmpI8 = 0;
-  int16_t       tmpI16;
+  int16_t       tmpI16 = 0;
-  int32_t       tmpI32;
+  int32_t       tmpI32 = 0;
-  int64_t       tmpI64;
+  int64_t       tmpI64 = 0;
-  float         tmpFloat;
+  float         tmpFloat = 0;
-  double        tmpDouble;
+  double        tmpDouble = 0;
   int32_t       code = 0;
 
   SColumnInfoData* pResValCol = taosArrayGet(pBlock->pDataBlock, pSupp->resValSlot);

@@ -211,12 +212,13 @@ static int32_t forecastAnalysis(SForecastSupp* pSupp, SSDataBlock* pBlock, const
     return terrno;
   }
 
-  SColumnInfoData* pResTsCol = (pSupp->resTsSlot != -1 ? taosArrayGet(pBlock->pDataBlock, pSupp->resTsSlot) : NULL);
-  SColumnInfoData* pResLowCol = (pSupp->resLowSlot != -1 ? taosArrayGet(pBlock->pDataBlock, pSupp->resLowSlot) : NULL);
+  SColumnInfoData* pResTsCol = ((pSupp->resTsSlot != -1) ? taosArrayGet(pBlock->pDataBlock, pSupp->resTsSlot) : NULL);
+  SColumnInfoData* pResLowCol =
+      ((pSupp->resLowSlot != -1) ? taosArrayGet(pBlock->pDataBlock, pSupp->resLowSlot) : NULL);
   SColumnInfoData* pResHighCol =
       (pSupp->resHighSlot != -1 ? taosArrayGet(pBlock->pDataBlock, pSupp->resHighSlot) : NULL);
 
-  SJson* pJson = taosAnalySendReqRetJson(pSupp->algoUrl, ANALYTICS_HTTP_TYPE_POST, pBuf);
+  SJson* pJson = taosAnalySendReqRetJson(pSupp->algoUrl, ANALYTICS_HTTP_TYPE_POST, pBuf, pSupp->timeout * 1000);
   if (pJson == NULL) {
     return terrno;
   }

@@ -527,18 +529,32 @@ static int32_t forecastParseInput(SForecastSupp* pSupp, SNodeList* pFuncs) {
   return 0;
 }
 
-static int32_t forecastParseAlgo(SForecastSupp* pSupp) {
+static int32_t forecastParseAlgo(SForecastSupp* pSupp, const char* id) {
   pSupp->maxTs = 0;
   pSupp->minTs = INT64_MAX;
   pSupp->numOfRows = 0;
 
   if (!taosAnalyGetOptStr(pSupp->algoOpt, "algo", pSupp->algoName, sizeof(pSupp->algoName))) {
-    qError("failed to get forecast algorithm name from %s", pSupp->algoOpt);
+    qError("%s failed to get forecast algorithm name from %s", id, pSupp->algoOpt);
     return TSDB_CODE_ANA_ALGO_NOT_FOUND;
   }
 
+  bool hasTimeout = taosAnalyGetOptInt(pSupp->algoOpt, "timeout", &pSupp->timeout);
+  if (!hasTimeout) {
+    qDebug("%s not set the timeout val, set default:%d", id, ANALY_DEFAULT_TIMEOUT);
+    pSupp->timeout = ANALY_DEFAULT_TIMEOUT;
+  } else {
+    if (pSupp->timeout <= 0 || pSupp->timeout > ANALY_MAX_TIMEOUT) {
+      qDebug("%s timeout val:%" PRId64 "s is invalid (greater than 10min or less than 1s), use default:%dms",
+             id, pSupp->timeout, ANALY_DEFAULT_TIMEOUT);
+      pSupp->timeout = ANALY_DEFAULT_TIMEOUT;
+    } else {
+      qDebug("%s timeout val is set to: %" PRId64 "s", id, pSupp->timeout);
+    }
+  }
+
   if (taosAnalyGetAlgoUrl(pSupp->algoName, ANALY_ALGO_TYPE_FORECAST, pSupp->algoUrl, sizeof(pSupp->algoUrl)) != 0) {
-    qError("failed to get forecast algorithm url from %s", pSupp->algoName);
+    qError("%s failed to get forecast algorithm url from %s", id, pSupp->algoName);
     return TSDB_CODE_ANA_ALGO_NOT_LOAD;
   }

@@ -589,6 +605,7 @@ int32_t createForecastOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNo
     goto _error;
   }
 
+  const char* pId = pTaskInfo->id.str;
   SForecastSupp*          pSupp = &pInfo->forecastSupp;
   SForecastFuncPhysiNode* pForecastPhyNode = (SForecastFuncPhysiNode*)pPhyNode;
   SExprSupp*              pExprSup = &pOperator->exprSupp;

@@ -620,7 +637,7 @@ int32_t createForecastOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNo
   code = forecastParseOutput(pSupp, pExprSup);
   QUERY_CHECK_CODE(code, lino, _error);
 
-  code = forecastParseAlgo(pSupp);
+  code = forecastParseAlgo(pSupp, pId);
   QUERY_CHECK_CODE(code, lino, _error);
 
   code = forecastCreateBuf(pSupp);

@@ -644,7 +661,7 @@ int32_t createForecastOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNo
 
   *pOptrInfo = pOperator;
 
-  qDebug("forecast env is initialized, option:%s", pSupp->algoOpt);
+  qDebug("%s forecast env is initialized, option:%s", pId, pSupp->algoOpt);
   return TSDB_CODE_SUCCESS;
 
 _error:
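
The forecast path gains the same per-request timeout plus task-id-prefixed logging. A toy, self-contained re-implementation of the "timeout" lookup that forecastParseAlgo performs via taosAnalyGetOptInt; the real helper lives in tanalytics, and the default/upper bound below are assumptions mirroring the clamp and log text above (1s to 10min):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    // Toy key=value lookup over an option string like "algo=holtwinters,timeout=120".
    static int getOptInt(const char *opts, const char *key, long *out) {
      const char *p = strstr(opts, key);
      if (p == NULL || p[strlen(key)] != '=') return 0;
      *out = strtol(p + strlen(key) + 1, NULL, 10);
      return 1;
    }

    int main(void) {
      long        timeout = 0;
      const char *opt = "algo=holtwinters,timeout=120";
      if (!getOptInt(opt, "timeout", &timeout) || timeout <= 0 || timeout > 600) {
        timeout = 60;  // assumed default, mirroring ANALY_DEFAULT_TIMEOUT
      }
      printf("effective timeout: %lds\n", timeout);
      return 0;
    }
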
@@ -57,11 +57,12 @@ void destroyStreamCountAggOperatorInfo(void* param) {
   }
 
   destroyStreamBasicInfo(&pInfo->basic);
-  destroyStreamAggSupporter(&pInfo->streamAggSup);
   cleanupExprSupp(&pInfo->scalarSupp);
   clearGroupResInfo(&pInfo->groupResInfo);
   taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos);
   pInfo->pUpdated = NULL;
+  destroyStreamAggSupporter(&pInfo->streamAggSup);
 
   colDataDestroy(&pInfo->twAggSup.timeWindowData);
   blockDataDestroy(pInfo->pDelRes);

@@ -56,10 +56,11 @@ void destroyStreamEventOperatorInfo(void* param) {
   }
 
   destroyStreamBasicInfo(&pInfo->basic);
-  destroyStreamAggSupporter(&pInfo->streamAggSup);
   clearGroupResInfo(&pInfo->groupResInfo);
   taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos);
   pInfo->pUpdated = NULL;
+  destroyStreamAggSupporter(&pInfo->streamAggSup);
 
   cleanupExprSupp(&pInfo->scalarSupp);
   if (pInfo->pChildren != NULL) {

@@ -151,7 +151,7 @@ void destroyStreamTimeSliceOperatorInfo(void* param) {
     pInfo->pOperator = NULL;
   }
   colDataDestroy(&pInfo->twAggSup.timeWindowData);
-  destroyStreamAggSupporter(&pInfo->streamAggSup);
   resetPrevAndNextWindow(pInfo->pFillSup);
   destroyStreamFillSupporter(pInfo->pFillSup);
   destroyStreamFillInfo(pInfo->pFillInfo);

@@ -179,6 +179,7 @@ void destroyStreamTimeSliceOperatorInfo(void* param) {
   taosArrayDestroy(pInfo->historyWins);
 
   taosArrayDestroy(pInfo->pCloseTs);
+  destroyStreamAggSupporter(&pInfo->streamAggSup);
 
   taosMemoryFreeClear(param);
 }

@@ -462,7 +462,7 @@ _end:
 
 void destroyFlusedPos(void* pRes) {
   SRowBuffPos* pPos = (SRowBuffPos*)pRes;
-  if (!pPos->needFree && !pPos->pRowBuff) {
+  if (pPos->needFree && !pPos->pRowBuff) {
     taosMemoryFreeClear(pPos->pKey);
     taosMemoryFree(pPos);
   }

@@ -475,10 +475,12 @@ void destroyFlusedppPos(void* ppRes) {
 
 void clearGroupResInfo(SGroupResInfo* pGroupResInfo) {
   int32_t size = taosArrayGetSize(pGroupResInfo->pRows);
+  if (pGroupResInfo->index >= 0 && pGroupResInfo->index < size) {
     for (int32_t i = pGroupResInfo->index; i < size; i++) {
       void* pPos = taosArrayGetP(pGroupResInfo->pRows, i);
       destroyFlusedPos(pPos);
     }
+  }
   pGroupResInfo->freeItem = false;
   taosArrayDestroy(pGroupResInfo->pRows);
   pGroupResInfo->pRows = NULL;

@@ -2204,11 +2206,12 @@ void destroyStreamSessionAggOperatorInfo(void* param) {
   }
 
   destroyStreamBasicInfo(&pInfo->basic);
-  destroyStreamAggSupporter(&pInfo->streamAggSup);
   cleanupExprSupp(&pInfo->scalarSupp);
   clearGroupResInfo(&pInfo->groupResInfo);
   taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos);
   pInfo->pUpdated = NULL;
+  destroyStreamAggSupporter(&pInfo->streamAggSup);
 
   if (pInfo->pChildren != NULL) {
     int32_t size = taosArrayGetSize(pInfo->pChildren);

@@ -4442,10 +4445,11 @@ void destroyStreamStateOperatorInfo(void* param) {
   }
 
   destroyStreamBasicInfo(&pInfo->basic);
-  destroyStreamAggSupporter(&pInfo->streamAggSup);
   clearGroupResInfo(&pInfo->groupResInfo);
   taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos);
   pInfo->pUpdated = NULL;
+  destroyStreamAggSupporter(&pInfo->streamAggSup);
 
   cleanupExprSupp(&pInfo->scalarSupp);
   if (pInfo->pChildren != NULL) {
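
All of the stream operator destructors above now release the agg supporter only after clearGroupResInfo/taosArrayDestroyP have run, because the flushed positions freed there may still reference state owned by the supporter; clearGroupResInfo also gains an index bounds check before walking the rows. The ordering rule in miniature, with illustrative types:

    #include <stdlib.h>

    typedef struct { int *backing; } Supporter;  // owns shared buffers
    typedef struct { Supporter *dep; } Results;  // cached rows point into them

    // Clearing results may still touch the supporter's buffers.
    static void resultsClear(Results *r) {
      if (r->dep != NULL && r->dep->backing != NULL) {
        r->dep->backing[0] = 0;  // e.g. releasing a row-buff position
      }
      r->dep = NULL;
    }

    static void supporterDestroy(Supporter *s) {
      free(s->backing);
      s->backing = NULL;
    }

    static void destroyAll(Results *r, Supporter *s) {
      resultsClear(r);      // users of the shared state go first...
      supporterDestroy(s);  // ...the state itself goes last; reversing these
                            // two is exactly the use-after-free being fixed
    }

    int main(void) {
      Supporter s = {.backing = malloc(sizeof(int))};
      Results   r = {.dep = &s};
      destroyAll(&r, &s);
      return 0;
    }
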
@@ -22,6 +22,9 @@
 #include "tanalytics.h"
 #include "taoserror.h"
 #include "ttime.h"
+#include "functionMgt.h"
+#include "ttypes.h"
+#include "tglobal.h"
 
 static int32_t buildFuncErrMsg(char* pErrBuf, int32_t len, int32_t errCode, const char* pFormat, ...) {
   va_list vArgList;

@@ -1745,6 +1748,62 @@ static int32_t translateHistogramPartial(SFunctionNode* pFunc, char* pErrBuf, in
   return TSDB_CODE_SUCCESS;
 }
 
+#define NUMERIC_TO_STRINGS_LEN 25
+static int32_t translateGreatestleast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
+  FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len));
+
+  bool mixTypeToStrings = tsCompareAsStrInGreatest;
+
+  SDataType res = {.type = 0};
+  bool      resInit = false;
+  for (int32_t i = 0; i < LIST_LENGTH(pFunc->pParameterList); i++) {
+    SDataType* para = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, i));
+
+    if (IS_NULL_TYPE(para->type)) {
+      res.type = TSDB_DATA_TYPE_NULL;
+      res.bytes = tDataTypes[TSDB_DATA_TYPE_NULL].bytes;
+      break;
+    }
+
+    if (!resInit) {
+      res.type = para->type;
+      res.bytes = para->bytes;
+      resInit = true;
+      continue;
+    }
+
+    if (IS_MATHABLE_TYPE(para->type)) {
+      if (res.type == para->type) {
+        continue;
+      } else if (IS_MATHABLE_TYPE(res.type) || !mixTypeToStrings) {
+        int32_t resType = vectorGetConvertType(res.type, para->type);
+        res.type = resType == 0 ? res.type : resType;
+        res.bytes = tDataTypes[res.type].bytes;
+      } else {
+        // last res is strings, para is numeric and mixTypeToStrings is true
+        res.bytes = TMAX(res.bytes, NUMERIC_TO_STRINGS_LEN);
+      }
+    } else {
+      if (IS_COMPARE_STR_DATA_TYPE(res.type)) {
+        int32_t resType = vectorGetConvertType(res.type, para->type);
+        res.type = resType == 0 ? res.type : resType;
+        res.bytes = TMAX(res.bytes, para->bytes);
+      } else if (mixTypeToStrings) {
+        // last res is numeric, para is string, and mixTypeToStrings is true
+        res.type = para->type;
+        res.bytes = TMAX(para->bytes, NUMERIC_TO_STRINGS_LEN);
+      } else {
+        // last res is numeric, para is string, and mixTypeToStrings is false
+        int32_t resType = vectorGetConvertType(res.type, para->type);
+        res.type = resType == 0 ? res.type : resType;
+        res.bytes = tDataTypes[resType].bytes;
+      }
+    }
+  }
+  pFunc->node.resType = res;
+  return TSDB_CODE_SUCCESS;
+}
+
 // clang-format off
 const SBuiltinFuncDefinition funcMgtBuiltins[] = {
   {

@@ -5656,6 +5715,48 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .name = "cols",
     .translateFunc = invalidColsFunction,
   },
+  {
+    .name = "greatest",
+    .type = FUNCTION_TYPE_GREATEST,
+    .classification = FUNC_MGT_SCALAR_FUNC,
+    .parameters = {.minParamNum = 2,
+                   .maxParamNum = -1,
+                   .paramInfoPattern = 1,
+                   .inputParaInfo[0][0] = {.isLastParam = true,
+                                           .startParam = 1,
+                                           .endParam = -1,
+                                           .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE | FUNC_PARAM_SUPPORT_BOOL_TYPE | FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE | FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE,
+                                           .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE,
+                                           .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE,
+                                           .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,},
+                   .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE}},
+    .translateFunc = translateGreatestleast,
+    .getEnvFunc = NULL,
+    .initFunc = NULL,
+    .sprocessFunc = greatestFunction,
+    .finalizeFunc = NULL
+  },
+  {
+    .name = "least",
+    .type = FUNCTION_TYPE_LEAST,
+    .classification = FUNC_MGT_SCALAR_FUNC,
+    .parameters = {.minParamNum = 2,
+                   .maxParamNum = -1,
+                   .paramInfoPattern = 1,
+                   .inputParaInfo[0][0] = {.isLastParam = true,
+                                           .startParam = 1,
+                                           .endParam = -1,
+                                           .validDataType = FUNC_PARAM_SUPPORT_NUMERIC_TYPE | FUNC_PARAM_SUPPORT_NULL_TYPE | FUNC_PARAM_SUPPORT_BOOL_TYPE | FUNC_PARAM_SUPPORT_TIMESTAMP_TYPE | FUNC_PARAM_SUPPORT_VARCHAR_TYPE | FUNC_PARAM_SUPPORT_NCHAR_TYPE,
+                                           .validNodeType = FUNC_PARAM_SUPPORT_EXPR_NODE,
+                                           .paramAttribute = FUNC_PARAM_NO_SPECIFIC_ATTRIBUTE,
+                                           .valueRangeFlag = FUNC_PARAM_NO_SPECIFIC_VALUE,},
+                   .outputParaInfo = {.validDataType = FUNC_PARAM_SUPPORT_ALL_TYPE}},
+    .translateFunc = translateGreatestleast,
+    .getEnvFunc = NULL,
+    .initFunc = NULL,
+    .sprocessFunc = leastFunction,
+    .finalizeFunc = NULL
+  },
 };
 // clang-format on
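
These hunks register two new scalar functions, GREATEST and LEAST, whose result type depends on the compareAsStrInGreatest flag when numeric and string arguments are mixed (see translateGreatestleast above). A hypothetical client-side check; the SQL strings are illustrative and the mixed-type behavior described in the comments follows the translate logic, not separately verified documentation:

    #include <stdio.h>
    #include "taos.h"

    int main(void) {
      TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
      if (conn == NULL) return 1;

      // All-numeric arguments resolve to a numeric result type.
      TAOS_RES *res = taos_query(conn, "SELECT GREATEST(3, 7, 5), LEAST(3, 7, 5)");
      if (taos_errno(res) != 0) {
        fprintf(stderr, "query failed: %s\n", taos_errstr(res));
      }
      taos_free_result(res);

      // Mixed numeric/varchar arguments: with compareAsStrInGreatest enabled
      // the comparison is done as strings; otherwise both sides convert to a
      // common numeric type first.
      res = taos_query(conn, "SELECT GREATEST(9, '10')");
      taos_free_result(res);

      taos_close(conn);
      return 0;
    }
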
@@ -797,7 +797,7 @@ static bool funcNotSupportStringSma(SFunctionNode* pFunc) {
 }
 
 EFuncDataRequired statisDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow) {
-  if(funcNotSupportStringSma(pFunc)) {
+  if (funcNotSupportStringSma(pFunc)) {
     return FUNC_DATA_REQUIRED_DATA_LOAD;
   }
   return FUNC_DATA_REQUIRED_SMA_LOAD;

@@ -6611,7 +6611,7 @@ int32_t blockDBUsageFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
   double compressRadio = 0;
   if (rawDataSize != 0) {
     compressRadio = totalDiskSize * 100 / (double)rawDataSize;
-    len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Compress_radio=[%.2f]", compressRadio);
+    len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Compress_radio=[%.2f%]", compressRadio);
   } else {
     len = tsnprintf(varDataVal(st), sizeof(st) - VARSTR_HEADER_SIZE, "Compress_radio=[NULL]");
   }
@@ -532,9 +532,17 @@ int32_t sifStr2Num(char *buf, int32_t len, int8_t type, void *val) {
 static int32_t sifSetFltParam(SIFParam *left, SIFParam *right, SDataTypeBuf *typedata, SMetaFltParam *param) {
   int32_t code = 0;
   int8_t ltype = left->colValType, rtype = right->colValType;
-  if (!IS_NUMERIC_TYPE(ltype) || !((IS_NUMERIC_TYPE(rtype)) || rtype == TSDB_DATA_TYPE_VARCHAR)) {
-    return TSDB_CODE_INVALID_PARA;
-  }
+  // if (!IS_NUMERIC_TYPE(ltype) || !((IS_NUMERIC_TYPE(rtype)) || rtype == TSDB_DATA_TYPE_VARCHAR)) {
+  //   return TSDB_CODE_INVALID_PARA;
+  // }
+  if (IS_VAR_DATA_TYPE(ltype)) {
+    if (ltype == TSDB_DATA_TYPE_VARCHAR || ltype == TSDB_DATA_TYPE_BINARY || ltype == TSDB_DATA_TYPE_VARBINARY) {
+      return 0;
+    } else {
+      return TSDB_CODE_INVALID_PARA;
+    }
+  }
+
   if (ltype == TSDB_DATA_TYPE_FLOAT) {
     float f = 0;
     if (IS_NUMERIC_TYPE(rtype)) {
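The change above widens the type gate: VARCHAR/BINARY/VARBINARY left operands are now let through untouched (returning 0 and skipping the numeric coercion below), while other var-data types are still rejected. A hedged, self-contained sketch of the gate; the type codes and predicates are illustrative stand-ins for TDengine's macros.

#include <stdio.h>

enum { T_INT, T_FLOAT, T_VARCHAR, T_BINARY, T_VARBINARY, T_JSON };

static int is_var_data(int t) {
  return t == T_VARCHAR || t == T_BINARY || t == T_VARBINARY || t == T_JSON;
}

// Returns 0 when the parameter can be used (directly or after numeric
// coercion elsewhere), -1 when the left-hand type cannot participate.
static int set_flt_param(int ltype) {
  if (is_var_data(ltype)) {
    return (ltype == T_VARCHAR || ltype == T_BINARY || ltype == T_VARBINARY) ? 0 : -1;
  }
  return 0;  // the numeric paths continue below in the real function
}

int main(void) {
  printf("%d %d %d\n", set_flt_param(T_VARCHAR), set_flt_param(T_JSON), set_flt_param(T_INT));
  // prints: 0 -1 0
  return 0;
}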
@@ -1,11 +1,14 @@
+#include <stdint.h>
 #include "cJSON.h"
 #include "function.h"
 #include "scalar.h"
 #include "sclInt.h"
 #include "sclvector.h"
 #include "tdatablock.h"
+#include "tdef.h"
 #include "tjson.h"
 #include "ttime.h"
+#include "filter.h"

 typedef float (*_float_fn)(float);
 typedef float (*_float_fn_2)(float, float);
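The two typedefs above give the unary and binary float math kernels a named function-pointer shape so they can be stored and dispatched as data; a minimal usage (fabsf and fmaxf from <math.h> match the signatures exactly):

#include <math.h>
#include <stdio.h>

typedef float (*_float_fn)(float);
typedef float (*_float_fn_2)(float, float);

int main(void) {
  _float_fn f1 = fabsf;    // unary kernel
  _float_fn_2 f2 = fmaxf;  // binary kernel
  printf("%g %g\n", f1(-2.5f), f2(1.0f, 3.0f));  // 2.5 3
  return 0;
}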
@@ -4403,3 +4406,136 @@ int32_t modeScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam
   return selectScalarFunction(pInput, inputNum, pOutput);
 }

+typedef struct SCovertScarlarParam {
+  SScalarParam covertParam;  // owned scratch column, valid only if converted
+  SScalarParam *param;       // points at the input or at covertParam
+  bool converted;
+} SCovertScarlarParam;
+
+void freeSCovertScarlarParams(SCovertScarlarParam *pCovertParams, int32_t num) {
+  if (pCovertParams == NULL) {
+    return;
+  }
+  for (int32_t i = 0; i < num; i++) {
+    if (pCovertParams[i].converted) {
+      sclFreeParam(pCovertParams[i].param);
+    }
+  }
+  taosMemoryFree(pCovertParams);
+}
+
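The struct above implements a convert-or-borrow pattern: each input column is either referenced as-is or converted into owned scratch storage, and cleanup frees only what was actually converted. A self-contained sketch of the same ownership scheme, with plain malloc/free and toy types standing in for SScalarParam and the taosMemory allocators:

#include <stdlib.h>

typedef struct {
  double *data;
  int rows;
} param_t;

typedef struct {
  param_t converted_storage;  // owned buffer, valid only if converted
  param_t *param;             // points at the original or at the storage
  int converted;
} convert_slot_t;

static void free_slots(convert_slot_t *slots, int n) {
  if (slots == NULL) return;
  for (int i = 0; i < n; i++) {
    if (slots[i].converted) free(slots[i].converted_storage.data);
  }
  free(slots);  // the slot array itself is always owned
}

int main(void) {
  convert_slot_t *slots = calloc(2, sizeof(*slots));
  if (slots == NULL) return 1;
  static param_t original = {0};
  slots[0].param = &original;  // borrowed: nothing to free
  slots[1].converted_storage.data = malloc(4 * sizeof(double));
  slots[1].param = &slots[1].converted_storage;
  slots[1].converted = 1;  // owned: released by free_slots
  free_slots(slots, 2);
  return 0;
}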
+static int32_t vectorCompareAndSelect(SCovertScarlarParam *pParams, int32_t numOfRows, int32_t numOfCols,
+                                      int32_t *resultColIndex, EOperatorType optr) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  int32_t type = GET_PARAM_TYPE(pParams[0].param);
+
+  __compar_fn_t fp = NULL;
+  code = filterGetCompFunc(&fp, type, optr);
+  if (code != TSDB_CODE_SUCCESS) {
+    qError("failed to get compare function, func:%s type:%d, optr:%d", __FUNCTION__, type, optr);
+    return code;
+  }
+
+  for (int32_t i = 0; i < numOfRows; i++) {
+    int32_t selectIndex = 0;
+    if (colDataIsNull_s(pParams[selectIndex].param->columnData, i)) {
+      resultColIndex[i] = -1;  // a NULL argument makes the whole row NULL
+      continue;
+    }
+    for (int32_t j = 1; j < numOfCols; j++) {
+      if (colDataIsNull_s(pParams[j].param->columnData, i)) {
+        resultColIndex[i] = -1;
+        break;
+      } else {
+        // Single-row (constant) columns are broadcast to every row.
+        int32_t leftRowNo = pParams[selectIndex].param->numOfRows == 1 ? 0 : i;
+        int32_t rightRowNo = pParams[j].param->numOfRows == 1 ? 0 : i;
+        char *pLeftData = colDataGetData(pParams[selectIndex].param->columnData, leftRowNo);
+        char *pRightData = colDataGetData(pParams[j].param->columnData, rightRowNo);
+        bool pRes = filterDoCompare(fp, optr, pLeftData, pRightData);
+        if (!pRes) {
+          selectIndex = j;  // current winner lost the comparison
+        }
+      }
+      resultColIndex[i] = selectIndex;
+    }
+  }
+
+  return code;
+}
+
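vectorCompareAndSelect is a per-row argmax/argmin: it keeps the index of the current winner and replaces it whenever "winner optr candidate" is false, so OP_TYPE_GREATER_THAN yields the greatest column and OP_TYPE_LOWER_THAN the least, while any NULL marks the row with -1. A stripped-down model of the selection loop (NULL handling and broadcasting omitted):

#include <stdio.h>

typedef int (*cmp_fn)(double, double);
static int op_gt(double a, double b) { return a > b; }
static int op_lt(double a, double b) { return a < b; }

// Returns the index of the winning column for one row.
static int select_col(const double *vals, int ncols, cmp_fn fp) {
  int sel = 0;
  for (int j = 1; j < ncols; j++) {
    if (!fp(vals[sel], vals[j])) sel = j;  // current winner lost: switch
  }
  return sel;
}

int main(void) {
  double row[] = {4.0, 9.0, 7.0};
  printf("greatest -> col %d, least -> col %d\n",
         select_col(row, 3, op_gt), select_col(row, 3, op_lt));
  // greatest -> col 1, least -> col 0
  return 0;
}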
+static int32_t greatestLeastImpl(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput, EOperatorType order) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  SColumnInfoData *pOutputData = pOutput[0].columnData;
+  int16_t outputType = GET_PARAM_TYPE(&pOutput[0]);
+  int64_t outputLen = GET_PARAM_BYTES(&pOutput[0]);
+
+  SCovertScarlarParam *pCovertParams = NULL;
+  int32_t *resultColIndex = NULL;
+
+  int32_t numOfRows = 0;
+  bool IsNullType = (outputType == TSDB_DATA_TYPE_NULL);
+  // If any column is NULL type, the output is NULL type
+  for (int32_t i = 0; i < inputNum; i++) {
+    if (IsNullType) {
+      break;
+    }
+    if (numOfRows != 0 && numOfRows != pInput[i].numOfRows && pInput[i].numOfRows != 1 && numOfRows != 1) {
+      qError("input rows not match, func:%s, rows:%d, %d", __FUNCTION__, numOfRows, pInput[i].numOfRows);
+      code = TSDB_CODE_TSC_INTERNAL_ERROR;
+      goto _return;
+    }
+    numOfRows = TMAX(numOfRows, pInput[i].numOfRows);
+    IsNullType |= IS_NULL_TYPE(GET_PARAM_TYPE(&pInput[i]));
+  }
+
+  if (IsNullType) {
+    colDataSetNNULL(pOutputData, 0, numOfRows);
+    pOutput->numOfRows = numOfRows;
+    return TSDB_CODE_SUCCESS;
+  }
+
+  // Zero-initialize so a partially filled array can be freed safely.
+  pCovertParams = taosMemoryCalloc(inputNum, sizeof(SCovertScarlarParam));
+  if (pCovertParams == NULL) {
+    SCL_ERR_JRET(terrno);
+  }
+  for (int32_t j = 0; j < inputNum; j++) {
+    SScalarParam *pParam = &pInput[j];
+    int16_t oldType = GET_PARAM_TYPE(&pInput[j]);
+    if (oldType != outputType) {
+      pCovertParams[j].covertParam = (SScalarParam){0};
+      setTzCharset(&pCovertParams[j].covertParam, pParam->tz, pParam->charsetCxt);
+      SCL_ERR_JRET(vectorConvertSingleCol(pParam, &pCovertParams[j].covertParam, outputType, 0, pParam->numOfRows));
+      pCovertParams[j].param = &pCovertParams[j].covertParam;
+      pCovertParams[j].converted = true;
+    } else {
+      pCovertParams[j].param = pParam;  // borrow the input as-is
+      pCovertParams[j].converted = false;
+    }
+  }
+
+  resultColIndex = taosMemoryCalloc(numOfRows, sizeof(int32_t));
+  if (resultColIndex == NULL) {
+    SCL_ERR_JRET(terrno);
+  }
+  SCL_ERR_JRET(vectorCompareAndSelect(pCovertParams, numOfRows, inputNum, resultColIndex, order));
+
+  for (int32_t i = 0; i < numOfRows; i++) {
+    int32_t index = resultColIndex[i];
+    if (index == -1) {
+      colDataSetNULL(pOutputData, i);
+      continue;
+    }
+    int32_t rowNo = pCovertParams[index].param->numOfRows == 1 ? 0 : i;
+    char *data = colDataGetData(pCovertParams[index].param->columnData, rowNo);
+    SCL_ERR_JRET(colDataSetVal(pOutputData, i, data, false));
+  }
+
+  pOutput->numOfRows = numOfRows;
+
+_return:
+  freeSCovertScarlarParams(pCovertParams, inputNum);
+  taosMemoryFree(resultColIndex);
+  return code;
+}
+
+int32_t greatestFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
+  return greatestLeastImpl(pInput, inputNum, pOutput, OP_TYPE_GREATER_THAN);
+}
+
+int32_t leastFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
+  return greatestLeastImpl(pInput, inputNum, pOutput, OP_TYPE_LOWER_THAN);
+}
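Putting the pieces together, the GREATEST/LEAST semantics reduce to: convert every argument to the common output type, broadcast single-row (constant) columns across all rows, return NULL for any row where an argument is NULL, and otherwise pick the winning column per row. A self-contained model of those semantics, assuming all columns were already converted to double and using NAN to mark NULL; this sketches the behavior, not TDengine's implementation:

#include <math.h>
#include <stdio.h>

typedef struct {
  const double *data;
  int rows;  // 1 means "constant", broadcast to every row
} col_t;

static double cell(const col_t *c, int row) { return c->data[c->rows == 1 ? 0 : row]; }

// want_greater != 0 computes GREATEST, otherwise LEAST; NAN marks NULL.
static void greatest_least(const col_t *cols, int ncols, int nrows, int want_greater, double *out) {
  for (int i = 0; i < nrows; i++) {
    double best = cell(&cols[0], i);
    for (int j = 1; j < ncols && !isnan(best); j++) {
      double v = cell(&cols[j], i);
      if (isnan(v)) {
        best = v;  // NULL in any argument makes the row NULL
      } else if (want_greater ? (v > best) : (v < best)) {
        best = v;
      }
    }
    out[i] = best;
  }
}

int main(void) {
  double a[] = {1, 5, NAN};
  double b[] = {4};  // broadcast constant
  col_t cols[] = {{a, 3}, {b, 1}};
  double out[3];
  greatest_least(cols, 2, 3, 1, out);
  printf("%g %g %g\n", out[0], out[1], out[2]);  // 4 5 nan
  return 0;
}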