From 89adf377248c51c16d888346370201e62cbfa661 Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Tue, 18 Mar 2025 14:48:44 +0800 Subject: [PATCH 01/23] ci: remove if condition for invoking the test workflow --- .github/workflows/tdengine-test.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/tdengine-test.yml b/.github/workflows/tdengine-test.yml index 9c7a35f8dc..436eedd0e2 100644 --- a/.github/workflows/tdengine-test.yml +++ b/.github/workflows/tdengine-test.yml @@ -44,7 +44,6 @@ env: jobs: run-tests-on-linux: uses: taosdata/.github/.github/workflows/run-tests-on-linux.yml@main - if: ${{ github.event_name == 'pull_request' }} with: tdinternal: false specified_source_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_source_branch }} @@ -53,7 +52,6 @@ jobs: run-tests-on-mac: uses: taosdata/.github/.github/workflows/run-tests-on-macos.yml@main - if: ${{ github.event_name == 'pull_request' }} with: tdinternal: false specified_source_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_source_branch }} @@ -62,7 +60,6 @@ jobs: run-tests-on-windows: uses: taosdata/.github/.github/workflows/run-tests-on-windows.yml@main - if: ${{ github.event_name == 'pull_request' }} with: tdinternal: false specified_source_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_source_branch }} From e8cc3fcc9c5c3d05ccc92d9f33178787fffad657 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 18 Mar 2025 15:15:58 +0800 Subject: [PATCH 02/23] feat: enhance removal script with command-line options for data retention --- packaging/tools/remove.sh | 62 +++++++++++++++++++++++++++++---------- 1 file changed, 47 insertions(+), 15 deletions(-) diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 621ed7f2a9..7aafaa0cb3 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -236,21 +236,55 @@ function remove_data_and_config() { [ -d "${log_dir}" ] && ${csudo}rm -rf ${log_dir} } -echo -echo "Do you want to remove all the data, log and configuration files? [y/n]" -read answer -remove_flag=false -if [ X$answer == X"y" ] || [ X$answer == X"Y" ]; then - confirmMsg="I confirm that I would like to delete all data, log and configuration files" - echo "Please enter '${confirmMsg}' to continue" +# 解析命令行参数 +interactive_remove="yes" +while getopts "e:h" opt; do + case $opt in + e) + if [ "$OPTARG" == "yes" ]; then + interactive_remove="no" + remove_flag=false + echo "It will remove only the binary files and keep all the data, log, and configuration files." + elif [ "$OPTARG" == "no" ]; then + interactive_remove="no" + remove_flag=true + echo "It will remove the binary files and all the data, log, and configuration files." + else + echo "Invalid option for -e: $OPTARG" + exit 1 + fi + ;; + h) + echo "Usage: $(basename $0) -e [yes | no] " + echo " select 'yes' to skip prompt and remove only the binary files and keep all the data, log, and configuration files." + echo " select 'no' to skip prompt and remove the binary files and all the data, log, and configuration files" + + exit 0 + ;; + *) + echo "Invalid option: -$opt" + exit 1 + ;; + esac +done + +if [ "$interactive_remove" == "yes" ]; then + echo + echo "Do you want to remove all the data, log and configuration files? 
[y/n]" read answer - if [ X"$answer" == X"${confirmMsg}" ]; then - remove_flag=true - else - echo "answer doesn't match, skip this step" + remove_flag=false + if [ X$answer == X"y" ] || [ X$answer == X"Y" ]; then + confirmMsg="I confirm that I would like to delete all data, log and configuration files" + echo "Please enter '${confirmMsg}' to continue" + read answer + if [ X"$answer" == X"${confirmMsg}" ]; then + remove_flag=true + else + echo "answer doesn't match, skip this step" + fi fi + echo fi -echo if [ -e ${install_main_dir}/uninstall_${PREFIX}x.sh ]; then if [ X$remove_flag == X"true" ]; then @@ -260,7 +294,6 @@ if [ -e ${install_main_dir}/uninstall_${PREFIX}x.sh ]; then fi fi - if [ "$osType" = "Darwin" ]; then clean_service_on_launchctl ${csudo}rm -rf /Applications/TDengine.app @@ -299,8 +332,7 @@ elif echo $osinfo | grep -qwi "centos"; then ${csudo}rpm -e --noscripts tdengine >/dev/null 2>&1 || : fi - command -v systemctl >/dev/null 2>&1 && ${csudo}systemctl daemon-reload >/dev/null 2>&1 || true echo echo "${productName} is removed successfully!" -echo +echo \ No newline at end of file From e7e65a059de3071d1dc91e72fe57c1d50df3e61d Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Tue, 18 Mar 2025 16:23:20 +0800 Subject: [PATCH 03/23] refactor: silent mode Signed-off-by: WANG Xu --- packaging/tools/remove.sh | 41 ++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 7aafaa0cb3..bf5252e448 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -236,49 +236,50 @@ function remove_data_and_config() { [ -d "${log_dir}" ] && ${csudo}rm -rf ${log_dir} } -# 解析命令行参数 +function usage() { + echo -e "\nUsage: $(basename $0) [-e ]" + echo "-e: silent mode, specify whether to remove all the data, log and configuration files." + echo " yes: remove the data, log, and configuration files." + echo " no: don't remove the data, log, and configuration files." +} + +# main interactive_remove="yes" +remove_flag="false" + while getopts "e:h" opt; do case $opt in e) + interactive_remove="no" + if [ "$OPTARG" == "yes" ]; then - interactive_remove="no" - remove_flag=false - echo "It will remove only the binary files and keep all the data, log, and configuration files." + remove_flag="true" + echo "Remove all the data, log, and configuration files." elif [ "$OPTARG" == "no" ]; then - interactive_remove="no" - remove_flag=true - echo "It will remove the binary files and all the data, log, and configuration files." + remove_flag="false" + echo "Do NOT remove the data, log, and configuration files." else echo "Invalid option for -e: $OPTARG" + usage exit 1 fi ;; - h) - echo "Usage: $(basename $0) -e [yes | no] " - echo " select 'yes' to skip prompt and remove only the binary files and keep all the data, log, and configuration files." - echo " select 'no' to skip prompt and remove the binary files and all the data, log, and configuration files" - - exit 0 - ;; - *) - echo "Invalid option: -$opt" + h | *) + usage exit 1 ;; esac done if [ "$interactive_remove" == "yes" ]; then - echo - echo "Do you want to remove all the data, log and configuration files? [y/n]" + echo -e "\nDo you want to remove all the data, log and configuration files? 
[y/n]" read answer - remove_flag=false if [ X$answer == X"y" ] || [ X$answer == X"Y" ]; then confirmMsg="I confirm that I would like to delete all data, log and configuration files" echo "Please enter '${confirmMsg}' to continue" read answer if [ X"$answer" == X"${confirmMsg}" ]; then - remove_flag=true + remove_flag="true" else echo "answer doesn't match, skip this step" fi From 0c1db8feb2b11ed3a1c33c393b2c4ba4c9df7360 Mon Sep 17 00:00:00 2001 From: Minglei Jin <49711132+stephenkgu@users.noreply.github.com> Date: Tue, 18 Mar 2025 16:40:54 +0800 Subject: [PATCH 04/23] fix(tdb/test): remove unstable cases (#30238) --- source/libs/tdb/test/CMakeLists.txt | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/source/libs/tdb/test/CMakeLists.txt b/source/libs/tdb/test/CMakeLists.txt index fbc9b38424..46fded3b18 100644 --- a/source/libs/tdb/test/CMakeLists.txt +++ b/source/libs/tdb/test/CMakeLists.txt @@ -2,10 +2,6 @@ if(${TD_LINUX}) add_executable(tdbTest "tdbTest.cpp") target_link_libraries(tdbTest tdb gtest_main) - add_test( - NAME tdbTest - COMMAND tdbTest - ) endif() # tdbUtilTest @@ -19,26 +15,14 @@ add_test( # overflow pages testing add_executable(tdbExOVFLTest "tdbExOVFLTest.cpp") target_link_libraries(tdbExOVFLTest tdb gtest_main) -add_test( - NAME tdbExOVFLTest - COMMAND tdbExOVFLTest -) # page defragment testing add_executable(tdbPageDefragmentTest "tdbPageDefragmentTest.cpp") target_link_libraries(tdbPageDefragmentTest tdb gtest_main) -add_test( - NAME tdbPageDefragmentTest - COMMAND tdbPageDefragmentTest -) # page recycling testing add_executable(tdbPageRecycleTest "tdbPageRecycleTest.cpp") target_link_libraries(tdbPageRecycleTest tdb gtest_main) -add_test( - NAME tdbPageRecycleTest - COMMAND tdbPageRecycleTest -) # page flush testing add_executable(tdbPageFlushTest "tdbPageFlushTest.cpp") From f830d09e7f6dcb434f6597f6681a274dbf5f4e7d Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 18 Mar 2025 17:12:12 +0800 Subject: [PATCH 05/23] fix: update removal scripts to check for symbolic links before deletion --- packaging/tools/remove.sh | 6 +++--- packaging/tools/remove_client.sh | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index bf5252e448..ec73ca88cf 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -162,9 +162,9 @@ remove_service_of() { remove_tools_of() { _tool=$1 kill_service_of ${_tool} - [ -e "${bin_link_dir}/${_tool}" ] && ${csudo}rm -rf ${bin_link_dir}/${_tool} || : + [ -L "${bin_link_dir}/${_tool}" ] && ${csudo}rm -rf ${bin_link_dir}/${_tool} || : [ -e "${installDir}/bin/${_tool}" ] && ${csudo}rm -rf ${installDir}/bin/${_tool} || : - [ -e "${local_bin_link_dir}/${_tool}" ] && ${csudo}rm -rf ${local_bin_link_dir}/${_tool} || : + [ -L "${local_bin_link_dir}/${_tool}" ] && ${csudo}rm -rf ${local_bin_link_dir}/${_tool} || : } remove_bin() { @@ -257,7 +257,7 @@ while getopts "e:h" opt; do echo "Remove all the data, log, and configuration files." elif [ "$OPTARG" == "no" ]; then remove_flag="false" - echo "Do NOT remove the data, log, and configuration files." + echo "Do not remove the data, log, and configuration files." 
else echo "Invalid option for -e: $OPTARG" usage diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index 33454d7512..a7eb225704 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -57,7 +57,7 @@ function clean_bin() { ${csudo}rm -f ${bin_link_dir}/${dumpName2} || : ${csudo}rm -f ${bin_link_dir}/${uninstallScript2} || : ${csudo}rm -f ${bin_link_dir}/set_core || : - [ -f ${bin_link_dir}/${inspect_name} ] && ${csudo}rm -f ${bin_link_dir}/${inspect_name} || : + [ -L ${bin_link_dir}/${inspect_name} ] && ${csudo}rm -f ${bin_link_dir}/${inspect_name} || : if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then ${csudo}rm -f ${bin_link_dir}/${clientName2} || : @@ -65,7 +65,7 @@ function clean_bin() { ${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || : ${csudo}rm -f ${bin_link_dir}/${dumpName2} || : ${csudo}rm -f ${bin_link_dir}/${uninstallScript2} || : - [ -f ${bin_link_dir}/${inspect_name} ] && ${csudo}rm -f ${bin_link_dir}/${inspect_name} || : + [ -L ${bin_link_dir}/${inspect_name} ] && ${csudo}rm -f ${bin_link_dir}/${inspect_name} || : fi } From 2d57795e703352d996d8470fd0e550d7a3091ea1 Mon Sep 17 00:00:00 2001 From: She Yanjie <57549981+sheyanjie-qq@users.noreply.github.com> Date: Tue, 18 Mar 2025 17:13:39 +0800 Subject: [PATCH 06/23] add jdbc decimal (#30246) --- docs/en/14-reference/05-connector/14-java.md | 1 + docs/zh/14-reference/05-connector/14-java.mdx | 1 + 2 files changed, 2 insertions(+) diff --git a/docs/en/14-reference/05-connector/14-java.md b/docs/en/14-reference/05-connector/14-java.md index 43b219bf4e..8486f7d4d3 100644 --- a/docs/en/14-reference/05-connector/14-java.md +++ b/docs/en/14-reference/05-connector/14-java.md @@ -148,6 +148,7 @@ TDengine currently supports timestamp, numeric, character, boolean types, and th | JSON | java.lang.String | only supported in tags | | VARBINARY | byte[] | | | GEOMETRY | byte[] | | +| DECIMAL | java.math.BigDecimal | | **Note**: Due to historical reasons, the BINARY type in TDengine is not truly binary data and is no longer recommended. Please use VARBINARY type instead. GEOMETRY type is binary data in little endian byte order, complying with the WKB standard. 
For more details, please refer to [Data Types](../../sql-manual/data-types/) diff --git a/docs/zh/14-reference/05-connector/14-java.mdx b/docs/zh/14-reference/05-connector/14-java.mdx index a90af822b0..d4138249fc 100644 --- a/docs/zh/14-reference/05-connector/14-java.mdx +++ b/docs/zh/14-reference/05-connector/14-java.mdx @@ -148,6 +148,7 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对 | JSON | java.lang.String |仅在 tag 中支持| | VARBINARY | byte[] || | GEOMETRY | byte[] || +| DECIMAL | java.math.BigDecimal || **注意**:由于历史原因,TDengine中的BINARY底层不是真正的二进制数据,已不建议使用。请用VARBINARY类型代替。 GEOMETRY类型是little endian字节序的二进制数据,符合WKB规范。详细信息请参考 [数据类型](../../taos-sql/data-type/#数据类型) From f62ef250fdb87d89d266cf17bd633899994a40fc Mon Sep 17 00:00:00 2001 From: jiajingbin <39030567+jiajingbin@users.noreply.github.com> Date: Tue, 18 Mar 2025 17:35:12 +0800 Subject: [PATCH 07/23] feat: add dockerfile and modify install.sh (#30242) * feat: add Dockerfile and modify install.sh * enh: add a port * fix: modify addr to 0.0.0.0 from 127.0.0.1 in taosanode.ini --- tools/tdgpt/cfg/taosanode.ini | 4 ++-- tools/tdgpt/dockerfile/base_image/Dockerfile | 24 ++++++++++++++++++++ tools/tdgpt/dockerfile/base_image/README | 1 + tools/tdgpt/dockerfile/tdgpt/Dockerfile | 11 +++++++++ tools/tdgpt/dockerfile/tdgpt/README | 1 + tools/tdgpt/dockerfile/tdgpt/entrypoint.sh | 20 ++++++++++++++++ tools/tdgpt/script/install.sh | 16 +++++++++++-- 7 files changed, 73 insertions(+), 4 deletions(-) create mode 100644 tools/tdgpt/dockerfile/base_image/Dockerfile create mode 100644 tools/tdgpt/dockerfile/base_image/README create mode 100644 tools/tdgpt/dockerfile/tdgpt/Dockerfile create mode 100644 tools/tdgpt/dockerfile/tdgpt/README create mode 100755 tools/tdgpt/dockerfile/tdgpt/entrypoint.sh diff --git a/tools/tdgpt/cfg/taosanode.ini b/tools/tdgpt/cfg/taosanode.ini index 51782bccd6..12ba6b5776 100755 --- a/tools/tdgpt/cfg/taosanode.ini +++ b/tools/tdgpt/cfg/taosanode.ini @@ -7,7 +7,7 @@ env = LC_ALL = en_US.UTF-8 # ip:port -http = 127.0.0.1:6090 +http = 0.0.0.0:6090 # the local unix socket file than communicate to Nginx #socket = 127.0.0.1:8001 @@ -62,7 +62,7 @@ reload-mercy = 10 logto = /var/log/taos/taosanode/taosanode.log # wWSGI monitor port -stats = 127.0.0.1:8387 +stats = 0.0.0.0:8387 # python virtual environment directory virtualenv = /usr/local/taos/taosanode/venv/ diff --git a/tools/tdgpt/dockerfile/base_image/Dockerfile b/tools/tdgpt/dockerfile/base_image/Dockerfile new file mode 100644 index 0000000000..5c9f719f25 --- /dev/null +++ b/tools/tdgpt/dockerfile/base_image/Dockerfile @@ -0,0 +1,24 @@ +FROM python:3.10-slim AS builder + +ENV VIRTUAL_ENV=/var/lib/taos/taosanode/venv +ENV PATH="$VIRTUAL_ENV/bin:$PATH" + +RUN apt-get -o Acquire::Check-Valid-Until=false -o Acquire::Check-Date=false update -y && \ + apt-get install -y --no-install-recommends gcc libc-dev procps && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* && \ + python3.10 -m venv /var/lib/taos/taosanode/venv && \ + pip install --upgrade pip && \ + pip install --ignore-installed blinker && \ + pip install numpy==1.26.4 pandas==1.5.0 scikit-learn outlier_utils statsmodels pyculiarity pmdarima flask matplotlib uwsgi -i https://pypi.tuna.tsinghua.edu.cn/simple && \ + pip install torch --index-url https://download.pytorch.org/whl/cpu && \ + pip install --upgrade keras -i https://pypi.tuna.tsinghua.edu.cn/simple + +FROM python:3.10-slim +COPY --from=builder /var/lib/taos/taosanode/venv /var/lib/taos/taosanode/venv +RUN apt-get update -y && \ + apt-get install -y 
--no-install-recommends procps && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* +ENV VIRTUAL_ENV=/var/lib/taos/taosanode/venv +ENV PATH="$VIRTUAL_ENV/bin:$PATH" diff --git a/tools/tdgpt/dockerfile/base_image/README b/tools/tdgpt/dockerfile/base_image/README new file mode 100644 index 0000000000..3c4ecd3104 --- /dev/null +++ b/tools/tdgpt/dockerfile/base_image/README @@ -0,0 +1 @@ +docker build -t "tdgpt_env:1.0" . diff --git a/tools/tdgpt/dockerfile/tdgpt/Dockerfile b/tools/tdgpt/dockerfile/tdgpt/Dockerfile new file mode 100644 index 0000000000..8192c378ac --- /dev/null +++ b/tools/tdgpt/dockerfile/tdgpt/Dockerfile @@ -0,0 +1,11 @@ +FROM tdgpt_env:1.0 +WORKDIR /apps +ENV DEBIAN_FRONTEND=noninteractive +ARG pkgFile +ARG dirName +ADD ${pkgFile} /apps +RUN cd ${dirName}/ && /bin/bash install.sh -e no && cd .. && rm -rf ${dirName} +COPY entrypoint.sh /usr/local/bin/entrypoint.sh +RUN chmod +x /usr/local/bin/entrypoint.sh +EXPOSE 6090 8387 +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] diff --git a/tools/tdgpt/dockerfile/tdgpt/README b/tools/tdgpt/dockerfile/tdgpt/README new file mode 100644 index 0000000000..9963313ae0 --- /dev/null +++ b/tools/tdgpt/dockerfile/tdgpt/README @@ -0,0 +1 @@ +docker build --build-arg pkgFile=TDengine-anode-3.3.6.0-Linux-x64.tar.gz --build-arg dirName=TDengine-anode-3.3.6.0 -t "tdgpt-ce:3.3.6.0" . diff --git a/tools/tdgpt/dockerfile/tdgpt/entrypoint.sh b/tools/tdgpt/dockerfile/tdgpt/entrypoint.sh new file mode 100755 index 0000000000..1f44ab4b77 --- /dev/null +++ b/tools/tdgpt/dockerfile/tdgpt/entrypoint.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +export PATH="/usr/local/taos/taosanode/venv/bin:$PATH" +export LANG=en_US.UTF-8 +export LC_CTYPE=en_US.UTF-8 +export LC_ALL=en_US.UTF-8 + +CONFIG_FILE="/usr/local/taos/taosanode/cfg/taosanode.ini" +if [ ! -f "$CONFIG_FILE" ]; then + echo "Error: Configuration file $CONFIG_FILE not found!" + exit 1 +fi + +echo "Starting uWSGI with config: $CONFIG_FILE" +exec /usr/local/taos/taosanode/venv/bin/uwsgi --ini "$CONFIG_FILE" + +if [ $? -ne 0 ]; then + echo "uWSGI failed to start. Exiting..." + exit 1 +fi diff --git a/tools/tdgpt/script/install.sh b/tools/tdgpt/script/install.sh index 9308b37cfc..9952b7f0af 100755 --- a/tools/tdgpt/script/install.sh +++ b/tools/tdgpt/script/install.sh @@ -482,6 +482,14 @@ function install_service_on_systemd() { ${csudo}systemctl daemon-reload } +function is_container() { + if [[ -f /.dockerenv ]] || grep -q "docker\|kubepods" /proc/1/cgroup || [[ -n "$KUBERNETES_SERVICE_HOST" || "$container" == "docker" ]]; then + return 0 # container env + else + return 1 # not container env + fi +} + function install_service() { if ((${service_mod} == 0)); then install_service_on_systemd $1 @@ -615,7 +623,9 @@ function updateProduct() { if [ -z $1 ]; then install_bin - install_services + if ! is_container; then + install_services + fi echo echo -e "${GREEN_DARK}To configure ${productName} ${NC}\t\t: edit ${global_conf_dir}/${configFile}" @@ -659,7 +669,9 @@ function installProduct() { install_module install_bin_and_lib - install_services + if ! 
is_container; then + install_services + fi echo echo -e "\033[44;32;1m${productName} is installed successfully!${NC}" From e333f44dfbca3deb218b6c50d891573811661f00 Mon Sep 17 00:00:00 2001 From: Alex Duan <51781608+DuanKuanJun@users.noreply.github.com> Date: Tue, 18 Mar 2025 17:48:24 +0800 Subject: [PATCH 08/23] fix(taosbenchmark): pass db options support prefix suffix with blank (#30205) * fix: pass db parameter support prefix suffix blank * fix: add check vgroups have blank case * fix: remove strlen from trimCaseCmp * fix: case init variant and remove taos-tools from .gitignore --- .gitignore | 1 - .../army/tools/benchmark/basic/insertBasic.py | 6 ++++- .../tools/benchmark/basic/insertPrecision.py | 8 ++++-- .../benchmark/basic/json/insertBasic.json | 2 +- .../basic/json/insertPrecisionMS.json | 2 +- .../basic/json/insertPrecisionNS.json | 2 +- .../basic/json/insertPrecisionUS.json | 2 +- tools/taos-tools/inc/bench.h | 3 +++ tools/taos-tools/src/benchInsert.c | 2 +- tools/taos-tools/src/benchJsonOpt.c | 14 +++++----- tools/taos-tools/src/benchUtil.c | 27 +++++++++++++++++++ 11 files changed, 53 insertions(+), 16 deletions(-) diff --git a/.gitignore b/.gitignore index cd1cac4a6d..aa1f567bf7 100644 --- a/.gitignore +++ b/.gitignore @@ -132,7 +132,6 @@ tools/THANKS tools/NEWS tools/COPYING tools/BUGS -tools/taos-tools tools/taosws-rs tags .clangd diff --git a/tests/army/tools/benchmark/basic/insertBasic.py b/tests/army/tools/benchmark/basic/insertBasic.py index dfc5e3aff1..9ae118f5bc 100644 --- a/tests/army/tools/benchmark/basic/insertBasic.py +++ b/tests/army/tools/benchmark/basic/insertBasic.py @@ -80,12 +80,16 @@ class TDTestCase(TBase): cmdVG = arr[1] # vgropus + vgroups = None try: if cmdVG != None: # command special vgroups first priority vgroups = cmdVG else: - vgroups = data["databases"][0]["dbinfo"]["vgroups"] + dbinfo = data["databases"][0]["dbinfo"] + for key,value in dbinfo.items(): + if key.strip().lower() == "vgroups": + vgroups = value except: vgroups = None diff --git a/tests/army/tools/benchmark/basic/insertPrecision.py b/tests/army/tools/benchmark/basic/insertPrecision.py index c0f0f25851..5be8285859 100644 --- a/tests/army/tools/benchmark/basic/insertPrecision.py +++ b/tests/army/tools/benchmark/basic/insertPrecision.py @@ -31,6 +31,7 @@ class TDTestCase(TBase): # exe insert cmd = f"{benchmark} {options} -f {jsonFile}" os.system(cmd) + precision = None # # check insert result @@ -39,14 +40,17 @@ class TDTestCase(TBase): data = json.load(file) db = data["databases"][0]["dbinfo"]["name"] - precison = data["databases"][0]["dbinfo"]["precision"] + dbinfo = data["databases"][0]["dbinfo"] + for key,value in dbinfo.items(): + if key.strip().lower() == "precision": + precision = value stb = data["databases"][0]["super_tables"][0]["name"] child_count = data["databases"][0]["super_tables"][0]["childtable_count"] insert_rows = data["databases"][0]["super_tables"][0]["insert_rows"] timestamp_step = data["databases"][0]["super_tables"][0]["timestamp_step"] start_timestamp = data["databases"][0]["super_tables"][0]["start_timestamp"] - tdLog.info(f"get json info: db={db} precision={precison} stb={stb} child_count={child_count} insert_rows={insert_rows} " + tdLog.info(f"get json info: db={db} precision={precision} stb={stb} child_count={child_count} insert_rows={insert_rows} " f"start_timestamp={start_timestamp} timestamp_step={timestamp_step} \n") # all count insert_rows * child_table_count diff --git a/tests/army/tools/benchmark/basic/json/insertBasic.json 
b/tests/army/tools/benchmark/basic/json/insertBasic.json index 2fc59befe9..b6b74accf5 100644 --- a/tests/army/tools/benchmark/basic/json/insertBasic.json +++ b/tests/army/tools/benchmark/basic/json/insertBasic.json @@ -14,7 +14,7 @@ "name": "test", "drop": "yes", "precision": "ns", - "vgroups": 2 + " vgroups ": 2 }, "super_tables": [ { diff --git a/tests/army/tools/benchmark/basic/json/insertPrecisionMS.json b/tests/army/tools/benchmark/basic/json/insertPrecisionMS.json index a07881511d..fcae767c33 100644 --- a/tests/army/tools/benchmark/basic/json/insertPrecisionMS.json +++ b/tests/army/tools/benchmark/basic/json/insertPrecisionMS.json @@ -13,7 +13,7 @@ "dbinfo": { "name": "test", "drop": "yes", - "precision": "ms", + " precision ": "ms", "vgroups": 2 }, "super_tables": [ diff --git a/tests/army/tools/benchmark/basic/json/insertPrecisionNS.json b/tests/army/tools/benchmark/basic/json/insertPrecisionNS.json index e18eef9508..887795b5c2 100644 --- a/tests/army/tools/benchmark/basic/json/insertPrecisionNS.json +++ b/tests/army/tools/benchmark/basic/json/insertPrecisionNS.json @@ -13,7 +13,7 @@ "dbinfo": { "name": "test", "drop": "yes", - "precision": "ns", + " precision": "ns", "vgroups": 2 }, "super_tables": [ diff --git a/tests/army/tools/benchmark/basic/json/insertPrecisionUS.json b/tests/army/tools/benchmark/basic/json/insertPrecisionUS.json index a29c50a95e..ce01d27a57 100644 --- a/tests/army/tools/benchmark/basic/json/insertPrecisionUS.json +++ b/tests/army/tools/benchmark/basic/json/insertPrecisionUS.json @@ -13,7 +13,7 @@ "dbinfo": { "name": "test", "drop": "yes", - "precision": "us", + "precision ": "us", "vgroups": 2 }, "super_tables": [ diff --git a/tools/taos-tools/inc/bench.h b/tools/taos-tools/inc/bench.h index b432e20113..8adb879301 100644 --- a/tools/taos-tools/inc/bench.h +++ b/tools/taos-tools/inc/bench.h @@ -1061,4 +1061,7 @@ int killSlowQuery(); // fetch super table child name from server int fetchChildTableName(char *dbName, char *stbName); +// trim prefix suffix blank cmp +int trimCaseCmp(char *str1,char *str2); + #endif // INC_BENCH_H_ diff --git a/tools/taos-tools/src/benchInsert.c b/tools/taos-tools/src/benchInsert.c index 894e4621a0..0ad4c1031a 100644 --- a/tools/taos-tools/src/benchInsert.c +++ b/tools/taos-tools/src/benchInsert.c @@ -643,7 +643,7 @@ int geneDbCreateCmd(SDataBase *database, char *command, int remainVnodes) { SDbCfg* cfg = benchArrayGet(database->cfgs, i); // check vgroups - if (strcasecmp(cfg->name, "vgroups") == 0) { + if (trimCaseCmp(cfg->name, "vgroups") == 0) { if (vgroups > 0) { // inputted vgroups by commandline infoPrint("ignore config set vgroups %d\n", cfg->valueint); diff --git a/tools/taos-tools/src/benchJsonOpt.c b/tools/taos-tools/src/benchJsonOpt.c index 9bbecebe65..952bef3e8b 100644 --- a/tools/taos-tools/src/benchJsonOpt.c +++ b/tools/taos-tools/src/benchJsonOpt.c @@ -602,9 +602,9 @@ void setDBCfgString(SDbCfg* cfg , char * value) { // need add quotation bool add = false; - if (0 == strcasecmp(cfg->name, "cachemodel") || - 0 == strcasecmp(cfg->name, "dnodes" ) || - 0 == strcasecmp(cfg->name, "precision" ) ) { + if (0 == trimCaseCmp(cfg->name, "cachemodel") || + 0 == trimCaseCmp(cfg->name, "dnodes" ) || + 0 == trimCaseCmp(cfg->name, "precision" ) ) { add = true; } @@ -676,12 +676,12 @@ static int getDatabaseInfo(tools_cJSON *dbinfos, int index) { && (0 == strcasecmp(cfg_object->valuestring, "yes"))) { database->flush = true; } - } else if (0 == strcasecmp(cfg_object->string, "precision")) { + } else if (0 == 
trimCaseCmp(cfg_object->string, "precision")) { if (tools_cJSON_IsString(cfg_object)) { - if (0 == strcasecmp(cfg_object->valuestring, "us")) { + if (0 == trimCaseCmp(cfg_object->valuestring, "us")) { database->precision = TSDB_TIME_PRECISION_MICRO; database->sml_precision = TSDB_SML_TIMESTAMP_MICRO_SECONDS; - } else if (0 == strcasecmp(cfg_object->valuestring, "ns")) { + } else if (0 == trimCaseCmp(cfg_object->valuestring, "ns")) { database->precision = TSDB_TIME_PRECISION_NANO; database->sml_precision = TSDB_SML_TIMESTAMP_NANO_SECONDS; } @@ -691,7 +691,7 @@ static int getDatabaseInfo(tools_cJSON *dbinfos, int index) { cfg->name = cfg_object->string; // get duration value - if (0 == strcasecmp(cfg_object->string, "duration")) { + if (0 == trimCaseCmp(cfg_object->string, "duration")) { database->durMinute = getDurationVal(cfg_object); } diff --git a/tools/taos-tools/src/benchUtil.c b/tools/taos-tools/src/benchUtil.c index 8dcca30f8c..f010920dbb 100644 --- a/tools/taos-tools/src/benchUtil.c +++ b/tools/taos-tools/src/benchUtil.c @@ -1776,3 +1776,30 @@ int fetchChildTableName(char *dbName, char *stbName) { // succ return 0; } + +// skip prefix suffix blank +int trimCaseCmp(char *str1, char *str2) { + // Skip leading whitespace in str1 + while (isblank((unsigned char)*str1)) { + str1++; + } + + // Compare characters case-insensitively + while (*str2 != '\0') { + if (tolower((unsigned char)*str1) != tolower((unsigned char)*str2)) { + return -1; + } + str1++; + str2++; + } + + // Check if the remaining characters in str1 are all whitespace + while (*str1 != '\0') { + if (!isblank((unsigned char)*str1)) { + return -1; + } + str1++; + } + + return 0; +} \ No newline at end of file From 789f36eddacaaa7896e7c68aaddbeebcd8bfae4a Mon Sep 17 00:00:00 2001 From: xinsheng Ren <285808407@qq.com> Date: Tue, 18 Mar 2025 17:52:21 +0800 Subject: [PATCH 09/23] docs: udf function, extract_avg (#30244) --- docs/en/07-develop/09-udf.md | 42 ++++++++++- docs/zh/07-develop/09-udf.md | 38 +++++++++- tests/script/sh/extract_avg.c | 128 ++++++++++++++++++++++++++++++++++ 3 files changed, 205 insertions(+), 3 deletions(-) create mode 100644 tests/script/sh/extract_avg.c diff --git a/docs/en/07-develop/09-udf.md b/docs/en/07-develop/09-udf.md index 138378e450..d39721de17 100644 --- a/docs/en/07-develop/09-udf.md +++ b/docs/en/07-develop/09-udf.md @@ -298,13 +298,53 @@ select max_vol(vol1, vol2, vol3, deviceid) from battery; +#### Aggregate Function Example 3 Split string and calculate average value [extract_avg](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/extract_avg.c) + +The `extract_avg` function converts a comma-separated string sequence into a set of numerical values, counts the results of all rows, and calculates the final average. Note when implementing: +- `interBuf->numOfResult` needs to return 1 or 0 and cannot be used for count. +- Count can use additional caches, such as the `SumCount` structure. +- Use `varDataVal` to obtain the string. + +Create table: + +```shell +create table scores(ts timestamp, varStr varchar(128)); +``` + +Create custom function: + +```shell +create aggregate function extract_avg as '/root/udf/libextract_avg.so' outputtype double bufsize 16 language 'C'; +``` + +Use custom function: + +```shell +select extract_avg(valStr) from scores; +``` + +Generate `.so` file +```bash +gcc -g -O0 -fPIC -shared extract_vag.c -o libextract_avg.so +``` + +
+<details>
+<summary>extract_avg.c</summary>
+
+```c
+{{#include tests/script/sh/extract_avg.c}}
+```
+
+</details>
+ + ## Developing UDFs in Python Language ### Environment Setup The specific steps to prepare the environment are as follows: -- Step 1, prepare the Python runtime environment. +- Step 1, prepare the Python runtime environment. If you compile and install Python locally, be sure to enable the `--enable-shared` option, otherwise the subsequent installation of taospyudf will fail due to failure to generate a shared library. - Step 2, install the Python package taospyudf. The command is as follows. ```shell diff --git a/docs/zh/07-develop/09-udf.md b/docs/zh/07-develop/09-udf.md index d0f9c93652..9438df0b1c 100644 --- a/docs/zh/07-develop/09-udf.md +++ b/docs/zh/07-develop/09-udf.md @@ -237,7 +237,7 @@ typedef struct SUdfInterBuf { #### 标量函数示例 [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c) -bit_add 实现多列的按位与功能。如果只有一列,返回这一列。bit_add 忽略空值。 +bit_and 实现多列的按位与功能。如果只有一列,返回这一列。bit_and 忽略空值。
 <summary>bit_and.c</summary>
@@ -287,12 +287,46 @@ select max_vol(vol1, vol2, vol3, deviceid) from battery;
+#### 聚合函数示例3 切分字符串求平均值 [extract_avg](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/extract_avg.c)
+
+`extract_avg` 函数是将一个逗号分隔的字符串数列转为一组数值,统计所有行的结果,计算最终平均值。实现时需注意:
+- `interBuf->numOfResult` 需要返回 1 或者 0,不能用于 count 计数。
+- count 计数可使用额外的缓存,例如 `SumCount` 结构体。
+- 字符串的获取需使用`varDataVal`。
+
+创建表:
+```bash
+create table scores(ts timestamp, varStr varchar(128));
+```
+创建自定义函数:
+```bash
+create aggregate function extract_avg as '/root/udf/libextract_avg.so' outputtype double bufsize 16 language 'C';
+```
+使用自定义函数:
+```bash
+select extract_avg(valStr) from scores;
+```
+
+生成 `.so` 文件
+```bash
+gcc -g -O0 -fPIC -shared extract_avg.c -o libextract_avg.so
+```
+
+<details>
+<summary>extract_avg.c</summary>
+
+```c
+{{#include tests/script/sh/extract_avg.c}}
+```
+
+</details>
+ ## 用 Python 语言开发 UDF ### 准备环境 准备环境的具体步骤如下: -- 第 1 步,准备好 Python 运行环境。 +- 第 1 步,准备好 Python 运行环境。本地编译安装 python 注意打开 `--enable-shared` 选项,不然后续安装 taospyudf 会因无法生成共享库而导致失败。 - 第 2 步,安装 Python 包 taospyudf。命令如下。 ```shell pip3 install taospyudf diff --git a/tests/script/sh/extract_avg.c b/tests/script/sh/extract_avg.c new file mode 100644 index 0000000000..508a73f7eb --- /dev/null +++ b/tests/script/sh/extract_avg.c @@ -0,0 +1,128 @@ +#include +#include +#include +#include "taos.h" +#include "taoserror.h" +#include "taosudf.h" + +// Define a structure to store sum and count +typedef struct { + double sum; + int count; +} SumCount; + +// initialization function +DLL_EXPORT int32_t extract_avg_init() { + udfTrace("extract_avg_init: Initializing UDF"); + return TSDB_CODE_SUCCESS; +} + +DLL_EXPORT int32_t extract_avg_start(SUdfInterBuf *interBuf) { + int32_t bufLen = sizeof(SumCount); + if (interBuf->bufLen < bufLen) { + udfError("extract_avg_start: Failed to execute UDF since input buflen:%d < %d", interBuf->bufLen, bufLen); + return TSDB_CODE_UDF_INVALID_BUFSIZE; + } + + // Initialize sum and count + SumCount *sumCount = (SumCount *)interBuf->buf; + sumCount->sum = 0.0; + sumCount->count = 0; + + interBuf->numOfResult = 0; + + udfTrace("extract_avg_start: Initialized sum=0.0, count=0"); + return TSDB_CODE_SUCCESS; +} + +DLL_EXPORT int32_t extract_avg(SUdfDataBlock *inputBlock, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) { + udfTrace("extract_avg: Processing data block with %d rows", inputBlock->numOfRows); + + // Check the number of columns in the input data block + if (inputBlock->numOfCols != 1) { + udfError("extract_avg: Invalid number of columns. Expected 1, got %d", inputBlock->numOfCols); + return TSDB_CODE_UDF_INVALID_INPUT; + } + + // Get the input column + SUdfColumn *inputCol = inputBlock->udfCols[0]; + + if (inputCol->colMeta.type != TSDB_DATA_TYPE_VARCHAR) { + udfError("extract_avg: Invalid data type. 
Expected VARCHAR, got %d", inputCol->colMeta.type); + return TSDB_CODE_UDF_INVALID_INPUT; + } + + // Read the current sum and count from interBuf + SumCount *sumCount = (SumCount *)interBuf->buf; + udfTrace("extract_avg: Starting with sum=%f, count=%d", sumCount->sum, sumCount->count); + + for (int i = 0; i < inputBlock->numOfRows; i++) { + if (udfColDataIsNull(inputCol, i)) { + udfTrace("extract_avg: Skipping NULL value at row %d", i); + continue; + } + + char *buf = (char *)udfColDataGetData(inputCol, i); + + char data[64]; + memset(data, 0, 64); + memcpy(data, varDataVal(buf), varDataLen(buf)); + + udfTrace("extract_avg: Processing row %d, data='%s'", i, data); + + char *rest = data; + char *token; + while ((token = strtok_r(rest, ",", &rest))) { + while (*token == ' ') token++; + int tokenLen = strlen(token); + while (tokenLen > 0 && token[tokenLen - 1] == ' ') token[--tokenLen] = '\0'; + + if (tokenLen == 0) { + udfTrace("extract_avg: Empty string encountered at row %d", i); + continue; + } + + char *endPtr; + double value = strtod(token, &endPtr); + + if (endPtr == token || *endPtr != '\0') { + udfError("extract_avg: Failed to convert string '%s' to double at row %d", token, i); + continue; + } + + sumCount->sum += value; + sumCount->count++; + udfTrace("extract_avg: Updated sum=%f, count=%d", sumCount->sum, sumCount->count); + } + } + + newInterBuf->bufLen = sizeof(SumCount); + newInterBuf->buf = (char *)malloc(newInterBuf->bufLen); + if (newInterBuf->buf == NULL) { + udfError("extract_avg: Failed to allocate memory for newInterBuf"); + return TSDB_CODE_UDF_INTERNAL_ERROR; + } + memcpy(newInterBuf->buf, sumCount, newInterBuf->bufLen); + newInterBuf->numOfResult = 0; + + udfTrace("extract_avg: Final sum=%f, count=%d", sumCount->sum, sumCount->count); + return TSDB_CODE_SUCCESS; +} + +DLL_EXPORT int32_t extract_avg_finish(SUdfInterBuf *interBuf, SUdfInterBuf *result) { + SumCount *sumCount = (SumCount *)interBuf->buf; + + double avg = (sumCount->count > 0) ? (sumCount->sum / sumCount->count) : 0.0; + + *(double *)result->buf = avg; + result->bufLen = sizeof(double); + result->numOfResult = sumCount->count > 0 ? 
1 : 0; + + udfTrace("extract_avg_finish: Final result=%f (sum=%f, count=%d)", avg, sumCount->sum, sumCount->count); + return TSDB_CODE_SUCCESS; +} + +DLL_EXPORT int32_t extract_avg_destroy() { + udfTrace("extract_avg_destroy: Cleaning up UDF"); + return TSDB_CODE_SUCCESS; +} From d02196a231e8a562d2ae2fa438a8b5e988d778a1 Mon Sep 17 00:00:00 2001 From: Simon Guan Date: Tue, 18 Mar 2025 17:55:36 +0800 Subject: [PATCH 10/23] refactor: adjust some logs (#30245) * refactor: adjust wal log infos * refactor: adjust some log's level * refactor: adjust some log's level * fix: add traceId for sync module * refactor: adjust log level * refactor: adjust log level * refactor: adjust log levels * refactor: adjust log levels * refactor: adjust log levels * refactor: adjust log levels * refactor: adjust log levels * refactor: adjust log levels * refactor: adjust log levels * docs: update keywords * fix: compile errors in windows * docs: minor changes * docs: minor changes * refactor: minor changes * docs: fminro changes * docs: format * refactr: remove uncessory logs * docs: format doc --- docs/zh/06-advanced/01-subscription.md | 4 +-- .../06-TDgpt/04-forecast/04-lstm.md | 9 ----- .../06-TDgpt/04-forecast/05-mlp.md | 2 +- source/common/src/tanalytics.c | 4 +-- source/dnode/mgmt/mgmt_dnode/src/dmHandle.c | 4 +-- source/dnode/mnode/impl/src/mndStream.c | 4 +-- source/dnode/mnode/impl/src/mndStreamTrans.c | 2 +- source/dnode/mnode/impl/src/mndTrans.c | 2 +- source/dnode/mnode/impl/src/mndUser.c | 2 +- source/dnode/vnode/src/meta/metaQuery.c | 2 +- source/dnode/vnode/src/sma/smaEnv.c | 2 +- source/dnode/vnode/src/tq/tq.c | 2 +- source/dnode/vnode/src/tq/tqRead.c | 6 ++-- source/dnode/vnode/src/vnd/vnodeSvr.c | 2 +- source/libs/executor/src/dataDeleter.c | 2 +- source/libs/executor/src/scanoperator.c | 8 ++--- source/libs/index/src/index.c | 4 +-- source/libs/index/src/indexTfile.c | 4 +-- source/libs/parser/src/parTranslater.c | 2 +- source/libs/planner/src/planOptimizer.c | 4 +-- source/libs/planner/src/planSpliter.c | 4 +-- source/libs/qworker/src/qworker.c | 2 +- source/libs/stream/src/streamBackendRocksdb.c | 36 +++++++++---------- source/libs/stream/src/streamSnapshot.c | 4 +-- source/libs/stream/src/tstreamFileState.c | 4 +-- source/libs/sync/src/syncRequestVoteReply.c | 2 +- source/libs/transport/src/thttp.c | 34 +++++++++--------- source/libs/transport/src/transCli.c | 2 +- source/libs/transport/src/transSvr.c | 30 ++++++++-------- tools/taos-tools/src/benchCommandOpt.c | 4 +-- tools/taos-tools/src/benchData.c | 4 +-- tools/taos-tools/src/benchInsert.c | 12 +++---- tools/taos-tools/src/benchUtil.c | 2 +- tools/taos-tools/src/taosdump.c | 14 ++++---- 34 files changed, 108 insertions(+), 117 deletions(-) diff --git a/docs/zh/06-advanced/01-subscription.md b/docs/zh/06-advanced/01-subscription.md index 8bd44bf2a0..3b5244cc7c 100644 --- a/docs/zh/06-advanced/01-subscription.md +++ b/docs/zh/06-advanced/01-subscription.md @@ -64,7 +64,7 @@ CREATE TOPIC [IF NOT EXISTS] topic_name [with meta] AS DATABASE db_name; ## 删除主题 -如果不再需要订阅数据,可以删除 topic,如果当前 topic 被消费者订阅,通过 FORCE 语法可强制删除,强制删除后订阅的消费者会消费数据会出错(FORCE 语法3.3.6.0版本开始支持)。 +如果不再需要订阅数据,可以删除 topic,如果当前 topic 被消费者订阅,通过 FORCE 语法可强制删除,强制删除后订阅的消费者会消费数据会出错(FORCE 语法从 v3.3.6.0 开始支持)。 ```sql DROP TOPIC [IF EXISTS] [FORCE] topic_name; @@ -94,7 +94,7 @@ SHOW CONSUMERS; ### 删除消费组 -消费者创建的时候,会给消费者指定一个消费者组,消费者不能显式的删除,但是可以删除消费者组。如果当前消费者组里有消费者在消费,通过 FORCE 语法可强制删除,强制删除后订阅的消费者会消费数据会出错(FORCE 语法3.3.6.0版本开始支持)。 +消费者创建的时候,会给消费者指定一个消费者组,消费者不能显式的删除,但是可以删除消费者组。如果当前消费者组里有消费者在消费,通过 FORCE 
语法可强制删除,强制删除后订阅的消费者会消费数据会出错(FORCE 语法从 v3.3.6.0 开始支持)。 ```sql DROP CONSUMER GROUP [IF EXISTS] [FORCE] cgroup_name ON topic_name; ``` diff --git a/docs/zh/06-advanced/06-TDgpt/04-forecast/04-lstm.md b/docs/zh/06-advanced/06-TDgpt/04-forecast/04-lstm.md index b4ab0e5bd6..28ae919de0 100644 --- a/docs/zh/06-advanced/06-TDgpt/04-forecast/04-lstm.md +++ b/docs/zh/06-advanced/06-TDgpt/04-forecast/04-lstm.md @@ -7,14 +7,6 @@ sidebar_label: "LSTM" ## 功能概述 -LSTM模型即长短期记忆网络(Long Short Term Memory),是一种特殊的循环神经网络,适用于处理时间序列数据、自然语言处理等任务,通过其独特的门控机制,能够有效捕捉长期依赖关系, -解决传统RNN的梯度消失问题,从而对序列数据进行准确预测,不过它不直接提供计算的置信区间范围结果。 - - -完整的调用SQL语句如下: -```SQL -SELECT _frowts, FORECAST(i32, "algo=lstm") from foo -======= LSTM 模型即长短期记忆网络(Long Short Term Memory),是一种特殊的循环神经网络,适用于处理时间序列数据、自然语言处理等任务,通过其独特的门控机制,能够有效捕捉长期依赖关系, 解决传统 RNN 的梯度消失问题,从而对序列数据进行准确预测,不过它不直接提供计算的置信区间范围结果。 @@ -22,7 +14,6 @@ LSTM 模型即长短期记忆网络(Long Short Term Memory),是一种特殊的 完整的调用 SQL 语句如下: ```SQL SELECT _frowts, FORECAST(i32, "algo=lstm,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5") from foo ->>>>>>> 3.0 ``` ```json5 diff --git a/docs/zh/06-advanced/06-TDgpt/04-forecast/05-mlp.md b/docs/zh/06-advanced/06-TDgpt/04-forecast/05-mlp.md index df8b6128cd..2263d4f3d9 100644 --- a/docs/zh/06-advanced/06-TDgpt/04-forecast/05-mlp.md +++ b/docs/zh/06-advanced/06-TDgpt/04-forecast/05-mlp.md @@ -23,7 +23,7 @@ SELECT _frowts, FORECAST(i32, "algo=mlp") from foo "rows": fc_rows, // 返回结果的行数 "period": period, // 返回结果的周期性,同输入 "alpha": alpha, // 返回结果的置信区间,同输入 -"algo": "mlp", // 返回结果使用的算法 +"algo": "mlp", // 返回结果使用的算法 "mse": mse, // 拟合输入时间序列时候生成模型的最小均方误差(MSE) "res": res // 列模式的结果 } diff --git a/source/common/src/tanalytics.c b/source/common/src/tanalytics.c index 397accc0b1..8818839f09 100644 --- a/source/common/src/tanalytics.c +++ b/source/common/src/tanalytics.c @@ -559,10 +559,10 @@ static int32_t taosAnalyJsonBufWriteColData(SAnalyticBuf *pBuf, int32_t colIndex break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: - bufLen += tsnprintf(buf + bufLen, sizeof(buf) - bufLen, "%" PRId64 "", *(int64_t *)colValue); + bufLen += tsnprintf(buf + bufLen, sizeof(buf) - bufLen, "%" PRId64, *(int64_t *)colValue); break; case TSDB_DATA_TYPE_UBIGINT: - bufLen += tsnprintf(buf + bufLen, sizeof(buf) - bufLen, "%" PRIu64 "", *(uint64_t *)colValue); + bufLen += tsnprintf(buf + bufLen, sizeof(buf) - bufLen, "%" PRIu64, *(uint64_t *)colValue); break; case TSDB_DATA_TYPE_FLOAT: bufLen += tsnprintf(buf + bufLen, sizeof(buf) - bufLen, "%f", GET_FLOAT_VAL(colValue)); diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index 7ea477821a..fc4ead8973 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -48,10 +48,10 @@ static void dmUpdateDnodeCfg(SDnodeMgmt *pMgmt, SDnodeCfg *pCfg) { static void dmMayShouldUpdateIpWhiteList(SDnodeMgmt *pMgmt, int64_t ver) { int32_t code = 0; - dDebug("ip-white-list on dnode ver: %" PRId64 ", status ver: %" PRId64 "", pMgmt->pData->ipWhiteVer, ver); + dDebug("ip-white-list on dnode ver: %" PRId64 ", status ver: %" PRId64, pMgmt->pData->ipWhiteVer, ver); if (pMgmt->pData->ipWhiteVer == ver) { if (ver == 0) { - dDebug("disable ip-white-list on dnode ver: %" PRId64 ", status ver: %" PRId64 "", pMgmt->pData->ipWhiteVer, ver); + dDebug("disable ip-white-list on dnode ver: %" PRId64 ", status ver: %" PRId64, pMgmt->pData->ipWhiteVer, ver); if (rpcSetIpWhite(pMgmt->msgCb.serverRpc, NULL) != 0) { dError("failed to disable ip white list on dnode"); } diff --git 
a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 28dd5978dd..324f401cf6 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -1183,7 +1183,7 @@ int64_t mndStreamGenChkptId(SMnode *pMnode, bool lock) { if (pIter == NULL) break; maxChkptId = TMAX(maxChkptId, pStream->checkpointId); - mDebug("stream:%p, %s id:0x%" PRIx64 " checkpoint %" PRId64 "", pStream, pStream->name, pStream->uid, + mDebug("stream:%p, %s id:0x%" PRIx64 " checkpoint %" PRId64, pStream, pStream->name, pStream->uid, pStream->checkpointId); sdbRelease(pSdb, pStream); } @@ -1257,7 +1257,7 @@ static int32_t mndProcessStreamCheckpointTrans(SMnode *pMnode, SStreamObj *pStre goto _ERR; } - mDebug("start to trigger checkpoint for stream:%s, checkpoint: %" PRId64 "", pStream->name, checkpointId); + mDebug("start to trigger checkpoint for stream:%s, checkpoint: %" PRId64, pStream->name, checkpointId); taosWLockLatch(&pStream->lock); pStream->currentTick = 1; diff --git a/source/dnode/mnode/impl/src/mndStreamTrans.c b/source/dnode/mnode/impl/src/mndStreamTrans.c index 1162796554..7cee4971b7 100644 --- a/source/dnode/mnode/impl/src/mndStreamTrans.c +++ b/source/dnode/mnode/impl/src/mndStreamTrans.c @@ -267,7 +267,7 @@ _over: } terrno = 0; - mTrace("stream:%s, encode to raw:%p, row:%p, checkpoint:%" PRId64 "", pStream->name, pRaw, pStream, + mTrace("stream:%s, encode to raw:%p, row:%p, checkpoint:%" PRId64, pStream->name, pRaw, pStream, pStream->checkpointId); return pRaw; } diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 57e1e01f2c..ab14886e13 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -1945,7 +1945,7 @@ void mndTransExecuteImp(SMnode *pMnode, STrans *pTrans, bool topHalf) { bool continueExec = true; while (continueExec) { - mInfo("trans:%d, continue to execute stage:%s in %s, createTime:%" PRId64 "", pTrans->id, + mInfo("trans:%d, continue to execute stage:%s in %s, createTime:%" PRId64, pTrans->id, mndTransStr(pTrans->stage), mndStrExecutionContext(topHalf), pTrans->createdTime); pTrans->lastExecTime = taosGetTimestampMs(); switch (pTrans->stage) { diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c index 5cfa8bd82c..c34def8d3e 100644 --- a/source/dnode/mnode/impl/src/mndUser.c +++ b/source/dnode/mnode/impl/src/mndUser.c @@ -338,7 +338,7 @@ int64_t mndGetIpWhiteVer(SMnode *pMnode) { if (mndEnableIpWhiteList(pMnode) == 0 || tsEnableWhiteList == false) { ver = 0; } - mDebug("ip-white-list on mnode ver: %" PRId64 "", ver); + mDebug("ip-white-list on mnode ver: %" PRId64, ver); return ver; } diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index c2bdbd32bb..a327a5835c 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -1524,7 +1524,7 @@ int32_t metaGetTableTagsByUids(void *pVnode, int64_t suid, SArray *uidList) { memcpy(p->pTagVal, val, len); tdbFree(val); } else { - metaError("vgId:%d, failed to table tags, suid: %" PRId64 ", uid: %" PRId64 "", TD_VID(pMeta->pVnode), suid, + metaError("vgId:%d, failed to table tags, suid: %" PRId64 ", uid: %" PRId64, TD_VID(pMeta->pVnode), suid, p->uid); } } diff --git a/source/dnode/vnode/src/sma/smaEnv.c b/source/dnode/vnode/src/sma/smaEnv.c index ba486bece5..b51163133e 100644 --- a/source/dnode/vnode/src/sma/smaEnv.c +++ b/source/dnode/vnode/src/sma/smaEnv.c @@ -457,7 
+457,7 @@ static int32_t tdRsmaStopExecutor(const SSma *pSma) { for (int32_t i = 0; i < tsNumOfVnodeRsmaThreads; ++i) { if (taosCheckPthreadValid(pthread[i])) { - smaDebug("vgId:%d, start to join pthread for rsma:%" PRId64 "", SMA_VID(pSma), taosGetPthreadId(pthread[i])); + smaDebug("vgId:%d, start to join pthread for rsma:%" PRId64, SMA_VID(pSma), taosGetPthreadId(pthread[i])); (void)taosThreadJoin(pthread[i], NULL); } } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 7ddb45b7a6..933a17f37b 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -736,7 +736,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg req.vgId, req.subKey, req.newConsumerId, req.oldConsumerId); } if (req.newConsumerId == -1) { - tqError("vgId:%d, tq invalid rebalance request, new consumerId %" PRId64 "", req.vgId, req.newConsumerId); + tqError("vgId:%d, tq invalid rebalance request, new consumerId %" PRId64, req.vgId, req.newConsumerId); ret = TSDB_CODE_INVALID_PARA; goto end; } diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 6f14cb6810..7588ebb7f6 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -565,7 +565,7 @@ bool tqNextBlockImpl(STqReader* pReader, const char* idstr) { void* ret = taosHashGet(pReader->tbIdHash, &pSubmitTbData->uid, sizeof(int64_t)); TSDB_CHECK_CONDITION(ret == NULL, code, lino, END, true); - tqTrace("iterator data block in hash jump block, progress:%d/%d, uid:%" PRId64 "", pReader->nextBlk, blockSz, uid); + tqTrace("iterator data block in hash jump block, progress:%d/%d, uid:%" PRId64, pReader->nextBlk, blockSz, uid); pReader->nextBlk++; } @@ -593,7 +593,7 @@ bool tqNextDataBlockFilterOut(STqReader* pReader, SHashObj* filterOutUids) { uid = pSubmitTbData->uid; void* ret = taosHashGet(filterOutUids, &pSubmitTbData->uid, sizeof(int64_t)); TSDB_CHECK_NULL(ret, code, lino, END, true); - tqTrace("iterator data block in hash jump block, progress:%d/%d, uid:%" PRId64 "", pReader->nextBlk, blockSz, uid); + tqTrace("iterator data block in hash jump block, progress:%d/%d, uid:%" PRId64, pReader->nextBlk, blockSz, uid); pReader->nextBlk++; } tqReaderClearSubmitMsg(pReader); @@ -1848,7 +1848,7 @@ bool tqNextVTableSourceBlockImpl(STqReader* pReader, const char* idstr) { return true; } } - tqTrace("iterator data block in hash jump block, progress:%d/%d, uid:%" PRId64 "", pReader->nextBlk, blockSz, + tqTrace("iterator data block in hash jump block, progress:%d/%d, uid:%" PRId64, pReader->nextBlk, blockSz, pTbUid); pReader->nextBlk++; } diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index b1a3297ace..3563b71762 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -617,7 +617,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg (void)taosThreadMutexUnlock(&pVnode->mutex); if (ver <= pVnode->state.applied) { - vError("vgId:%d, duplicate write request. ver: %" PRId64 ", applied: %" PRId64 "", TD_VID(pVnode), ver, + vError("vgId:%d, duplicate write request. 
ver: %" PRId64 ", applied: %" PRId64, TD_VID(pVnode), ver, pVnode->state.applied); return terrno = TSDB_CODE_VND_DUP_REQUEST; } diff --git a/source/libs/executor/src/dataDeleter.c b/source/libs/executor/src/dataDeleter.c index 26e1abeee5..97dead559f 100644 --- a/source/libs/executor/src/dataDeleter.c +++ b/source/libs/executor/src/dataDeleter.c @@ -98,7 +98,7 @@ static int32_t toDataCacheEntry(SDataDeleterHandle* pHandle, const SInputData* p pRes->ekey = pHandle->pDeleter->deleteTimeRange.ekey; } - qDebug("delete %" PRId64 " rows, from %" PRId64 " to %" PRId64 "", pRes->affectedRows, pRes->skey, pRes->ekey); + qDebug("delete %" PRId64 " rows, from %" PRId64 " to %" PRId64, pRes->affectedRows, pRes->skey, pRes->ekey); pBuf->useSize += pEntry->dataLen; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index a1113adac5..7b54ef878c 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -3340,7 +3340,7 @@ static int32_t doQueueScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { bool hasPrimaryKey = pAPI->tqReaderFn.tqGetTablePrimaryKey(pInfo->tqReader); code = processPrimaryKey(pResult, hasPrimaryKey, &pTaskInfo->streamInfo.currentOffset); QUERY_CHECK_CODE(code, lino, _end); - qDebug("tmqsnap doQueueScan get data utid:%" PRId64 "", pResult->info.id.uid); + qDebug("tmqsnap doQueueScan get data utid:%" PRId64, pResult->info.id.uid); if (pResult->info.rows > 0) { (*ppRes) = pResult; return code; @@ -3355,7 +3355,7 @@ static int32_t doQueueScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { pTSInfo->base.dataReader = NULL; int64_t validVer = pTaskInfo->streamInfo.snapshotVer + 1; - qDebug("queue scan tsdb over, switch to wal ver %" PRId64 "", validVer); + qDebug("queue scan tsdb over, switch to wal ver %" PRId64, validVer); if (pAPI->tqReaderFn.tqReaderSeek(pInfo->tqReader, validVer, pTaskInfo->id.str) < 0) { (*ppRes) = NULL; return code; @@ -4316,7 +4316,7 @@ static int32_t doRawScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { bool hasPrimaryKey = pAPI->snapshotFn.taosXGetTablePrimaryKey(pInfo->sContext); code = processPrimaryKey(pBlock, hasPrimaryKey, &pTaskInfo->streamInfo.currentOffset); QUERY_CHECK_CODE(code, lino, _end); - qDebug("tmqsnap doRawScan get data uid:%" PRId64 "", pBlock->info.id.uid); + qDebug("tmqsnap doRawScan get data uid:%" PRId64, pBlock->info.id.uid); (*ppRes) = pBlock; return code; } @@ -4336,7 +4336,7 @@ static int32_t doRawScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { } else { SValue val = {0}; tqOffsetResetToData(&offset, mtInfo.uid, INT64_MIN, val); - qDebug("tmqsnap change get data uid:%" PRId64 "", mtInfo.uid); + qDebug("tmqsnap change get data uid:%" PRId64, mtInfo.uid); } destroyMetaTableInfo(&mtInfo); code = qStreamPrepareScan(pTaskInfo, &offset, pInfo->sContext->subType); diff --git a/source/libs/index/src/index.c b/source/libs/index/src/index.c index 2e08f486ff..18f85fd4a1 100644 --- a/source/libs/index/src/index.c +++ b/source/libs/index/src/index.c @@ -790,7 +790,7 @@ static int64_t idxGetAvailableVer(SIndex* sIdx, IndexCache* cache) { if (rd != NULL) { ver = (ver > rd->header.version ? 
ver : rd->header.version) + 1; - indexInfo("header: %" PRId64 ", ver: %" PRId64 "", rd->header.version, ver); + indexInfo("header: %" PRId64 ", ver: %" PRId64, rd->header.version, ver); } tfileReaderUnRef(rd); return ver; @@ -799,7 +799,7 @@ static int32_t idxGenTFile(SIndex* sIdx, IndexCache* cache, SArray* batch) { int32_t code = 0; int64_t version = idxGetAvailableVer(sIdx, cache); - indexInfo("file name version: %" PRId64 "", version); + indexInfo("file name version: %" PRId64, version); TFileWriter* tw = NULL; diff --git a/source/libs/index/src/indexTfile.c b/source/libs/index/src/indexTfile.c index b67873a8b4..267bcc31ec 100644 --- a/source/libs/index/src/indexTfile.c +++ b/source/libs/index/src/indexTfile.c @@ -603,7 +603,7 @@ int32_t tfileReaderOpen(SIndex* idx, uint64_t suid, int64_t version, const char* return code; } wc->lru = idx->lru; - indexTrace("open read file name:%s, file size: %" PRId64 "", wc->file.buf, wc->file.size); + indexTrace("open read file name:%s, file size: %" PRId64, wc->file.buf, wc->file.size); return tfileReaderCreate(wc, pReader); } @@ -787,7 +787,7 @@ int idxTFileSearch(void* tfile, SIndexTermQuery* query, SIdxTRslt* result) { return 0; } int64_t cost = taosGetTimestampUs() - st; - indexInfo("index tfile stage 1 cost: %" PRId64 "", cost); + indexInfo("index tfile stage 1 cost: %" PRId64, cost); return tfileReaderSearch(reader, query, result); } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index d74834d298..8f2758fe7a 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -3357,7 +3357,7 @@ static int32_t rewriteQueryTimeFunc(STranslateContext* pCxt, int64_t val, SNode* if (NULL == pStr) { return terrno; } - snprintf(pStr, 20, "%" PRId64 "", val); + snprintf(pStr, 20, "%" PRId64, val); int32_t code = rewriteFuncToValue(pCxt, &pStr, pNode); if (TSDB_CODE_SUCCESS != code) taosMemoryFree(pStr); return code; diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 550a14b002..abdf8065cf 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -4078,7 +4078,7 @@ static int32_t rewriteUniqueOptCreateFirstFunc(SFunctionNode* pSelectValue, SNod } else { int64_t pointer = (int64_t)pFunc; char name[TSDB_FUNC_NAME_LEN + TSDB_POINTER_PRINT_BYTES + TSDB_NAME_DELIMITER_LEN + 1] = {0}; - int32_t len = tsnprintf(name, sizeof(name) - 1, "%s.%" PRId64 "", pFunc->functionName, pointer); + int32_t len = tsnprintf(name, sizeof(name) - 1, "%s.%" PRId64, pFunc->functionName, pointer); (void)taosHashBinary(name, len); tstrncpy(pFunc->node.aliasName, name, TSDB_COL_NAME_LEN); } @@ -7521,7 +7521,7 @@ static int32_t tsmaOptCreateWStart(int8_t precision, SFunctionNode** pWStartOut) tstrncpy(pWStart->functionName, "_wstart", TSDB_FUNC_NAME_LEN); int64_t pointer = (int64_t)pWStart; char name[TSDB_COL_NAME_LEN + TSDB_POINTER_PRINT_BYTES + TSDB_NAME_DELIMITER_LEN + 1] = {0}; - int32_t len = tsnprintf(name, sizeof(name) - 1, "%s.%" PRId64 "", pWStart->functionName, pointer); + int32_t len = tsnprintf(name, sizeof(name) - 1, "%s.%" PRId64, pWStart->functionName, pointer); (void)taosHashBinary(name, len); tstrncpy(pWStart->node.aliasName, name, TSDB_COL_NAME_LEN); pWStart->node.resType.precision = precision; diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index 84ce3217a8..ac3208a611 100644 --- a/source/libs/planner/src/planSpliter.c +++ 
b/source/libs/planner/src/planSpliter.c @@ -444,7 +444,7 @@ static int32_t stbSplAppendWStart(SNodeList* pFuncs, int32_t* pIndex, uint8_t pr tstrncpy(pWStart->functionName, "_wstart", TSDB_FUNC_NAME_LEN); int64_t pointer = (int64_t)pWStart; char name[TSDB_COL_NAME_LEN + TSDB_POINTER_PRINT_BYTES + TSDB_NAME_DELIMITER_LEN + 1] = {0}; - int32_t len = tsnprintf(name, sizeof(name) - 1, "%s.%" PRId64 "", pWStart->functionName, pointer); + int32_t len = tsnprintf(name, sizeof(name) - 1, "%s.%" PRId64, pWStart->functionName, pointer); (void)taosHashBinary(name, len); tstrncpy(pWStart->node.aliasName, name, TSDB_COL_NAME_LEN); pWStart->node.resType.precision = precision; @@ -476,7 +476,7 @@ static int32_t stbSplAppendWEnd(SWindowLogicNode* pWin, int32_t* pIndex) { tstrncpy(pWEnd->functionName, "_wend", TSDB_FUNC_NAME_LEN); int64_t pointer = (int64_t)pWEnd; char name[TSDB_COL_NAME_LEN + TSDB_POINTER_PRINT_BYTES + TSDB_NAME_DELIMITER_LEN + 1] = {0}; - int32_t len = tsnprintf(name, sizeof(name) - 1, "%s.%" PRId64 "", pWEnd->functionName, pointer); + int32_t len = tsnprintf(name, sizeof(name) - 1, "%s.%" PRId64, pWEnd->functionName, pointer); (void)taosHashBinary(name, len); tstrncpy(pWEnd->node.aliasName, name, TSDB_COL_NAME_LEN); diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 024b166d41..0db1832e17 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -302,7 +302,7 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, QW_SINK_DISABLE_MEMPOOL(); if (len < 0) { - QW_TASK_ELOG("invalid length from dsGetDataLength, length:%" PRId64 "", len); + QW_TASK_ELOG("invalid length from dsGetDataLength, length:%" PRId64, len); QW_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); } diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index eb011f91f8..ced9400df0 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -228,7 +228,7 @@ int32_t rebuildDirFromCheckpoint(const char* path, int64_t chkpId, char** dst) { return terrno; } - nBytes = snprintf(chkp, cap, "%s%s%s%scheckpoint%" PRId64 "", path, TD_DIRSEP, "checkpoints", TD_DIRSEP, chkpId); + nBytes = snprintf(chkp, cap, "%s%s%s%scheckpoint%" PRId64, path, TD_DIRSEP, "checkpoints", TD_DIRSEP, chkpId); if (nBytes <= 0 || nBytes >= cap) { taosMemoryFree(state); taosMemoryFree(chkp); @@ -349,7 +349,7 @@ int32_t remoteChkp_validAndCvtMeta(char* path, SSChkpMetaOnS3* pMeta, int64_t ch goto _EXIT; } - nBytes = snprintf(src, cap, "%s%s%s_%" PRId64 "", path, TD_DIRSEP, key, pMeta->currChkptId); + nBytes = snprintf(src, cap, "%s%s%s_%" PRId64, path, TD_DIRSEP, key, pMeta->currChkptId); if (nBytes <= 0 || nBytes >= cap) { code = TSDB_CODE_OUT_OF_RANGE; goto _EXIT; @@ -402,7 +402,7 @@ int32_t remoteChkpGetDelFile(char* path, SArray* toDel) { return terrno; } - nBytes = snprintf(p, cap, "%s_%" PRId64 "", key, pMeta->currChkptId); + nBytes = snprintf(p, cap, "%s_%" PRId64, key, pMeta->currChkptId); if (nBytes <= 0 || nBytes >= cap) { taosMemoryFree(pMeta); taosMemoryFree(p); @@ -752,7 +752,7 @@ int32_t restoreCheckpointData(const char* path, const char* key, int64_t chkptId stDebug("%s check local backend dir:%s, checkpointId:%" PRId64 " succ", key, defaultPath, chkptId); if (chkptId > 0) { - nBytes = snprintf(checkpointPath, cap, "%s%s%s%s%s%" PRId64 "", prefixPath, TD_DIRSEP, "checkpoints", TD_DIRSEP, + nBytes = snprintf(checkpointPath, cap, "%s%s%s%s%s%" PRId64, 
prefixPath, TD_DIRSEP, "checkpoints", TD_DIRSEP, "checkpoint", chkptId); if (nBytes <= 0 || nBytes >= cap) { code = TSDB_CODE_OUT_OF_RANGE; @@ -1088,7 +1088,7 @@ int32_t delObsoleteCheckpoint(void* arg, const char* path) { for (int i = 0; i < taosArrayGetSize(chkpDel); i++) { int64_t id = *(int64_t*)taosArrayGet(chkpDel, i); char tbuf[256] = {0}; - sprintf(tbuf, "%s%scheckpoint%" PRId64 "", path, TD_DIRSEP, id); + sprintf(tbuf, "%s%scheckpoint%" PRId64, path, TD_DIRSEP, id); if (taosIsDir(tbuf)) { taosRemoveDir(tbuf); } @@ -1166,7 +1166,7 @@ int32_t chkpMayDelObsolete(void* arg, int64_t chkpId, char* path) { for (int i = 0; i < taosArrayGetSize(chkpDel); i++) { int64_t id = *(int64_t*)taosArrayGet(chkpDel, i); char tbuf[256] = {0}; - if (snprintf(tbuf, sizeof(tbuf), "%s%scheckpoint%" PRId64 "", path, TD_DIRSEP, id) >= sizeof(tbuf)) { + if (snprintf(tbuf, sizeof(tbuf), "%s%scheckpoint%" PRId64, path, TD_DIRSEP, id) >= sizeof(tbuf)) { code = TSDB_CODE_OUT_OF_RANGE; TAOS_CHECK_GOTO(code, NULL, _exception); } @@ -1222,7 +1222,7 @@ int32_t taskDbLoadChkpInfo(STaskDbWrapper* pBackend) { char checkpointPrefix[32] = {0}; int64_t checkpointId = 0; - int ret = sscanf(taosGetDirEntryName(de), "checkpoint%" PRId64 "", &checkpointId); + int ret = sscanf(taosGetDirEntryName(de), "checkpoint%" PRId64, &checkpointId); if (ret == 1) { if (taosArrayPush(pBackend->chkpSaved, &checkpointId) == NULL) { TAOS_CHECK_GOTO(terrno, NULL, _exception); @@ -1439,7 +1439,7 @@ int32_t taskDbDestroySnap(void* arg, SArray* pSnapInfo) { } STaskDbWrapper** pTaskDb = taosHashGet(pMeta->pTaskDbUnique, buf, strlen(buf)); if (pTaskDb == NULL || *pTaskDb == NULL) { - stWarn("stream backend:%p failed to find task db, streamId:% " PRId64 "", pMeta, pSnap->streamId); + stWarn("stream backend:%p failed to find task db, streamId:% " PRId64, pMeta, pSnap->streamId); memset(buf, 0, sizeof(buf)); continue; } @@ -1540,7 +1540,7 @@ int32_t chkpLoadExtraInfo(char* pChkpIdDir, int64_t* chkpId, int64_t* processId) goto _EXIT; } - if (sscanf(buf, "%" PRId64 " %" PRId64 "", chkpId, processId) < 2) { + if (sscanf(buf, "%" PRId64 " %" PRId64, chkpId, processId) < 2) { code = TSDB_CODE_INVALID_PARA; stError("failed to read file content to load extra info, file:%s, reason:%s", pDst, tstrerror(code)); goto _EXIT; @@ -1588,7 +1588,7 @@ int32_t chkpAddExtraInfo(char* pChkpIdDir, int64_t chkpId, int64_t processId) { goto _EXIT; } - nBytes = snprintf(buf, sizeof(buf), "%" PRId64 " %" PRId64 "", chkpId, processId); + nBytes = snprintf(buf, sizeof(buf), "%" PRId64 " %" PRId64, chkpId, processId); if (nBytes <= 0 || nBytes >= sizeof(buf)) { code = TSDB_CODE_OUT_OF_RANGE; stError("failed to build content to add extra info, dir:%s,reason:%s", pChkpIdDir, tstrerror(code)); @@ -1633,11 +1633,11 @@ int32_t taskDbDoCheckpoint(void* arg, int64_t chkpId, int64_t processId) { // flush db if (written > 0) { - stDebug("stream backend:%p start to flush db at:%s, data written:%" PRId64 "", pTaskDb, pChkpIdDir, written); + stDebug("stream backend:%p start to flush db at:%s, data written:%" PRId64, pTaskDb, pChkpIdDir, written); code = chkpPreFlushDb(pTaskDb->db, ppCf, nCf); if (code != 0) goto _EXIT; } else { - stDebug("stream backend:%p not need flush db at:%s, data written:%" PRId64 "", pTaskDb, pChkpIdDir, written); + stDebug("stream backend:%p not need flush db at:%s, data written:%" PRId64, pTaskDb, pChkpIdDir, written); } // do checkpoint @@ -2724,7 +2724,7 @@ int32_t taskDbGenChkpUploadData__rsync(STaskDbWrapper* pDb, int64_t chkpId, char } nBytes = - 
snprintf(buf, cap, "%s%s%s%s%s%" PRId64 "", pDb->path, TD_DIRSEP, "checkpoints", TD_DIRSEP, "checkpoint", chkpId); + snprintf(buf, cap, "%s%s%s%s%s%" PRId64, pDb->path, TD_DIRSEP, "checkpoints", TD_DIRSEP, "checkpoint", chkpId); if (nBytes <= 0 || nBytes >= cap) { taosMemoryFree(buf); TAOS_UNUSED(taosReleaseRef(taskDbWrapperId, refId)); @@ -4474,7 +4474,7 @@ int32_t streamDefaultIterGet_rocksdb(SStreamState* pState, const void* start, co } if (strncmp(key, start, strlen(start)) == 0 && strlen(key) >= strlen(start) + 1) { int64_t checkPoint = 0; - if (sscanf(key + strlen(key), ":%" PRId64 "", &checkPoint) == 1) { + if (sscanf(key + strlen(key), ":%" PRId64, &checkPoint) == 1) { if (taosArrayPush(result, &checkPoint) == NULL) { code = terrno; break; @@ -4852,7 +4852,7 @@ int32_t dbChkpGetDelta(SDbChkp* p, int64_t chkpId, SArray* list) { memset(p->buf, 0, p->len); nBytes = - snprintf(p->buf, p->len, "%s%s%s%scheckpoint%" PRId64 "", p->path, TD_DIRSEP, "checkpoints", TD_DIRSEP, chkpId); + snprintf(p->buf, p->len, "%s%s%s%scheckpoint%" PRId64, p->path, TD_DIRSEP, "checkpoints", TD_DIRSEP, chkpId); if (nBytes <= 0 || nBytes >= p->len) { TAOS_UNUSED(taosThreadRwlockUnlock(&p->rwLock)); return TSDB_CODE_OUT_OF_RANGE; @@ -5070,7 +5070,7 @@ int32_t dbChkpDumpTo(SDbChkp* p, char* dname, SArray* list) { char* srcDir = &dstBuf[cap]; char* dstDir = &srcDir[cap]; - int nBytes = snprintf(srcDir, cap, "%s%s%s%s%s%" PRId64 "", p->path, TD_DIRSEP, "checkpoints", TD_DIRSEP, + int nBytes = snprintf(srcDir, cap, "%s%s%s%s%s%" PRId64, p->path, TD_DIRSEP, "checkpoints", TD_DIRSEP, "checkpoint", p->curChkpId); if (nBytes <= 0 || nBytes >= cap) { code = TSDB_CODE_OUT_OF_RANGE; @@ -5145,7 +5145,7 @@ int32_t dbChkpDumpTo(SDbChkp* p, char* dname, SArray* list) { goto _ERROR; } - nBytes = snprintf(dstBuf, cap, "%s%s%s_%" PRId64 "", dstDir, TD_DIRSEP, p->pCurrent, p->curChkpId); + nBytes = snprintf(dstBuf, cap, "%s%s%s_%" PRId64, dstDir, TD_DIRSEP, p->pCurrent, p->curChkpId); if (nBytes <= 0 || nBytes >= cap) { code = TSDB_CODE_OUT_OF_RANGE; goto _ERROR; @@ -5167,7 +5167,7 @@ int32_t dbChkpDumpTo(SDbChkp* p, char* dname, SArray* list) { goto _ERROR; } - nBytes = snprintf(dstBuf, cap, "%s%s%s_%" PRId64 "", dstDir, TD_DIRSEP, p->pManifest, p->curChkpId); + nBytes = snprintf(dstBuf, cap, "%s%s%s_%" PRId64, dstDir, TD_DIRSEP, p->pManifest, p->curChkpId); if (nBytes <= 0 || nBytes >= cap) { code = TSDB_CODE_OUT_OF_RANGE; goto _ERROR; diff --git a/source/libs/stream/src/streamSnapshot.c b/source/libs/stream/src/streamSnapshot.c index 0f322f48c5..07123baf5d 100644 --- a/source/libs/stream/src/streamSnapshot.c +++ b/source/libs/stream/src/streamSnapshot.c @@ -392,7 +392,7 @@ int32_t streamBackendSnapInitFile(char* metaPath, SStreamTaskSnap* pSnap, SBacke return terrno; } - nBytes = snprintf(path, cap, "%s%s%s%s%s%" PRId64 "", pSnap->dbPrefixPath, TD_DIRSEP, "checkpoints", TD_DIRSEP, + nBytes = snprintf(path, cap, "%s%s%s%s%s%" PRId64, pSnap->dbPrefixPath, TD_DIRSEP, "checkpoints", TD_DIRSEP, "checkpoint", pSnap->chkpId); if (nBytes <= 0 || nBytes >= cap) { code = TSDB_CODE_OUT_OF_RANGE; @@ -817,7 +817,7 @@ int32_t streamSnapWrite(SStreamSnapWriter* pWriter, uint8_t* pData, uint32_t nDa return terrno; } - int32_t ret = snprintf(path, bufLen, "%s%s%s%s%s%s%s%" PRId64 "", pHandle->metaPath, TD_DIRSEP, idstr, TD_DIRSEP, + int32_t ret = snprintf(path, bufLen, "%s%s%s%s%s%s%s%" PRId64, pHandle->metaPath, TD_DIRSEP, idstr, TD_DIRSEP, "checkpoints", TD_DIRSEP, "checkpoint", snapInfo.chkpId); if (ret < 0 || ret >= bufLen) { 
stError("s-task:0x%x failed to set the path for take snapshot, code: out of buffer, %s", (int32_t)snapInfo.taskId, diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index 49e5130d02..1305eb6bdd 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -1020,7 +1020,7 @@ _end: int32_t forceRemoveCheckpoint(SStreamFileState* pFileState, int64_t checkpointId) { char keyBuf[128] = {0}; - TAOS_UNUSED(tsnprintf(keyBuf, sizeof(keyBuf), "%s:%" PRId64 "", TASK_KEY, checkpointId)); + TAOS_UNUSED(tsnprintf(keyBuf, sizeof(keyBuf), "%s:%" PRId64, TASK_KEY, checkpointId)); return streamDefaultDel_rocksdb(pFileState->pFileStore, keyBuf); } @@ -1045,7 +1045,7 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) { char buf[128] = {0}; void* val = 0; int32_t len = 0; - TAOS_UNUSED(tsnprintf(buf, sizeof(buf), "%s:%" PRId64 "", TASK_KEY, i)); + TAOS_UNUSED(tsnprintf(buf, sizeof(buf), "%s:%" PRId64, TASK_KEY, i)); code = streamDefaultGet_rocksdb(pFileState->pFileStore, buf, &val, &len); if (code != 0) { return TSDB_CODE_FAILED; diff --git a/source/libs/sync/src/syncRequestVoteReply.c b/source/libs/sync/src/syncRequestVoteReply.c index 8a7b45253d..58e398d1d9 100644 --- a/source/libs/sync/src/syncRequestVoteReply.c +++ b/source/libs/sync/src/syncRequestVoteReply.c @@ -70,7 +70,7 @@ int32_t syncNodeOnRequestVoteReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) { // but they won't be looked at, so it doesn't matter. if (ths->state == TAOS_SYNC_STATE_CANDIDATE) { if (ths->pVotesRespond->term != pMsg->term) { - sNError(ths, "vote respond error vote-respond-mgr term:%" PRIu64 ", msg term:%" PRIu64 "", + sNError(ths, "vote respond error vote-respond-mgr term:%" PRIu64 ", msg term:%" PRIu64, ths->pVotesRespond->term, pMsg->term); TAOS_RETURN(TSDB_CODE_SYN_WRONG_TERM); diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c index f75bc8b0ec..7ba79627d5 100644 --- a/source/libs/transport/src/thttp.c +++ b/source/libs/transport/src/thttp.c @@ -260,13 +260,13 @@ static int32_t httpCreateMsg(const char* server, const char* uri, uint16_t port, EHttpCompFlag flag, int64_t chanId, const char* qid, SHttpMsg** httpMsg) { int64_t seqNum = atomic_fetch_add_64(&httpSeqNum, 1); if (server == NULL || uri == NULL) { - tError("http-report failed to report to invalid addr, chanId:%" PRId64 ", seq:%" PRId64 "", chanId, seqNum); + tError("http-report failed to report to invalid addr, chanId:%" PRId64 ", seq:%" PRId64, chanId, seqNum); *httpMsg = NULL; return TSDB_CODE_INVALID_PARA; } if (pCont == NULL || contLen == 0) { - tError("http-report failed to report empty packet, chanId:%" PRId64 ", seq:%" PRId64 "", chanId, seqNum); + tError("http-report failed to report empty packet, chanId:%" PRId64 ", seq:%" PRId64, chanId, seqNum); *httpMsg = NULL; return TSDB_CODE_INVALID_PARA; } @@ -318,7 +318,7 @@ static void httpDestroyMsg(SHttpMsg* msg) { } static void httpDestroyMsgWrapper(void* cont, void* param) { SHttpMsg* pMsg = cont; - tWarn("http-report destroy msg, chanId:%" PRId64 ", seq:%" PRId64 "", pMsg->chanId, pMsg->seq); + tWarn("http-report destroy msg, chanId:%" PRId64 ", seq:%" PRId64, pMsg->chanId, pMsg->seq); httpDestroyMsg(pMsg); } @@ -360,7 +360,7 @@ static void httpTrace(queue* q) { msg = QUEUE_DATA(h, SHttpMsg, q); endSeq = msg->seq; - tDebug("http-report process msg, start_seq:%" PRId64 ", end_seq:%" PRId64 ", max_seq:%" PRId64 "", startSeq, endSeq, + tDebug("http-report process msg, 
start_seq:%" PRId64 ", end_seq:%" PRId64 ", max_seq:%" PRId64, startSeq, endSeq, atomic_load_64(&httpSeqNum) - 1); } @@ -438,9 +438,9 @@ static FORCE_INLINE void clientRecvCb(uv_stream_t* handle, ssize_t nread, const STUB_RAND_NETWORK_ERR(nread); SHttpClient* cli = handle->data; if (nread < 0) { - tError("http-report recv error:%s, seq:%" PRId64 "", uv_strerror(nread), cli->seq); + tError("http-report recv error:%s, seq:%" PRId64, uv_strerror(nread), cli->seq); } else { - tTrace("http-report succ to recv %d bytes, seq:%" PRId64 "", (int32_t)nread, cli->seq); + tTrace("http-report succ to recv %d bytes, seq:%" PRId64, (int32_t)nread, cli->seq); if (cli->recvBufRid > 0) { SHttpRecvBuf* p = taosAcquireRef(httpRecvRefMgt, cli->recvBufRid); if (p != NULL) { @@ -472,19 +472,19 @@ static void clientSentCb(uv_write_t* req, int32_t status) { STUB_RAND_NETWORK_ERR(status); SHttpClient* cli = req->data; if (status != 0) { - tError("http-report failed to send data, reason:%s, dst:%s:%d, chanId:%" PRId64 ", seq:%" PRId64 "", + tError("http-report failed to send data, reason:%s, dst:%s:%d, chanId:%" PRId64 ", seq:%" PRId64, uv_strerror(status), cli->addr, cli->port, cli->chanId, cli->seq); if (!uv_is_closing((uv_handle_t*)&cli->tcp)) { uv_close((uv_handle_t*)&cli->tcp, clientCloseCb); } return; } else { - tTrace("http-report succ to send data, chanId:%" PRId64 ", seq:%" PRId64 "", cli->chanId, cli->seq); + tTrace("http-report succ to send data, chanId:%" PRId64 ", seq:%" PRId64, cli->chanId, cli->seq); } status = uv_read_start((uv_stream_t*)&cli->tcp, clientAllocBuffCb, clientRecvCb); if (status != 0) { - tError("http-report failed to recv data,reason:%s, dst:%s:%d, chanId:%" PRId64 ", seq:%" PRId64 "", + tError("http-report failed to recv data,reason:%s, dst:%s:%d, chanId:%" PRId64 ", seq:%" PRId64, uv_strerror(status), cli->addr, cli->port, cli->chanId, cli->seq); if (!uv_is_closing((uv_handle_t*)&cli->tcp)) { uv_close((uv_handle_t*)&cli->tcp, clientCloseCb); @@ -499,7 +499,7 @@ static void clientConnCb(uv_connect_t* req, int32_t status) { SHttpModule* http = taosAcquireRef(httpRefMgt, chanId); if (status != 0) { httpFailFastMayUpdate(http->connStatusTable, cli->addr, cli->port, 0); - tError("http-report failed to conn to server, reason:%s, dst:%s:%d, chanId:%" PRId64 ", seq:%" PRId64 "", + tError("http-report failed to conn to server, reason:%s, dst:%s:%d, chanId:%" PRId64 ", seq:%" PRId64, uv_strerror(status), cli->addr, cli->port, chanId, cli->seq); if (!uv_is_closing((uv_handle_t*)&cli->tcp)) { uv_close((uv_handle_t*)&cli->tcp, clientCloseCb); @@ -513,7 +513,7 @@ static void clientConnCb(uv_connect_t* req, int32_t status) { status = uv_write(&cli->req, (uv_stream_t*)&cli->tcp, cli->wbuf, 2, clientSentCb); if (0 != status) { - tError("http-report failed to send data,reason:%s, dst:%s:%d, chanId:%" PRId64 ", seq:%" PRId64 "", + tError("http-report failed to send data,reason:%s, dst:%s:%d, chanId:%" PRId64 ", seq:%" PRId64, uv_strerror(status), cli->addr, cli->port, chanId, cli->seq); if (!uv_is_closing((uv_handle_t*)&cli->tcp)) { uv_close((uv_handle_t*)&cli->tcp, clientCloseCb); @@ -561,7 +561,7 @@ static void httpHandleQuit(SHttpMsg* msg) { int64_t chanId = msg->chanId; taosMemoryFree(msg); - tDebug("http-report receive quit, chanId:%" PRId64 ", seq:%" PRId64 "", chanId, seq); + tDebug("http-report receive quit, chanId:%" PRId64 ", seq:%" PRId64, chanId, seq); SHttpModule* http = taosAcquireRef(httpRefMgt, chanId); if (http == NULL) return; uv_walk(http->loop, httpWalkCb, NULL); @@ -778,7 +778,7 @@ 
static int32_t taosSendHttpReportImplByChan2(const char* server, const char* uri code = TSDB_CODE_HTTP_MODULE_QUIT; goto _ERROR; } - tDebug("http-report start to report, chanId:%" PRId64 ", seq:%" PRId64 "", chanId, msg->seq); + tDebug("http-report start to report, chanId:%" PRId64 ", seq:%" PRId64, chanId, msg->seq); code = transAsyncSend(load->asyncPool, &(msg->q)); if (code != 0) { @@ -790,7 +790,7 @@ static int32_t taosSendHttpReportImplByChan2(const char* server, const char* uri _ERROR: if (code != 0) { - tError("http-report failed to report reason:%s, chanId:%" PRId64 ", seq:%" PRId64 "", tstrerror(code), chanId, + tError("http-report failed to report reason:%s, chanId:%" PRId64 ", seq:%" PRId64, tstrerror(code), chanId, msg->seq); } httpDestroyMsg(msg); @@ -816,7 +816,7 @@ static int32_t taosSendHttpReportImplByChan(const char* server, const char* uri, code = TSDB_CODE_HTTP_MODULE_QUIT; goto _ERROR; } - tDebug("http-report start to report, chanId:%" PRId64 ", seq:%" PRId64 "", chanId, msg->seq); + tDebug("http-report start to report, chanId:%" PRId64 ", seq:%" PRId64, chanId, msg->seq); code = transAsyncSend(load->asyncPool, &(msg->q)); if (code != 0) { @@ -828,7 +828,7 @@ static int32_t taosSendHttpReportImplByChan(const char* server, const char* uri, _ERROR: if (code != 0) { - tError("http-report failed to report reason:%s, chanId:%" PRId64 ", seq:%" PRId64 "", tstrerror(code), chanId, + tError("http-report failed to report reason:%s, chanId:%" PRId64 ", seq:%" PRId64, tstrerror(code), chanId, msg->seq); } httpDestroyMsg(msg); @@ -957,7 +957,7 @@ void taosDestroyHttpChan(int64_t chanId) { } if (taosThreadJoin(load->thread, NULL) != 0) { - tTrace("http-report failed to join thread, chanId %" PRId64 "", chanId); + tTrace("http-report failed to join thread, chanId %" PRId64, chanId); } httpModuleDestroy(load); diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 7f0f1bb301..ff5ecf6600 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -595,7 +595,7 @@ int32_t cliHandleState_mayHandleReleaseResp(SCliConn* conn, STransMsgHead* pHead int64_t qId = taosHton64(pHead->qid); STraceId* trace = &pHead->traceId; int64_t seqNum = taosHton64(pHead->seqNum); - tGDebug("%s conn:%p, %s received from %s, local info:%s, len:%d, seqNum:%" PRId64 ", sid:%" PRId64 "", + tGDebug("%s conn:%p, %s received from %s, local info:%s, len:%d, seqNum:%" PRId64 ", sid:%" PRId64, CONN_GET_INST_LABEL(conn), conn, TMSG_INFO(pHead->msgType), conn->dst, conn->src, pHead->msgLen, seqNum, qId); diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index 50dd3b41a9..f681aac794 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -395,11 +395,11 @@ static void uvPerfLog_receive(SSvrConn* pConn, STransMsgHead* pHead, STransMsg* if (pConn->status == ConnNormal && pHead->noResp == 0) { if (cost >= EXCEPTION_LIMIT_US) { tGDebug("%s conn:%p, %s received from %s, local info:%s, len:%d, cost:%dus, recv exception, seqNum:%" PRId64 - ", sid:%" PRId64 "", + ", sid:%" PRId64, transLabel(pInst), pConn, TMSG_INFO(pTransMsg->msgType), pConn->dst, pConn->src, pTransMsg->contLen, (int)cost, pTransMsg->info.seqNum, pTransMsg->info.qId); } else { - tGDebug("%s conn:%p, %s received from %s, local info:%s, len:%d, cost:%dus, seqNum:%" PRId64 ", sid:%" PRId64 "", + tGDebug("%s conn:%p, %s received from %s, local info:%s, len:%d, cost:%dus, seqNum:%" PRId64 ", sid:%" PRId64, 
transLabel(pInst), pConn, TMSG_INFO(pTransMsg->msgType), pConn->dst, pConn->src, pTransMsg->contLen, (int)cost, pTransMsg->info.seqNum, pTransMsg->info.qId); } @@ -407,13 +407,13 @@ static void uvPerfLog_receive(SSvrConn* pConn, STransMsgHead* pHead, STransMsg* if (cost >= EXCEPTION_LIMIT_US) { tGDebug( "%s conn:%p, %s received from %s, local info:%s, len:%d, noResp:%d, code:%d, cost:%dus, recv exception, " - "seqNum:%" PRId64 ", sid:%" PRId64 "", + "seqNum:%" PRId64 ", sid:%" PRId64, transLabel(pInst), pConn, TMSG_INFO(pTransMsg->msgType), pConn->dst, pConn->src, pTransMsg->contLen, pHead->noResp, pTransMsg->code, (int)(cost), pTransMsg->info.seqNum, pTransMsg->info.qId); } else { tGDebug("%s conn:%p, %s received from %s, local info:%s, len:%d, noResp:%d, code:%d, cost:%dus, seqNum:%" PRId64 ", " - "sid:%" PRId64 "", + "sid:%" PRId64, transLabel(pInst), pConn, TMSG_INFO(pTransMsg->msgType), pConn->dst, pConn->src, pTransMsg->contLen, pHead->noResp, pTransMsg->code, (int)(cost), pTransMsg->info.seqNum, pTransMsg->info.qId); } @@ -441,23 +441,23 @@ static int32_t uvMayHandleReleaseReq(SSvrConn* pConn, STransMsgHead* pHead) { if (pHead->msgType == TDMT_SCH_TASK_RELEASE) { int64_t qId = taosHton64(pHead->qid); if (qId <= 0) { - tError("conn:%p, recv release, but invalid sid:%" PRId64 "", pConn, qId); + tError("conn:%p, recv release, but invalid sid:%" PRId64, pConn, qId); code = TSDB_CODE_RPC_NO_STATE; } else { void* p = taosHashGet(pConn->pQTable, &qId, sizeof(qId)); if (p == NULL) { code = TSDB_CODE_RPC_NO_STATE; - tTrace("conn:%p, recv release, and releady release by server sid:%" PRId64 "", pConn, qId); + tTrace("conn:%p, recv release, and releady release by server sid:%" PRId64, pConn, qId); } else { SSvrRegArg* arg = p; (pInst->cfp)(pInst->parent, &(arg->msg), NULL); - tTrace("conn:%p, recv release, notify server app, sid:%" PRId64 "", pConn, qId); + tTrace("conn:%p, recv release, notify server app, sid:%" PRId64, pConn, qId); code = taosHashRemove(pConn->pQTable, &qId, sizeof(qId)); if (code != 0) { - tDebug("conn:%p, failed to remove sid:%" PRId64 "", pConn, qId); + tDebug("conn:%p, failed to remove sid:%" PRId64, pConn, qId); } - tTrace("conn:%p, clear state,sid:%" PRId64 "", pConn, qId); + tTrace("conn:%p, clear state,sid:%" PRId64, pConn, qId); } } @@ -693,7 +693,7 @@ void uvOnSendCb(uv_write_t* req, int status) { SSvrRespMsg* smsg = QUEUE_DATA(head, SSvrRespMsg, q); STraceId* trace = &smsg->msg.info.traceId; - tGDebug("%s conn:%p, msg already send out, seqNum:%" PRId64 ", sid:%" PRId64 "", transLabel(conn->pInst), conn, + tGDebug("%s conn:%p, msg already send out, seqNum:%" PRId64 ", sid:%" PRId64, transLabel(conn->pInst), conn, smsg->msg.info.seqNum, smsg->msg.info.qId); destroySmsg(smsg); } @@ -772,7 +772,7 @@ static int32_t uvPrepareSendData(SSvrRespMsg* smsg, uv_buf_t* wb) { } STraceId* trace = &pMsg->info.traceId; - tGDebug("%s conn:%p, %s is sent to %s, local info:%s, len:%d, seqNum:%" PRId64 ", sid:%" PRId64 "", transLabel(pInst), + tGDebug("%s conn:%p, %s is sent to %s, local info:%s, len:%d, seqNum:%" PRId64 ", sid:%" PRId64, transLabel(pInst), pConn, TMSG_INFO(pHead->msgType), pConn->dst, pConn->src, len, pMsg->info.seqNum, pMsg->info.qId); wb->base = (char*)pHead; @@ -879,7 +879,7 @@ int32_t uvMayHandleReleaseResp(SSvrRespMsg* pMsg) { if (pMsg->msg.msgType == TDMT_SCH_TASK_RELEASE && qid > 0) { SSvrRegArg* p = taosHashGet(pConn->pQTable, &qid, sizeof(qid)); if (p == NULL) { - tError("%s conn:%p, already release sid:%" PRId64 "", transLabel(pConn->pInst), pConn, qid); + 
tError("%s conn:%p, already release sid:%" PRId64, transLabel(pConn->pInst), pConn, qid); return TSDB_CODE_RPC_NO_STATE; } else { transFreeMsg(p->msg.pCont); @@ -1424,7 +1424,7 @@ void uvConnDestroyAllState(SSvrConn* p) { SSvrRegArg* arg = pIter; int64_t* qid = taosHashGetKey(pIter, NULL); (pInst->cfp)(pInst->parent, &(arg->msg), NULL); - tTrace("conn:%p, broken, notify server app, sid:%" PRId64 "", p, *qid); + tTrace("conn:%p, broken, notify server app, sid:%" PRId64, p, *qid); pIter = taosHashIterate(pQTable, pIter); } @@ -1729,7 +1729,7 @@ int32_t uvHandleStateReq(SSvrRespMsg* msg) { int32_t code = 0; SSvrConn* conn = msg->pConn; int64_t qid = msg->msg.info.qId; - tDebug("%s conn:%p, start to register brokenlink callback, sid:%" PRId64 "", transLabel(conn->pInst), conn, qid); + tDebug("%s conn:%p, start to register brokenlink callback, sid:%" PRId64, transLabel(conn->pInst), conn, qid); SSvrRegArg arg = {.notifyCount = 0, .init = 1, .msg = msg->msg}; SSvrRegArg* p = taosHashGet(conn->pQTable, &qid, sizeof(qid)); @@ -1906,7 +1906,7 @@ int32_t transReleaseSrvHandle(void* handle, int32_t status) { m->msg = tmsg; m->type = Normal; - tDebug("%s conn:%p, start to send %s, sid:%" PRId64 "", transLabel(pThrd->pInst), exh->handle, + tDebug("%s conn:%p, start to send %s, sid:%" PRId64, transLabel(pThrd->pInst), exh->handle, TMSG_INFO(tmsg.msgType), qId); if ((code = transAsyncSend(pThrd->asyncPool, &m->q)) != 0) { destroySmsg(m); diff --git a/tools/taos-tools/src/benchCommandOpt.c b/tools/taos-tools/src/benchCommandOpt.c index 9bc57bff01..e2bb3129e1 100644 --- a/tools/taos-tools/src/benchCommandOpt.c +++ b/tools/taos-tools/src/benchCommandOpt.c @@ -367,10 +367,10 @@ static void *queryStableAggrFunc(void *sarg) { if (i == 1) { if (g_arguments->demo_mode) { snprintf(tempS, LARGE_BUFF_LEN, - "groupid = %" PRId64 "", i); + "groupid = %" PRId64, i); } else { snprintf(tempS, LARGE_BUFF_LEN, - "t0 = %" PRId64 "", i); + "t0 = %" PRId64, i); } } else { if (g_arguments->demo_mode) { diff --git a/tools/taos-tools/src/benchData.c b/tools/taos-tools/src/benchData.c index 9b58ad06ca..0925d1002c 100644 --- a/tools/taos-tools/src/benchData.c +++ b/tools/taos-tools/src/benchData.c @@ -2140,7 +2140,7 @@ void generateSmlJsonTags(tools_cJSON *tagsList, uint64_t start_table_from, int tbSeq) { tools_cJSON * tags = tools_cJSON_CreateObject(); char * tbName = benchCalloc(1, TSDB_TABLE_NAME_LEN, true); - snprintf(tbName, TSDB_TABLE_NAME_LEN, "%s%" PRIu64 "", + snprintf(tbName, TSDB_TABLE_NAME_LEN, "%s%" PRIu64, stbInfo->childTblPrefix, start_table_from + tbSeq); char *tagName = benchCalloc(1, TSDB_MAX_TAGS, true); for (int i = 0; i < stbInfo->tags->size; i++) { @@ -2202,7 +2202,7 @@ void generateSmlTaosJsonTags(tools_cJSON *tagsList, SSuperTable *stbInfo, uint64_t start_table_from, int tbSeq) { tools_cJSON * tags = tools_cJSON_CreateObject(); char * tbName = benchCalloc(1, TSDB_TABLE_NAME_LEN, true); - snprintf(tbName, TSDB_TABLE_NAME_LEN, "%s%" PRIu64 "", + snprintf(tbName, TSDB_TABLE_NAME_LEN, "%s%" PRIu64, stbInfo->childTblPrefix, tbSeq + start_table_from); tools_cJSON_AddStringToObject(tags, "id", tbName); char *tagName = benchCalloc(1, TSDB_MAX_TAGS, true); diff --git a/tools/taos-tools/src/benchInsert.c b/tools/taos-tools/src/benchInsert.c index 0ad4c1031a..6108f4990c 100644 --- a/tools/taos-tools/src/benchInsert.c +++ b/tools/taos-tools/src/benchInsert.c @@ -2240,7 +2240,7 @@ static void *syncWriteInterlace(void *sarg) { snprintf( pThreadInfo->lines[generated], stbInfo->lenOfCols + stbInfo->lenOfTags, - "%s %s %" 
PRId64 "", + "%s %s %" PRId64, pThreadInfo ->sml_tags[(int)tableSeq - pThreadInfo->start_table_from], @@ -2645,7 +2645,7 @@ static int32_t prepareProgressDataSmlLineOrTelnet( snprintf( pThreadInfo->lines[j], stbInfo->lenOfCols + stbInfo->lenOfTags, - "%s %s %" PRId64 "", + "%s %s %" PRId64, pThreadInfo->sml_tags[ti], sampleDataBuf + pos * stbInfo->lenOfCols, *timestamp); @@ -3473,7 +3473,7 @@ static int64_t fillChildTblNameByCount(SSuperTable *stbInfo) { char childName[TSDB_TABLE_NAME_LEN]={0}; snprintf(childName, TSDB_TABLE_NAME_LEN, - "%s%" PRIu64 "", + "%s%" PRIu64, stbInfo->childTblPrefix, i); stbInfo->childTblArray[i]->name = strdup(childName); debugPrint("%s(): %s\n", __func__, @@ -3489,7 +3489,7 @@ static int64_t fillChildTblNameByFromTo(SDataBase *database, char childName[TSDB_TABLE_NAME_LEN]={0}; snprintf(childName, TSDB_TABLE_NAME_LEN, - "%s%" PRIu64 "", + "%s%" PRIu64, stbInfo->childTblPrefix, i); stbInfo->childTblArray[i]->name = strdup(childName); } @@ -3507,13 +3507,13 @@ static int64_t fillChildTblNameByLimitOffset(SDataBase *database, if (g_arguments->taosc_version == 3) { snprintf(cmd, SHORT_1K_SQL_BUFF_LEN, "SELECT DISTINCT(TBNAME) FROM %s.`%s` LIMIT %" PRId64 - " OFFSET %" PRIu64 "", + " OFFSET %" PRIu64, database->dbName, stbInfo->stbName, stbInfo->childTblLimit, stbInfo->childTblOffset); } else { snprintf(cmd, SHORT_1K_SQL_BUFF_LEN, "SELECT TBNAME FROM %s.`%s` LIMIT %" PRId64 - " OFFSET %" PRIu64 "", + " OFFSET %" PRIu64, database->dbName, stbInfo->stbName, stbInfo->childTblLimit, stbInfo->childTblOffset); } diff --git a/tools/taos-tools/src/benchUtil.c b/tools/taos-tools/src/benchUtil.c index f010920dbb..467af0198a 100644 --- a/tools/taos-tools/src/benchUtil.c +++ b/tools/taos-tools/src/benchUtil.c @@ -119,7 +119,7 @@ int getAllChildNameOfSuperTable(TAOS *taos, char *dbName, char *stbName, int64_t childTblCountOfSuperTbl) { char cmd[SHORT_1K_SQL_BUFF_LEN] = "\0"; snprintf(cmd, SHORT_1K_SQL_BUFF_LEN, - "select distinct tbname from %s.`%s` limit %" PRId64 "", + "select distinct tbname from %s.`%s` limit %" PRId64, dbName, stbName, childTblCountOfSuperTbl); TAOS_RES *res = taos_query(taos, cmd); int32_t code = taos_errno(res); diff --git a/tools/taos-tools/src/taosdump.c b/tools/taos-tools/src/taosdump.c index 0d4369d71c..e300a976b9 100644 --- a/tools/taos-tools/src/taosdump.c +++ b/tools/taos-tools/src/taosdump.c @@ -1583,7 +1583,7 @@ int processFieldsValueV2( break; case TSDB_DATA_TYPE_BIGINT: snprintf(tableDes->cols[index].value, COL_VALUEBUF_LEN, - "%" PRId64 "", *((int64_t *)value)); + "%" PRId64, *((int64_t *)value)); break; case TSDB_DATA_TYPE_UTINYINT: snprintf(tableDes->cols[index].value, COL_VALUEBUF_LEN, @@ -1603,7 +1603,7 @@ int processFieldsValueV2( break; case TSDB_DATA_TYPE_UBIGINT: snprintf(tableDes->cols[index].value, COL_VALUEBUF_LEN, - "%" PRIu64 "", *((uint64_t *)value)); + "%" PRIu64, *((uint64_t *)value)); break; case TSDB_DATA_TYPE_FLOAT: { @@ -1769,7 +1769,7 @@ int processFieldsValueV2( break; case TSDB_DATA_TYPE_TIMESTAMP: snprintf(tableDes->cols[index].value, COL_VALUEBUF_LEN, - "%" PRId64 "", *(int64_t *)value); + "%" PRId64, *(int64_t *)value); break; default: errorPrint("%s() LN%d, unknown type: %d\n", @@ -3495,7 +3495,7 @@ int64_t queryDbForDumpOutCount( ? 
"SELECT COUNT(*) FROM `%s`.%s%s%s WHERE _c0 >= %" PRId64 " " "AND _c0 <= %" PRId64 "" : "SELECT COUNT(*) FROM %s.%s%s%s WHERE _c0 >= %" PRId64 " " - "AND _c0 <= %" PRId64 "", + "AND _c0 <= %" PRId64, dbName, g_escapeChar, tbName, g_escapeChar, startTime, endTime); @@ -6648,7 +6648,7 @@ int processResultValue( case TSDB_DATA_TYPE_BIGINT: return sprintf(pstr + curr_sqlstr_len, - "%" PRId64 "", + "%" PRId64, *((int64_t *)value)); case TSDB_DATA_TYPE_UTINYINT: @@ -6665,7 +6665,7 @@ int processResultValue( case TSDB_DATA_TYPE_UBIGINT: return sprintf(pstr + curr_sqlstr_len, - "%" PRIu64 "", + "%" PRIu64, *((uint64_t *)value)); case TSDB_DATA_TYPE_FLOAT: @@ -6710,7 +6710,7 @@ int processResultValue( } case TSDB_DATA_TYPE_TIMESTAMP: return sprintf(pstr + curr_sqlstr_len, - "%" PRId64 "", *(int64_t *)value); + "%" PRId64, *(int64_t *)value); break; default: break; From 8de0b9a31d2abfab71ec048f7c3f608610209235 Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Tue, 18 Mar 2025 18:30:53 +0800 Subject: [PATCH 11/23] fix: tdengine test badge --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 42538519bc..5e768b36ea 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@

-[![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/taosdata/tdengine/taosd-ci-build.yml)](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml)
+[![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/taosdata/tdengine/tdengine-test.yml)](https://github.com/taosdata/TDengine/actions/workflows/tdengine-test.yml)
 [![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=3.0)](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
 [![GitHub commit activity](https://img.shields.io/github/commit-activity/m/taosdata/tdengine)](https://github.com/feici02/TDengine/commits/main/)
From 67b3e301636d86987791317e3d5991a53059a433 Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Tue, 18 Mar 2025 18:38:18 +0800 Subject: [PATCH 12/23] ci: fix ci error caused by taosws cargo update (#30251) Close [TD-33938](https://jira.taosdata.com:18080/browse/TD-33938) --- tools/CMakeLists.txt | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 1ee2bc4ce6..110a644e90 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -19,8 +19,7 @@ IF(TD_WEBSOCKET) PATCH_COMMAND COMMAND git clean -f -d BUILD_COMMAND - COMMAND cargo update - COMMAND RUSTFLAGS=-Ctarget-feature=-crt-static cargo build --release -p taos-ws-sys --features rustls + COMMAND RUSTFLAGS=-Ctarget-feature=-crt-static cargo build --release --locked -p taos-ws-sys --features rustls INSTALL_COMMAND COMMAND cp target/release/${websocket_lib_file} ${CMAKE_BINARY_DIR}/build/lib COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/build/include @@ -38,8 +37,7 @@ IF(TD_WEBSOCKET) PATCH_COMMAND COMMAND git clean -f -d BUILD_COMMAND - COMMAND cargo update - COMMAND cargo build --release -p taos-ws-sys --features rustls + COMMAND cargo build --release --locked -p taos-ws-sys --features rustls INSTALL_COMMAND COMMAND cp target/release/taosws.dll ${CMAKE_BINARY_DIR}/build/lib COMMAND cp target/release/taosws.dll.lib ${CMAKE_BINARY_DIR}/build/lib/taosws.lib @@ -58,8 +56,7 @@ IF(TD_WEBSOCKET) PATCH_COMMAND COMMAND git clean -f -d BUILD_COMMAND - COMMAND cargo update - COMMAND cargo build --release -p taos-ws-sys --features rustls + COMMAND cargo build --release --locked -p taos-ws-sys --features rustls INSTALL_COMMAND COMMAND cp target/release/${websocket_lib_file} ${CMAKE_BINARY_DIR}/build/lib COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/build/include From c5478b15d3ccad93e048ecdaa2275d0b0a6527ab Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 17 Mar 2025 16:42:55 +0800 Subject: [PATCH 13/23] feat: add inspect tool to installation and removal scripts in packaging --- packaging/tools/install.sh | 71 ++++++++++++++++++++++++-------------- packaging/tools/remove.sh | 6 +++- 2 files changed, 50 insertions(+), 27 deletions(-) diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index a6fd69d16f..82c02e9066 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -37,6 +37,7 @@ demoName="${PREFIX}demo" xname="${PREFIX}x" explorerName="${PREFIX}-explorer" keeperName="${PREFIX}keeper" +inspect_name="${PREFIX}inspect" bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" @@ -156,12 +157,13 @@ done #echo "verType=${verType} interactiveFqdn=${interactiveFqdn}" -tools=(${clientName} ${benchmarkName} ${dumpName} ${demoName} remove.sh udfd set_core.sh TDinsight.sh start_pre.sh start-all.sh stop-all.sh) +tools=(${clientName} ${benchmarkName} ${dumpName} ${demoName} ${inspect_name} remove.sh udfd set_core.sh TDinsight.sh start_pre.sh start-all.sh stop-all.sh) if [ "${verMode}" == "cluster" ]; then services=(${serverName} ${adapterName} ${xname} ${explorerName} ${keeperName}) elif [ "${verMode}" == "edge" ]; then if [ "${pagMode}" == "full" ]; then services=(${serverName} ${adapterName} ${keeperName} ${explorerName}) + tools=(${clientName} ${benchmarkName} ${dumpName} ${demoName} remove.sh taosudf set_core.sh TDinsight.sh start_pre.sh start-all.sh stop-all.sh) else services=(${serverName}) tools=(${clientName} ${benchmarkName} remove.sh start_pre.sh) @@ -225,6 +227,7 @@ function install_bin() { ${csudo}cp -r ${script_dir}/bin/${clientName} 
${install_main_dir}/bin ${csudo}cp -r ${script_dir}/bin/${benchmarkName} ${install_main_dir}/bin ${csudo}cp -r ${script_dir}/bin/${dumpName} ${install_main_dir}/bin + ${csudo}cp -r ${script_dir}/bin/${inspect_name} ${install_main_dir}/bin ${csudo}cp -r ${script_dir}/bin/remove.sh ${install_main_dir}/bin else ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin @@ -521,14 +524,14 @@ function local_fqdn_check() { function install_taosx_config() { [ ! -z $1 ] && return 0 || : # only install client - fileName="${script_dir}/${xname}/etc/${PREFIX}/${xname}.toml" - if [ -f ${fileName} ]; then - ${csudo}sed -i -r "s/#*\s*(fqdn\s*=\s*).*/\1\"${serverFqdn}\"/" ${fileName} + file_name="${script_dir}/${xname}/etc/${PREFIX}/${xname}.toml" + if [ -f ${file_name} ]; then + ${csudo}sed -i -r "s/#*\s*(fqdn\s*=\s*).*/\1\"${serverFqdn}\"/" ${file_name} if [ -f "${configDir}/${xname}.toml" ]; then - ${csudo}cp ${fileName} ${configDir}/${xname}.toml.new + ${csudo}cp ${file_name} ${configDir}/${xname}.toml.new else - ${csudo}cp ${fileName} ${configDir}/${xname}.toml + ${csudo}cp ${file_name} ${configDir}/${xname}.toml fi fi } @@ -538,18 +541,18 @@ function install_explorer_config() { [ ! -z $1 ] && return 0 || : # only install client if [ "$verMode" == "cluster" ]; then - fileName="${script_dir}/${xname}/etc/${PREFIX}/explorer.toml" + file_name="${script_dir}/${xname}/etc/${PREFIX}/explorer.toml" else - fileName="${script_dir}/cfg/explorer.toml" + file_name="${script_dir}/cfg/explorer.toml" fi - if [ -f ${fileName} ]; then - ${csudo}sed -i "s/localhost/${serverFqdn}/g" ${fileName} + if [ -f ${file_name} ]; then + ${csudo}sed -i "s/localhost/${serverFqdn}/g" ${file_name} if [ -f "${configDir}/explorer.toml" ]; then - ${csudo}cp ${fileName} ${configDir}/explorer.toml.new + ${csudo}cp ${file_name} ${configDir}/explorer.toml.new else - ${csudo}cp ${fileName} ${configDir}/explorer.toml + ${csudo}cp ${file_name} ${configDir}/explorer.toml fi fi } @@ -557,14 +560,14 @@ function install_explorer_config() { function install_adapter_config() { [ ! -z $1 ] && return 0 || : # only install client - fileName="${script_dir}/cfg/${adapterName}.toml" - if [ -f ${fileName} ]; then - ${csudo}sed -i -r "s/localhost/${serverFqdn}/g" ${fileName} + file_name="${script_dir}/cfg/${adapterName}.toml" + if [ -f ${file_name} ]; then + ${csudo}sed -i -r "s/localhost/${serverFqdn}/g" ${file_name} if [ -f "${configDir}/${adapterName}.toml" ]; then - ${csudo}cp ${fileName} ${configDir}/${adapterName}.toml.new + ${csudo}cp ${file_name} ${configDir}/${adapterName}.toml.new else - ${csudo}cp ${fileName} ${configDir}/${adapterName}.toml + ${csudo}cp ${file_name} ${configDir}/${adapterName}.toml fi fi } @@ -572,21 +575,21 @@ function install_adapter_config() { function install_keeper_config() { [ ! 
-z $1 ] && return 0 || : # only install client - fileName="${script_dir}/cfg/${keeperName}.toml" - if [ -f ${fileName} ]; then - ${csudo}sed -i -r "s/127.0.0.1/${serverFqdn}/g" ${fileName} + file_name="${script_dir}/cfg/${keeperName}.toml" + if [ -f ${file_name} ]; then + ${csudo}sed -i -r "s/127.0.0.1/${serverFqdn}/g" ${file_name} if [ -f "${configDir}/${keeperName}.toml" ]; then - ${csudo}cp ${fileName} ${configDir}/${keeperName}.toml.new + ${csudo}cp ${file_name} ${configDir}/${keeperName}.toml.new else - ${csudo}cp ${fileName} ${configDir}/${keeperName}.toml + ${csudo}cp ${file_name} ${configDir}/${keeperName}.toml fi fi } function install_taosd_config() { - fileName="${script_dir}/cfg/${configFile}" - if [ -f ${fileName} ]; then + file_name="${script_dir}/cfg/${configFile}" + if [ -f ${file_name} ]; then ${csudo}sed -i -r "s/#*\s*(fqdn\s*).*/\1$serverFqdn/" ${script_dir}/cfg/${configFile} ${csudo}echo "monitor 1" >>${script_dir}/cfg/${configFile} ${csudo}echo "monitorFQDN ${serverFqdn}" >>${script_dir}/cfg/${configFile} @@ -595,15 +598,27 @@ function install_taosd_config() { fi if [ -f "${configDir}/${configFile}" ]; then - ${csudo}cp ${fileName} ${configDir}/${configFile}.new + ${csudo}cp ${file_name} ${configDir}/${configFile}.new else - ${csudo}cp ${fileName} ${configDir}/${configFile} + ${csudo}cp ${file_name} ${configDir}/${configFile} fi fi ${csudo}ln -sf ${configDir}/${configFile} ${install_main_dir}/cfg } +function install_taosinspect_config() { + file_name="${script_dir}/cfg/inspect.cfg" + if [ -f ${file_name} ]; then + if [ -f "${configDir}/inspect.cfg" ]; then + ${csudo}cp ${file_name} ${configDir}/inspect.cfg.new + else + ${csudo}cp ${file_name} ${configDir}/inspect.cfg + fi + fi + + ${csudo}ln -sf ${configDir}/inspect.cfg ${install_main_dir}/cfg +} function install_config() { @@ -915,6 +930,10 @@ function updateProduct() { install_adapter_config install_taosx_config install_explorer_config + if [ "${verMode}" == "cluster" ]; then + install_taosinspect_config + fi + if [ "${verMode}" != "cloud" ]; then install_keeper_config fi diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 43c2de4ba4..b40b94aa7b 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -44,6 +44,7 @@ dumpName="${PREFIX}dump" keeperName="${PREFIX}keeper" xName="${PREFIX}x" explorerName="${PREFIX}-explorer" +inspect_name="${PREFIX}inspect" tarbitratorName="tarbitratord" productName="TDengine" @@ -58,10 +59,13 @@ config_dir="/etc/${PREFIX}" if [ "${verMode}" == "cluster" ]; then services=(${PREFIX}"d" ${PREFIX}"adapter" ${PREFIX}"keeper") + tools=(${PREFIX} ${PREFIX}"Benchmark" ${PREFIX}"dump" ${PREFIX}"demo" ${PREFIX}"inspect" udfd set_core.sh TDinsight.sh $uninstallScript start-all.sh stop-all.sh) else + tools=(${PREFIX} ${PREFIX}"Benchmark" ${PREFIX}"dump" ${PREFIX}"demo" udfd set_core.sh TDinsight.sh $uninstallScript start-all.sh stop-all.sh) + services=(${PREFIX}"d" ${PREFIX}"adapter" ${PREFIX}"keeper" ${PREFIX}"-explorer") fi -tools=(${PREFIX} ${PREFIX}"Benchmark" ${PREFIX}"dump" ${PREFIX}"demo" udfd set_core.sh TDinsight.sh $uninstallScript start-all.sh stop-all.sh) + csudo="" if command -v sudo >/dev/null; then From aef00a615bf8fe8ae41e425ddc677f2b0a32fc67 Mon Sep 17 00:00:00 2001 From: Simon Guan Date: Tue, 18 Mar 2025 18:55:30 +0800 Subject: [PATCH 14/23] fix: ci errors (#30254) * refactor: adjust wal log infos * refactor: adjust some log's level * refactor: adjust some log's level * fix: add traceId for sync module * refactor: adjust log level * refactor: 
adjust log level * refactor: adjust log levels * refactor: adjust log levels * refactor: adjust log levels * refactor: adjust log levels * refactor: adjust log levels * refactor: adjust log levels * refactor: adjust log levels * docs: update keywords * fix: compile errors in windows * docs: minor changes * docs: minor changes * refactor: minor changes * docs: fminro changes * docs: format * refactr: remove uncessory logs * docs: format doc * fix: ci errors * fix: ci errors --- source/client/src/clientImpl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index efa793ed35..7957736459 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -3021,8 +3021,8 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly, int8_t s } taosMemoryFree(param); - tscDebug("QID:0x%" PRIx64 ", create resultset, taos_query end, req:0x%" PRIx64 " conn:0x%" PRIx64 " res:%p", - pRequest->requestId, pRequest->self, *(int64_t*)taos, pRequest); + tscDebug("QID:0x%" PRIx64 ", taos_query end, conn:0x%" PRIx64 " res:%p", pRequest ? pRequest->requestId : 0, + *(int64_t*)taos, pRequest); return pRequest; } From 3c30345625719e7eace591b42beaf18bc1bcd401 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 17 Mar 2025 20:50:21 +0800 Subject: [PATCH 15/23] feat: add taosinspect configuration and cleanup for client installation --- packaging/tools/install.sh | 5 ++++ packaging/tools/install_client.sh | 38 ++++++++++++++++++++++++++----- packaging/tools/remove_client.sh | 7 +++++- 3 files changed, 43 insertions(+), 7 deletions(-) diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 82c02e9066..39d1eb7c72 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -1026,6 +1026,11 @@ function installProduct() { install_adapter_config install_taosx_config install_explorer_config + + if [ "${verMode}" == "cluster" ]; then + install_taosinspect_config + fi + if [ "${verMode}" != "cloud" ]; then install_keeper_config fi diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 9ba9529146..532838fd2b 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -32,6 +32,8 @@ benchmarkName2="${clientName2}Benchmark" dumpName2="${clientName2}dump" demoName2="${clientName2}demo" uninstallScript2="rm${clientName2}" +inspect_name="${clientName2}inspect" + if [ "$osType" != "Darwin" ]; then script_dir=$(dirname $(readlink -f "$0")) @@ -106,12 +108,15 @@ function install_main_path() { function install_bin() { # Remove links - ${csudo}rm -f ${bin_link_dir}/${clientName} || : + ${csudo}rm -f ${bin_link_dir}/${clientName} || : if [ "$osType" != "Darwin" ]; then ${csudo}rm -f ${bin_link_dir}/taosdemo || : + ${csudo}rm -f ${bin_link_dir}/${inspect_name} || : fi - ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : - ${csudo}rm -f ${bin_link_dir}/set_core || : + ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : + ${csudo}rm -f ${bin_link_dir}/set_core || : + ${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || : + ${csudo}rm -f ${bin_link_dir}/${dumpName2} || : ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/* @@ -119,6 +124,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/${clientName2} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName2} ${bin_link_dir}/${clientName2} || : if [ "$osType" != "Darwin" ]; then [ -x 
${install_main_dir}/bin/${demoName2} ] && ${csudo}ln -s ${install_main_dir}/bin/${demoName2} ${bin_link_dir}/${demoName2} || : + [ -x ${install_main_dir}/bin/${inspect_name} ] && ${csudo}ln -s ${install_main_dir}/bin/${inspect_name} ${bin_link_dir}/${inspect_name} || : fi [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/${uninstallScript} || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : @@ -237,16 +243,34 @@ function install_jemalloc() { } function install_config() { - if [ ! -f ${cfg_install_dir}/${configFile} ]; then + file_name=${cfg_install_dir}/${configFile} + if [ -f ${file_name} ]; then + echo "The configuration file ${file_name} already exists" + ${csudo}cp ${file_name} ${cfg_install_dir}/${configFile}.new + else ${csudo}mkdir -p ${cfg_install_dir} [ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir} ${csudo}chmod 644 ${cfg_install_dir}/* + ${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg fi - ${csudo}cp -f ${script_dir}/cfg/${configFile} ${install_main_dir}/cfg/${configFile}.org - ${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg + } +function install_taosinspect_config() { + file_name="${script_dir}/cfg/inspect.cfg" + if [ -f ${file_name} ]; then + if [ -f "${cfg_install_dir}/inspect.cfg" ]; then + ${csudo}cp ${file_name} ${cfg_install_dir}/inspect.cfg.new + else + ${csudo}mkdir -p ${cfg_install_dir} + ${csudo}cp ${file_name} ${cfg_install_dir}/inspect.cfg + fi + ${csudo}ln -sf ${cfg_install_dir}/inspect.cfg ${install_main_dir}/cfg + fi + + +} function install_log() { ${csudo}rm -rf ${log_dir} || : @@ -293,6 +317,7 @@ function update_TDengine() { install_jemalloc if [ "$verMode" == "cluster" ]; then install_connector + install_taosinspect_config fi install_examples install_bin @@ -320,6 +345,7 @@ function install_TDengine() { install_jemalloc if [ "$verMode" == "cluster" ]; then install_connector + install_taosinspect_config fi install_examples install_bin diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index 1d2965f66b..33454d7512 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -7,6 +7,7 @@ set -e RED='\033[0;31m' GREEN='\033[1;32m' NC='\033[0m' +verMode=edge installDir="/usr/local/taos" clientName="taos" @@ -18,8 +19,10 @@ productName2="TDengine" benchmarkName2="${clientName2}Benchmark" demoName2="${clientName2}demo" dumpName2="${clientName2}dump" +inspect_name="${clientName2}inspect" uninstallScript2="rm${clientName2}" + installDir="/usr/local/${clientName2}" #install main path @@ -52,8 +55,9 @@ function clean_bin() { ${csudo}rm -f ${bin_link_dir}/${demoName2} || : ${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || : ${csudo}rm -f ${bin_link_dir}/${dumpName2} || : - ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : + ${csudo}rm -f ${bin_link_dir}/${uninstallScript2} || : ${csudo}rm -f ${bin_link_dir}/set_core || : + [ -f ${bin_link_dir}/${inspect_name} ] && ${csudo}rm -f ${bin_link_dir}/${inspect_name} || : if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then ${csudo}rm -f ${bin_link_dir}/${clientName2} || : @@ -61,6 +65,7 @@ function clean_bin() { ${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || : ${csudo}rm -f ${bin_link_dir}/${dumpName2} || : ${csudo}rm -f ${bin_link_dir}/${uninstallScript2} || : + [ -f 
${bin_link_dir}/${inspect_name} ] && ${csudo}rm -f ${bin_link_dir}/${inspect_name} || : fi } From f4a1db9f6f900f046604a43009854d4d5eabf602 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 18 Mar 2025 18:59:49 +0800 Subject: [PATCH 16/23] enh: supports silent uninstallation of the tar package using the -e parameter --- packaging/tools/remove.sh | 63 +++++++++++++++++++++++++------- packaging/tools/remove_client.sh | 4 +- 2 files changed, 51 insertions(+), 16 deletions(-) diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index b40b94aa7b..a0a1c235c0 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -162,9 +162,9 @@ remove_service_of() { remove_tools_of() { _tool=$1 kill_service_of ${_tool} - [ -e "${bin_link_dir}/${_tool}" ] && ${csudo}rm -rf ${bin_link_dir}/${_tool} || : + [ -L "${bin_link_dir}/${_tool}" ] && ${csudo}rm -rf ${bin_link_dir}/${_tool} || : [ -e "${installDir}/bin/${_tool}" ] && ${csudo}rm -rf ${installDir}/bin/${_tool} || : - [ -e "${local_bin_link_dir}/${_tool}" ] && ${csudo}rm -rf ${local_bin_link_dir}/${_tool} || : + [ -L "${local_bin_link_dir}/${_tool}" ] && ${csudo}rm -rf ${local_bin_link_dir}/${_tool} || : } remove_bin() { @@ -236,21 +236,56 @@ function remove_data_and_config() { [ -d "${log_dir}" ] && ${csudo}rm -rf ${log_dir} } -echo -echo "Do you want to remove all the data, log and configuration files? [y/n]" -read answer -remove_flag=false -if [ X$answer == X"y" ] || [ X$answer == X"Y" ]; then - confirmMsg="I confirm that I would like to delete all data, log and configuration files" - echo "Please enter '${confirmMsg}' to continue" +function usage() { + echo -e "\nUsage: $(basename $0) [-e ]" + echo "-e: silent mode, specify whether to remove all the data, log and configuration files." + echo " yes: remove the data, log, and configuration files." + echo " no: don't remove the data, log, and configuration files." +} + +# main +interactive_remove="yes" +remove_flag="false" + +while getopts "e:h" opt; do + case $opt in + e) + interactive_remove="no" + + if [ "$OPTARG" == "yes" ]; then + remove_flag="true" + echo "Remove all the data, log, and configuration files." + elif [ "$OPTARG" == "no" ]; then + remove_flag="false" + echo "Do not remove the data, log, and configuration files." + else + echo "Invalid option for -e: $OPTARG" + usage + exit 1 + fi + ;; + h | *) + usage + exit 1 + ;; + esac +done + +if [ "$interactive_remove" == "yes" ]; then + echo -e "\nDo you want to remove all the data, log and configuration files? 
[y/n]" read answer - if [ X"$answer" == X"${confirmMsg}" ]; then - remove_flag=true - else - echo "answer doesn't match, skip this step" + if [ X$answer == X"y" ] || [ X$answer == X"Y" ]; then + confirmMsg="I confirm that I would like to delete all data, log and configuration files" + echo "Please enter '${confirmMsg}' to continue" + read answer + if [ X"$answer" == X"${confirmMsg}" ]; then + remove_flag="true" + else + echo "answer doesn't match, skip this step" + fi fi + echo fi -echo if [ -e ${install_main_dir}/uninstall_${PREFIX}x.sh ]; then if [ X$remove_flag == X"true" ]; then diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index 33454d7512..a7eb225704 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -57,7 +57,7 @@ function clean_bin() { ${csudo}rm -f ${bin_link_dir}/${dumpName2} || : ${csudo}rm -f ${bin_link_dir}/${uninstallScript2} || : ${csudo}rm -f ${bin_link_dir}/set_core || : - [ -f ${bin_link_dir}/${inspect_name} ] && ${csudo}rm -f ${bin_link_dir}/${inspect_name} || : + [ -L ${bin_link_dir}/${inspect_name} ] && ${csudo}rm -f ${bin_link_dir}/${inspect_name} || : if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then ${csudo}rm -f ${bin_link_dir}/${clientName2} || : @@ -65,7 +65,7 @@ function clean_bin() { ${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || : ${csudo}rm -f ${bin_link_dir}/${dumpName2} || : ${csudo}rm -f ${bin_link_dir}/${uninstallScript2} || : - [ -f ${bin_link_dir}/${inspect_name} ] && ${csudo}rm -f ${bin_link_dir}/${inspect_name} || : + [ -L ${bin_link_dir}/${inspect_name} ] && ${csudo}rm -f ${bin_link_dir}/${inspect_name} || : fi } From fd813aa62d3c9aedf1d9df42ae54b0193a6a1e4b Mon Sep 17 00:00:00 2001 From: dongming chen Date: Wed, 19 Mar 2025 09:09:16 +0800 Subject: [PATCH 17/23] fix: TS-6126-change-error-msg (#30186) --- include/util/taoserror.h | 1 + source/util/src/terror.c | 1 + 2 files changed, 2 insertions(+) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index c747d86286..28cb7d9010 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -367,6 +367,7 @@ int32_t taosGetErrSize(); #define TSDB_CODE_MND_INVALID_WAL_LEVEL TAOS_DEF_ERROR_CODE(0, 0x039C) #define TSDB_CODE_MND_INVALID_DNODE_LIST_FMT TAOS_DEF_ERROR_CODE(0, 0x039D) #define TSDB_CODE_MND_DNODE_LIST_REPEAT TAOS_DEF_ERROR_CODE(0, 0x039E) +#define TSDB_CODE_MND_NO_VGROUP_ON_DB TAOS_DEF_ERROR_CODE(0, 0x039F) // mnode-node #define TSDB_CODE_MND_MNODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03A0) diff --git a/source/util/src/terror.c b/source/util/src/terror.c index e7864bee5d..f64bd7c6ea 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -277,6 +277,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_ENCRYPT_NOT_ALLOW_CHANGE, "Encryption is not all TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_WAL_LEVEL, "Invalid option, wal_level 0 should be used with replica 1") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DNODE_LIST_FMT, "Invalid dnode list format") TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_LIST_REPEAT, "Duplicate items in the dnode list") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_VGROUP_ON_DB, "No enough vgroup on db to execute") // mnode-node TAOS_DEFINE_ERROR(TSDB_CODE_MND_MNODE_ALREADY_EXIST, "Mnode already exists") From fd09c267a967f1096bd9c7d20fa150eff1d08c1e Mon Sep 17 00:00:00 2001 From: Mario Peng <48949600+Pengrongkun@users.noreply.github.com> Date: Wed, 19 Mar 2025 10:09:00 +0800 Subject: [PATCH 18/23] fix(stmt2):fix init db is NULL problem (#30256) * fix: stmt2 interlace mode 
db is NULL * fix: add test and fix one interlace no pk error --- source/client/src/clientStmt2.c | 3 +++ source/client/test/stmt2Test.cpp | 3 ++- source/common/src/tdataformat.c | 4 ++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/source/client/src/clientStmt2.c b/source/client/src/clientStmt2.c index 994be0c2bd..cfd302d895 100644 --- a/source/client/src/clientStmt2.c +++ b/source/client/src/clientStmt2.c @@ -889,6 +889,9 @@ static int stmtSetDbName2(TAOS_STMT2* stmt, const char* dbName) { if (pStmt->exec.pRequest->pDb == NULL) { return terrno; } + if (pStmt->sql.stbInterlaceMode) { + pStmt->sql.siInfo.dbname = pStmt->db; + } return TSDB_CODE_SUCCESS; } diff --git a/source/client/test/stmt2Test.cpp b/source/client/test/stmt2Test.cpp index c7ae20b096..cc54cd55d3 100644 --- a/source/client/test/stmt2Test.cpp +++ b/source/client/test/stmt2Test.cpp @@ -1167,6 +1167,7 @@ TEST(stmt2Case, stmt2_insert_non_statndard) { } // TD-33419 +// TD-34075 TEST(stmt2Case, stmt2_insert_db) { TAOS* taos = taos_connect("localhost", "root", "taosdata", "", 0); ASSERT_NE(taos, nullptr); @@ -1177,7 +1178,7 @@ TEST(stmt2Case, stmt2_insert_db) { "INSERT INTO `stmt2_testdb_12`.`stb1` (ts,int_tag,tbname) VALUES " "(1591060627000,1,'tb1')(1591060627000,2,'tb2')"); - TAOS_STMT2_OPTION option = {0, false, false, NULL, NULL}; + TAOS_STMT2_OPTION option = {0, true, true, NULL, NULL}; TAOS_STMT2* stmt = taos_stmt2_init(taos, &option); ASSERT_NE(stmt, nullptr); diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c index ada69fd350..7b27840bae 100644 --- a/source/common/src/tdataformat.c +++ b/source/common/src/tdataformat.c @@ -3295,6 +3295,10 @@ int32_t tRowBuildFromBind2(SBindInfo2 *infos, int32_t numOfInfos, bool infoSorte for (int32_t iInfo = 0; iInfo < numOfInfos; iInfo++) { if (infos[iInfo].bind->is_null && infos[iInfo].bind->is_null[iRow]) { if (infos[iInfo].bind->is_null[iRow] == 1) { + if(iInfo == 0) { + code = TSDB_CODE_PAR_PRIMARY_KEY_IS_NULL; + goto _exit; + } colVal = COL_VAL_NULL(infos[iInfo].columnId, infos[iInfo].type); } else { colVal = COL_VAL_NONE(infos[iInfo].columnId, infos[iInfo].type); From 7c3d6a35b4516fa3186b9b70b98835ef200676a8 Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Wed, 19 Mar 2025 10:10:06 +0800 Subject: [PATCH 19/23] docs: add password tutorial for all components (#30253) Close [TS-6000](https://jira.taosdata.com:18080/browse/TS-6000) --- docs/en/27-train-faq/03-password.md | 296 ++++++++++++++++++++++++++++ docs/zh/27-train-faq/03-password.md | 296 ++++++++++++++++++++++++++++ 2 files changed, 592 insertions(+) create mode 100644 docs/en/27-train-faq/03-password.md create mode 100644 docs/zh/27-train-faq/03-password.md diff --git a/docs/en/27-train-faq/03-password.md b/docs/en/27-train-faq/03-password.md new file mode 100644 index 0000000000..f6029b5092 --- /dev/null +++ b/docs/en/27-train-faq/03-password.md @@ -0,0 +1,296 @@ +--- +title: Usage of Special Characters in Passwords +description: Usage of special characters in user passwords in TDengine +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +TDengine user passwords must meet the following rules: + +1. The username must not exceed 23 bytes. +2. The password length must be between 8 and 255 characters. +3. The range of password characters: + 1. Uppercase letters: `A-Z` + 2. Lowercase letters: `a-z` + 3. Numbers: `0-9` + 4. Special characters: `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .` +4. 
When strong password is enabled (EnableStrongPassword 1, enabled by default), the password must contain at least three of the following categories: uppercase letters, lowercase letters, numbers, and special characters. When not enabled, there are no restrictions on character types. + +## Usage Guide for Special Characters in Different Components + +Take the username `user1` and password `Ab1!@#$%^&*()-_+=[]{}` as an example. + +```sql +CREATE USER user1 PASS 'Ab1!@#$%^&*()-_+=[]{}'; +``` + + + + +In the [TDengine Command Line Interface (CLI)](../../tdengine-reference/tools/tdengine-cli/), note the following: + +- If the `-p` parameter is used without a password, you will be prompted to enter a password, and any acceptable characters can be entered. +- If the `-p` parameter is used with a password, and the password contains special characters, single quotes must be used. + +Login with user `user1`: + +```shell +taos -u user1 -p'Ab1!@#$%^&*()-_+=[]{}' +taos -u user1 -pAb1\!\@\#\$\%\^\&\*\(\)\-\_\+\=\[\]\{\} +``` + + + + +In [taosdump](../../tdengine-reference/tools/taosdump/), note the following: + +- If the `-p` parameter is used without a password, you will be prompted to enter a password, and any acceptable characters can be entered. +- If the `-p` parameter is used with a password, and the password contains special characters, single quotes or escaping must be used. + +Backup database `test` with user `user1`: + +```shell +taosdump -u user1 -p'Ab1!@#$%^&*()-_+=[]{}' -D test +taosdump -u user1 -pAb1\!\@\#\$\%\^\&\*\(\)\-\_\+\=\[\]\{\} -D test +``` + + + + +In [taosBenchmark](../../tdengine-reference/tools/taosbenchmark/), note the following: + +- If the `-p` parameter is used without a password, you will be prompted to enter a password, and any acceptable characters can be entered. +- If the `-p` parameter is used with a password, and the password contains special characters, single quotes or escaping must be used. + +Example of data write test with user `user1`: + +```shell +taosBenchmark -u user1 -p'Ab1!@#$%^&*()-_+=[]{}' -d test -y +``` + +When using `taosBenchmark -f `, there are no restrictions on the password in the JSON file. + + + + +[taosX](../../tdengine-reference/components/taosx/) uses DSN to represent TDengine connections, in the format: `(taos|tmq)[+ws]://:@:`, where `` can contain special characters, such as: `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@192.168.10.10:6041`. + +Example of exporting data with user `user1`: + +```shell +taosx -f 'taos://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6030?query=select * from test.t1' -t 'csv:./test.csv' +``` + +Note that if the password can be URL decoded, the URL decoded result will be used as the password. For example: `taos+ws://user1:Ab1%21%40%23%24%25%5E%26%2A%28%29-_%2B%3D%5B%5D%7B%7D@localhost:6041` is equivalent to `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6041`. + + + + + +No special handling is required in [Explorer](../../tdengine-reference/components/taosexplorer/), just use it directly. 
+ + + + + +When using special character passwords in JDBC, the password needs to be URL encoded, as shown below: + +```java +package com.taosdata.example; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Properties; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; + +import com.taosdata.jdbc.TSDBDriver; + +public class JdbcPassDemo { + public static void main(String[] args) throws Exception { + String password = "Ab1!@#$%^&*()-_+=[]{}"; + String encodedPassword = URLEncoder.encode(password, StandardCharsets.UTF_8.toString()); + String jdbcUrl = "jdbc:TAOS-WS://localhost:6041"; + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "user1"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, encodedPassword); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + try (Connection conn = DriverManager.getConnection(jdbcUrl, connProps)) { + System.out.println("Connected to " + jdbcUrl + " successfully."); + + // you can use the connection for execute SQL here + + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to connect to %s, %sErrMessage: %s%n", + jdbcUrl, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; + } + } +} +``` + + + + +No special handling is required for special character passwords in Python, as shown below: + +```python +import taos +import taosws + + +def create_connection(): + host = "localhost" + port = 6030 + return taos.connect( + user="user1", + password="Ab1!@#$%^&*()-_+=[]{}", + host=host, + port=port, + ) + +def create_ws_connection(): + host = "localhost" + port = 6041 + return taosws.connect( + user="user1", + password="Ab1!@#$%^&*()-_+=[]{}", + host=host, + port=port, + ) + + +def show_databases(conn): + cursor = conn.cursor() + cursor.execute("show databases") + print(cursor.fetchall()) + cursor.close() + + +if __name__ == "__main__": + print("Connect with native protocol") + conn = create_connection() + show_databases(conn) + print("Connect with websocket protocol") + conn = create_ws_connection() + show_databases(conn) + +``` + + + + + +Starting from version 3.6.0, Go supports passwords containing special characters, which need to be encoded using encodeURIComponent. + +```go +package main + +import ( + "database/sql" + "fmt" + "log" + "net/url" + + _ "github.com/taosdata/driver-go/v3/taosWS" +) + +func main() { + var user = "user1" + var password = "Ab1!@#$%^&*()-_+=[]{}" + var encodedPassword = url.QueryEscape(password) + var taosDSN = user + ":" + encodedPassword + "@ws(localhost:6041)/" + taos, err := sql.Open("taosWS", taosDSN) + if err != nil { + log.Fatalln("Failed to connect to " + taosDSN + "; ErrMessage: " + err.Error()) + } + fmt.Println("Connected to " + taosDSN + " successfully.") + defer taos.Close() +} +``` + + + + +In Rust, DSN is used to represent TDengine connections, in the format: `(taos|tmq)[+ws]://:@:`, where `` can contain special characters, such as: `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@192.168.10.10:6041`. 
+ +```rust +let dsn = "taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6041"; +let connection = TaosBuilder::from_dsn(&dsn)?.build().await?; +``` + + + + +```js +const taos = require("@tdengine/websocket"); + +let dsn = 'ws://localhost:6041'; +async function createConnect() { + try { + let conf = new taos.WSConfig(dsn); + conf.setUser('user1'); + conf.setPwd('Ab1!@#$%^&*()-_+=[]{}'); + conf.setDb('test'); + conn = await taos.sqlConnect(conf); + console.log("Connected to " + dsn + " successfully."); + return conn; + } catch (err) { + console.log("Failed to connect to " + dsn + ", ErrCode: " + err.code + ", ErrMessage: " + err.message); + throw err; + } +} + +createConnect() +``` + + + + +When using passwords in C#, note that connection strings do not support semicolons (as semicolons are delimiters). In this case, you can construct the `ConnectionStringBuilder` without a password, and then set the username and password. + +As shown below: + +```csharp +var builder = new ConnectionStringBuilder("host=localhost;port=6030"); +builder.Username = "user1"; +builder.Password = "Ab1!@#$%^&*()-_+=[]{}"; +using (var client = DbDriver.Open(builder)){} +``` + + + + +There are no restrictions on passwords in C. + +```c +TAOS *taos = taos_connect("localhost", "user1", "Ab1!@#$%^&*()-_+=[]{}", NULL, 6030); +``` + + + + +When using passwords in REST API, note the following: + +- Passwords use Basic Auth, in the format `Authorization: Basic base64(:)`. +- Passwords containing colons `:` are not supported. + +The following two methods are equivalent: + +```shell +curl -u'user1:Ab1!@#$%^&*()-_+=[]{}' -d 'show databases' http://localhost:6041/rest/sql +curl -H 'Authorization: Basic dXNlcjE6QWIxIUAjJCVeJiooKS1fKz1bXXt9' -d 'show databases' http://localhost:6041/rest/sql +``` + + + diff --git a/docs/zh/27-train-faq/03-password.md b/docs/zh/27-train-faq/03-password.md new file mode 100644 index 0000000000..7ea215999d --- /dev/null +++ b/docs/zh/27-train-faq/03-password.md @@ -0,0 +1,296 @@ +--- +title: 密码中特殊字符的使用 +description: TDengine 用户密码中特殊字符的使用 +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +TDengine 用户密码需满足以下规则: + +1. 用户名最长不超过 23 个字节。 +2. 密码长度必须为 8 到 255 位。 +3. 密码字符的取值范围 + 1. 大写字母:`A-Z` + 2. 小写字母:`a-z` + 3. 数字:`0-9` + 4. 特殊字符: `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .` +4. 
强密码启用(EnableStrongPassword 1,默认开启)时,至少包含大写字母、小写字母、数字、特殊字符中的三类,不启用时,字符种类不做约束。 + +## 各组件特殊字符使用指南 + +以用户名 `user1`,密码 `Ab1!@#$%^&*()-_+=[]{}` 为例。 + +```sql +CREATE USER user1 PASS 'Ab1!@#$%^&*()-_+=[]{}'; +``` + + + + +在 [TDengine 命令行客户端(CLI)](../../reference/tools/taos-cli/) 中使用需要注意以下几点: + +- 使用参数 `-p` 后不带密码,会提示输入密码,可输入任意可接收字符。 +- 使用参数 `-p` 后带密码,如果密码中包含特殊字符,需使用单引号。 + +使用用户 `user1` 登录: + +```shell +taos -u user1 -p'Ab1!@#$%^&*()-_+=[]{}' +taos -u user1 -pAb1\!\@\#\$\%\^\&\*\(\)\-\_\+\=\[\]\{\} +``` + + + + +在 [taosdump](../../reference/tools/taosdump/) 中使用需要注意以下几点: + +- 使用参数 `-p` 后不带密码,会提示输入密码,可输入任意可接收字符。 +- 使用参数 `-p` 后带密码,如果密码中包含特殊字符,需使用单引号或进行转义。 + +使用用户 `user1` 备份数据库 `test`: + +```shell +taosdump -u user1 -p'Ab1!@#$%^&*()-_+=[]{}' -D test +taosdump -u user1 -pAb1\!\@\#\$\%\^\&\*\(\)\-\_\+\=\[\]\{\} -D test +``` + + + + +在 [taosBenchmark](../../reference/tools/taosbenchmark/) 中使用需要注意以下几点: + +- 使用参数 `-p` 后不带密码,会提示输入密码,可输入任意可接收字符。 +- 使用参数 `-p` 后带密码,如果密码中包含特殊字符,需使用单引号或进行转义。 + +使用用户 `user1` 进行数据写入测试示例如下: + +```shell +taosBenchmark -u user1 -p'Ab1!@#$%^&*()-_+=[]{}' -d test -y +``` + +使用 `taosBenchmark -f ` 方式时,JSON 文件中密码使用无限制。 + + + + +[taosX](../../reference/components/taosx/) 使用 DSN 表示 TDengine 连接,使用如下格式:`(taos|tmq)[+ws]://:@:`,其中 `` 可以包含特殊字符,如:`taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@192.168.10.10:6041`。 + +使用用户 `user1` 导出数据示例如下: + +```shell +taosx -f 'taos://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6030?query=select * from test.t1' -t 'csv:./test.csv' +``` + +需要注意的是,如果密码可被 URL decode,则会使用 URL decoded 结果作为密码。如:`taos+ws://user1:Ab1%21%40%23%24%25%5E%26%2A%28%29-_%2B%3D%5B%5D%7B%7D@localhost:6041` 与 `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6041` 是等价的。 + + + + + +在 [Explorer](../../reference/components/explorer/) 中无需特殊处理,直接使用即可。 + + + + + +在 JDBC 中使用特殊字符密码时,密码需要通过 URL 编码,示例如下: + +```java +package com.taosdata.example; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Properties; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; + +import com.taosdata.jdbc.TSDBDriver; + +public class JdbcPassDemo { + public static void main(String[] args) throws Exception { + String password = "Ab1!@#$%^&*()-_+=[]{}"; + String encodedPassword = URLEncoder.encode(password, StandardCharsets.UTF_8.toString()); + String jdbcUrl = "jdbc:TAOS-WS://localhost:6041"; + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "user1"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, encodedPassword); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + try (Connection conn = DriverManager.getConnection(jdbcUrl, connProps)) { + System.out.println("Connected to " + jdbcUrl + " successfully."); + + // you can use the connection for execute SQL here + + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to connect to %s, %sErrMessage: %s%n", + jdbcUrl, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; + } + } +} +``` + + + + +在 Python 中使用特殊字符密码无需特殊处理,示例如下: + +```python +import taos +import taosws + + +def create_connection(): + host = "localhost" + port = 6030 + return taos.connect( + user="user1", + password="Ab1!@#$%^&*()-_+=[]{}", + host=host, + port=port, + ) + +def create_ws_connection(): + host = "localhost" + port = 6041 + return taosws.connect( + user="user1", + password="Ab1!@#$%^&*()-_+=[]{}", + host=host, + port=port, + ) + + +def show_databases(conn): + cursor = conn.cursor() + cursor.execute("show databases") + print(cursor.fetchall()) + cursor.close() + + +if __name__ == "__main__": + print("Connect with native protocol") + conn = create_connection() + show_databases(conn) + print("Connect with websocket protocol") + conn = create_ws_connection() + show_databases(conn) + +``` + + + + + +从 3.6.0 版本开始,Go 语言中支持密码中包含特殊字符,使用时需要 encodeURIComponent 编码。 + +```go +package main + +import ( + "database/sql" + "fmt" + "log" + "net/url" + + _ "github.com/taosdata/driver-go/v3/taosWS" +) + +func main() { + var user = "user1" + var password = "Ab1!@#$%^&*()-_+=[]{}" + var encodedPassword = url.QueryEscape(password) + var taosDSN = user + ":" + encodedPassword + "@ws(localhost:6041)/" + taos, err := sql.Open("taosWS", taosDSN) + if err != nil { + log.Fatalln("Failed to connect to " + taosDSN + "; ErrMessage: " + err.Error()) + } + fmt.Println("Connected to " + taosDSN + " successfully.") + defer taos.Close() +} +``` + + + + +Rust 中使用 DSN 表示 TDengine 连接,使用如下格式:`(taos|tmq)[+ws]://:@:`,其中 `` 可以包含特殊字符,如:`taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@192.168.10.10:6041`。 + +```rust +let dsn = "taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6041"; +let connection = TaosBuilder::from_dsn(&dsn)?.build().await?; +``` + + + + +```js +const taos = require("@tdengine/websocket"); + +let dsn = 'ws://localhost:6041'; +async function createConnect() { + try { + let conf = new taos.WSConfig(dsn); + conf.setUser('user1'); + conf.setPwd('Ab1!@#$%^&*()-_+=[]{}'); + conf.setDb('test'); + conn = await taos.sqlConnect(conf); + console.log("Connected to " + dsn + " successfully."); + return conn; + } catch (err) { + console.log("Failed to connect to " + dsn + ", ErrCode: " + err.code + ", ErrMessage: " + err.message); + throw err; + } +} + +createConnect() +``` + + + + +在 C# 中使用密码时,需要注意:使用连接字符串时,不支持分号(因分号为分隔符);此时可使用不带密码的字符串构建 `ConnectionStringBuilder`,之后再设置用户名和密码。 + +示例如下: + +```csharp +var builder = new ConnectionStringBuilder("host=localhost;port=6030"); +builder.Username = "user1"; +builder.Password = "Ab1!@#$%^&*()-_+=[]{}"; +using (var client = DbDriver.Open(builder)){} +``` + + + + +C 语言中使用密码无限制。 + +```c +TAOS *taos = taos_connect("localhost", "user1", "Ab1!@#$%^&*()-_+=[]{}", NULL, 6030); +``` + + + + +REST API 中使用密码时,需要注意以下几点: + +- 密码使用 Basic Auth,格式为 `Authorization: Basic base64(:)`。 +- 不支持密码中包含冒号 `:`。 + +以下两种方式等价: + +```shell +curl -u'user1:Ab1!@#$%^&*()-_+=[]{}' -d 'show databases' http://localhost:6041/rest/sql +curl -H 'Authorization: Basic dXNlcjE6QWIxIUAjJCVeJiooKS1fKz1bXXt9' -d 'show databases' http://localhost:6041/rest/sql +``` + + + From 3fafecb24274c76fe9c3bad72525320f7c710945 Mon Sep 17 00:00:00 2001 From: WANG MINGMING Date: Wed, 19 Mar 2025 10:22:06 +0800 Subject: [PATCH 20/23] feat[TS-6137]: support sliding in force_window_close (#30203) * feat[TS-6137]: support sliding in force_window_close * feat[TS-6137]: support sliding in force_window_close * feat(stream): force window close support interval sliding * feat[TS-6137]: support sliding in force_window_close 
* feat[TS-6137]: support sliding in force_window_close * feat[TS-6137]: support sliding in force_window_close --------- Co-authored-by: 54liuyao <54liuyao@163.com> --- docs/en/06-advanced/03-stream.md | 7 ++- docs/zh/06-advanced/03-stream.md | 6 ++- source/libs/executor/src/scanoperator.c | 2 +- .../src/streamintervalsliceoperator.c | 4 +- source/libs/parser/src/parTranslater.c | 35 +++------------ source/libs/stream/src/streamData.c | 4 +- source/libs/stream/src/streamSched.c | 1 - tests/parallel_test/cases.task | 1 + tests/parallel_test/cases_tdengine.task | 1 + tests/script/tsim/stream/forcewindowclose.sim | 44 ++++++++++++++++++- .../script/tsim/stream/streamInterpError.sim | 6 +-- .../tsim/stream/streamInterpFwcError.sim | 4 +- tests/script/tsim/stream/streamTwaError.sim | 2 +- .../8-stream/force_window_close_interp.py | 20 ++++----- 14 files changed, 81 insertions(+), 56 deletions(-) diff --git a/docs/en/06-advanced/03-stream.md b/docs/en/06-advanced/03-stream.md index 9c8dd44655..9920168220 100644 --- a/docs/en/06-advanced/03-stream.md +++ b/docs/en/06-advanced/03-stream.md @@ -142,8 +142,11 @@ When creating a stream, you can specify the trigger mode of stream computing thr 1. AT_ONCE: Triggered immediately upon writing. 2. WINDOW_CLOSE: Triggered when the window closes (the closing of the window is determined by the event time, can be used in conjunction with watermark). 3. MAX_DELAY time: If the window closes, computation is triggered. If the window has not closed, and the duration since it has not closed exceeds the time specified by max delay, computation is triggered. -4. FORCE_WINDOW_CLOSE: Based on the current time of the operating system, only the results of the currently closed window are calculated and pushed out. The window is only calculated once at the moment of closure, and will not be recalculated subsequently. This mode currently only supports INTERVAL windows (does not support sliding); FILL_HISTORY must be 0, IGNORE EXPIRED must be 1, IGNORE UPDATE must be 1; FILL only supports PREV, NULL, NONE, VALUE. - +4. FORCE_WINDOW_CLOSE: Based on the current time of the operating system, only the results of the currently closed window are calculated and pushed out. The window is only calculated once at the moment of closure, and will not be recalculated subsequently. This mode currently only supports INTERVAL windows (does support sliding); In this mode, FILL_HISTORY is automatically set to 0, IGNORE EXPIRED is automatically set to 1 and IGNORE UPDATE is automatically set to 1; FILL only supports PREV, NULL, NONE, VALUE. + - This mode can be used to implement continuous queries, such as creating a stream that queries the number of data entries in the past 10 seconds window every 1 second。SQL as follows: + ```sql + create stream if not exists continuous_query_s trigger force_window_close into continuous_query as select count(*) from power.meters interval(10s) sliding(1s) + ``` The closing of the window is determined by the event time, such as when the event stream is interrupted or continuously delayed, at which point the event time cannot be updated, possibly leading to outdated computation results. Therefore, stream computing provides the MAX_DELAY trigger mode that combines event time with processing time: MAX_DELAY mode triggers computation immediately when the window closes, and its unit can be specified, specific units: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks). 
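For example, a stream using the MAX_DELAY trigger can be written along the following lines; this is a sketch modeled on the continuous-query example above, and the stream name, target table, and 3-second delay are illustrative values:

```sql
-- each 10-second window is computed when it closes, or at the latest 3 seconds
-- after new data lands in a window that is still open
create stream if not exists max_delay_s trigger max_delay 3s into max_delay_dst as select _wstart, count(*) from power.meters interval(10s)
```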
Additionally, when data is written, if the time that triggers computation exceeds the time specified by MAX_DELAY, computation is triggered immediately. diff --git a/docs/zh/06-advanced/03-stream.md b/docs/zh/06-advanced/03-stream.md index 4bd5d517fd..1bac7f826c 100644 --- a/docs/zh/06-advanced/03-stream.md +++ b/docs/zh/06-advanced/03-stream.md @@ -131,7 +131,11 @@ create stream if not exists count_history_s fill_history 1 into count_history as 1. AT_ONCE:写入立即触发。 2. WINDOW_CLOSE:窗口关闭时触发(窗口关闭由事件时间决定,可配合 watermark 使用)。 3. MAX_DELAY time:若窗口关闭,则触发计算。若窗口未关闭,且未关闭时长超过 max delay 指定的时间,则触发计算。 -4. FORCE_WINDOW_CLOSE:以操作系统当前时间为准,只计算当前关闭窗口的结果,并推送出去。窗口只会在被关闭的时刻计算一次,后续不会再重复计算。该模式当前只支持 INTERVAL 窗口(不支持滑动);FILL_HISTORY必须为 0,IGNORE EXPIRED 必须为 1,IGNORE UPDATE 必须为 1;FILL 只支持 PREV 、NULL、 NONE、VALUE。 +4. FORCE_WINDOW_CLOSE:以操作系统当前时间为准,只计算当前关闭窗口的结果,并推送出去。窗口只会在被关闭的时刻计算一次,后续不会再重复计算。该模式当前只支持 INTERVAL 窗口(支持滑动);该模式时,FILL_HISTORY 自动设置为 0,IGNORE EXPIRED 自动设置为 1,IGNORE UPDATE 自动设置为 1;FILL 只支持 PREV 、NULL、 NONE、VALUE。 + - 该模式可用于实现连续查询,比如,创建一个流,每隔 1s 查询一次过去 10s 窗口内的数据条数。SQL 如下: + ```sql + create stream if not exists continuous_query_s trigger force_window_close into continuous_query as select count(*) from power.meters interval(10s) sliding(1s) + ``` 窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,此时事件时间无法更新,可能导致无法得到最新的计算结果。 diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 7b54ef878c..07dc3a31c5 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -3981,7 +3981,7 @@ FETCH_NEXT_BLOCK: pInfo->blockType = STREAM_INPUT__DATA_SUBMIT; pInfo->updateResIndex = 0; pInfo->lastScanRange = pBlock->info.window; - TSKEY endKey = taosTimeGetIntervalEnd(pBlock->info.window.skey, &pInfo->interval); + TSKEY endKey = getNextTimeWindowStart(&pInfo->interval, pBlock->info.window.skey, TSDB_ORDER_ASC) - 1; if (pInfo->useGetResultRange == true) { endKey = pBlock->info.window.ekey; } diff --git a/source/libs/executor/src/streamintervalsliceoperator.c b/source/libs/executor/src/streamintervalsliceoperator.c index 277cbda36b..60c7ea867d 100644 --- a/source/libs/executor/src/streamintervalsliceoperator.c +++ b/source/libs/executor/src/streamintervalsliceoperator.c @@ -441,7 +441,7 @@ static int32_t doStreamIntervalSliceNext(SOperatorInfo* pOperator, SSDataBlock** return code; } - pAggSup->stateStore.streamStateClearExpiredState(pAggSup->pState, 1, INT64_MAX); + pAggSup->stateStore.streamStateClearExpiredState(pAggSup->pState, pInfo->nbSup.numOfKeep, pInfo->nbSup.tsOfKeep); setStreamOperatorCompleted(pOperator); (*ppRes) = NULL; return code; @@ -533,7 +533,7 @@ static int32_t doStreamIntervalSliceNext(SOperatorInfo* pOperator, SSDataBlock** (*ppRes) = pInfo->pCheckpointRes; return code; } - pAggSup->stateStore.streamStateClearExpiredState(pAggSup->pState, 1, INT64_MAX); + pAggSup->stateStore.streamStateClearExpiredState(pAggSup->pState, pInfo->nbSup.numOfKeep, pInfo->nbSup.tsOfKeep); setStreamOperatorCompleted(pOperator); } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 8f2758fe7a..319149716f 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -12608,10 +12608,8 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm } if (pStmt->pOptions->triggerType == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { - if (pStmt->pOptions->fillHistory) { - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, - "When trigger was force window 
close, Stream interp unsupported Fill history"); - } else if (pSelect->pFill != NULL) { + pStmt->pOptions->fillHistory = 0; + if (pSelect->pFill != NULL) { EFillMode mode = ((SFillNode*)(pSelect->pFill))->mode; if (mode == FILL_MODE_NEXT) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, @@ -12660,32 +12658,9 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm } if (pStmt->pOptions->triggerType == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) { - if (pStmt->pOptions->fillHistory) { - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, - "When trigger was force window close, Stream unsupported Fill history"); - } - - if (pStmt->pOptions->ignoreExpired != 1) { - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, - "When trigger was force window close, Stream must not set ignore expired 0"); - } - - if (pStmt->pOptions->ignoreUpdate != 1) { - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, - "When trigger was force window close, Stream must not set ignore update 0"); - } - - if (pSelect->pWindow != NULL && QUERY_NODE_INTERVAL_WINDOW == nodeType(pSelect->pWindow)) { - SIntervalWindowNode* pWindow = (SIntervalWindowNode*)pSelect->pWindow; - if (NULL != pWindow->pSliding) { - int64_t interval = ((SValueNode*)pWindow->pInterval)->datum.i; - int64_t sliding = ((SValueNode*)pWindow->pSliding)->datum.i; - if (interval != sliding) { - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, - "When trigger was force window close, Stream unsupported sliding"); - } - } - } + pStmt->pOptions->fillHistory = 0; + pStmt->pOptions->ignoreExpired = 1; + pStmt->pOptions->ignoreUpdate = 1; if ((SRealTableNode*)pSelect->pFromTable && ((SRealTableNode*)pSelect->pFromTable)->pMeta && TSDB_SUPER_TABLE == ((SRealTableNode*)pSelect->pFromTable)->pMeta->tableType && diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c index aa1f56af33..4faef0594b 100644 --- a/source/libs/stream/src/streamData.c +++ b/source/libs/stream/src/streamData.c @@ -318,7 +318,7 @@ int32_t streamCreateForcewindowTrigger(SStreamTrigger** pTrigger, int32_t interv SStreamTrigger* p = NULL; int64_t ts = taosGetTimestamp(pInterval->precision); - int64_t skey = pLatestWindow->skey + interval; + int64_t skey = pLatestWindow->skey + pInterval->sliding; int32_t code = taosAllocateQitem(sizeof(SStreamTrigger), DEF_QITEM, 0, (void**)&p); if (code) { @@ -334,7 +334,7 @@ int32_t streamCreateForcewindowTrigger(SStreamTrigger** pTrigger, int32_t interv } p->pBlock->info.window.skey = skey; - p->pBlock->info.window.ekey = TMAX(ts, skey + interval); + p->pBlock->info.window.ekey = TMAX(ts, skey + pInterval->interval); p->pBlock->info.type = STREAM_GET_RESULT; stDebug("s-task:%s force_window_close trigger block generated, window range:%" PRId64 "-%" PRId64, id, diff --git a/source/libs/stream/src/streamSched.c b/source/libs/stream/src/streamSched.c index 6a9409959b..7319953d35 100644 --- a/source/libs/stream/src/streamSched.c +++ b/source/libs/stream/src/streamSched.c @@ -60,7 +60,6 @@ void streamSetupScheduleTrigger(SStreamTask* pTask) { stError("s-task:%s failed to init scheduler info, code:%s", id, tstrerror(code)); return; } - pTask->info.delaySchedParam = interval.sliding; pTask->info.watermark = waterMark; pTask->info.interval = interval; diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 098075db39..f0c547c3cf 100644 
--- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1638,6 +1638,7 @@ ,,y,script,./test.sh -f tsim/stream/streamInterpError.sim ,,y,script,./test.sh -f tsim/stream/streamInterpForceWindowClose.sim ,,y,script,./test.sh -f tsim/stream/streamInterpForceWindowClose1.sim +,,y,script,./test.sh -f tsim/stream/forcewindowclose.sim ,,y,script,./test.sh -f tsim/stream/streamInterpFwcError.sim ,,y,script,./test.sh -f tsim/stream/streamInterpHistory.sim #,,y,script,./test.sh -f tsim/stream/streamInterpHistory1.sim diff --git a/tests/parallel_test/cases_tdengine.task b/tests/parallel_test/cases_tdengine.task index 2983df6a05..e5732957a1 100644 --- a/tests/parallel_test/cases_tdengine.task +++ b/tests/parallel_test/cases_tdengine.task @@ -1351,6 +1351,7 @@ ,,y,script,./test.sh -f tsim/stream/streamInterpError.sim ,,y,script,./test.sh -f tsim/stream/streamInterpForceWindowClose.sim ,,y,script,./test.sh -f tsim/stream/streamInterpForceWindowClose1.sim +,,y,script,./test.sh -f tsim/stream/forcewindowclose.sim ,,y,script,./test.sh -f tsim/stream/streamInterpFwcError.sim ,,y,script,./test.sh -f tsim/stream/streamInterpHistory.sim ,,y,script,./test.sh -f tsim/stream/streamInterpLarge.sim diff --git a/tests/script/tsim/stream/forcewindowclose.sim b/tests/script/tsim/stream/forcewindowclose.sim index ab54278e39..46fa9192ed 100644 --- a/tests/script/tsim/stream/forcewindowclose.sim +++ b/tests/script/tsim/stream/forcewindowclose.sim @@ -4,10 +4,51 @@ system sh/exec.sh -n dnode1 -s start sleep 50 sql connect +print ===================================== force window close with sliding test +print ============ create db +sql create database test1 vgroups 2 precision 'us'; + +sql use test1 +sql create stable st1(ts timestamp, a int) tags(t int); +sql create table tu11 using st1 tags(1); + +sql_error create stream stream11 trigger force_window_close into str_dst1 as select _wstart, count(*) from st1 partition by tbname interval(5s) sliding(6s); +sql_error create stream stream11 trigger force_window_close into str_dst1 as select _wstart, count(*) from st1 partition by tbname interval(5s) sliding(9a); +sql_error create stream stream11 trigger force_window_close into str_dst1 as select _wstart, count(*) from st1 partition by tbname interval(5s) sliding(1.1s); +sql create stream stream11 trigger force_window_close into str_dst1 as select _wstart, _wend, count(*) from st1 partition by tbname interval(5s) sliding(1s); +run tsim/stream/checkTaskStatus.sim + +sql insert into tu11 values(now, 1); +sleep 5500 + +$loop_count = 0 + +loop01: +sleep 500 +$loop_count = $loop_count + 1 +if $loop_count == 20 then + goto end_loop0 +endi + +print insert data +sql insert into tu11 values(now, 1); +goto loop01 + +end_loop0: + +sleep 10000 + +sql select sum(`count(*)`) from (select * from str_dst1) + +if $data00 != 100 then + print expect 100, actual: $data00 + return -1 +endi + print ========================================== create database sql create database test vgroups 2; sql select * from information_schema.ins_databases -if $rows != 3 then +if $rows != 4 then return -1 endi @@ -135,4 +176,5 @@ if $data00 != 35.000000000 then return -1 endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/stream/streamInterpError.sim b/tests/script/tsim/stream/streamInterpError.sim index f0f4e80ade..258cbee703 100644 --- a/tests/script/tsim/stream/streamInterpError.sim +++ b/tests/script/tsim/stream/streamInterpError.sim @@ -73,11 +73,11 @@ sql create stream streams2_6_3 trigger at_once 
FILL_HISTORY 1 IGNORE EXPIRED 0 I sql create stream streams2_6_4 trigger at_once FILL_HISTORY 1 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_6_4 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(NULL); sql create stream streams2_6_5 trigger at_once FILL_HISTORY 1 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2_6_5 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(value,11,22,33,44); -sql_error create stream streams2_6_6 trigger force_window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt2_6_6 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(prev); +sql create stream streams2_6_6 trigger force_window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt2_6_6 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(prev); sql_error create stream streams2_6_7 trigger force_window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt2_6_7 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(next); sql_error create stream streams2_6_8 trigger force_window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt2_6_8 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(linear); -sql_error create stream streams2_6_9 trigger force_window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt2_6_9 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(NULL); -sql_error create stream streams2_6_10 trigger force_window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt2_6_10 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(value,11,22,33,44); +sql create stream streams2_6_9 trigger force_window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt2_6_9 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(NULL); +sql create stream streams2_6_10 trigger force_window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt2_6_10 as select interp(a), _isfilled as a1, interp(b), _isfilled as a2, interp(c), _isfilled as a3, interp(d) from t1 every(1s) fill(value,11,22,33,44); run tsim/stream/checkTaskStatus.sim diff --git a/tests/script/tsim/stream/streamInterpFwcError.sim b/tests/script/tsim/stream/streamInterpFwcError.sim index a53a6fe189..67316e9661 100644 --- a/tests/script/tsim/stream/streamInterpFwcError.sim +++ b/tests/script/tsim/stream/streamInterpFwcError.sim @@ -17,8 +17,8 @@ sql create stream streams1 trigger force_window_close into streamt1 as select _ run tsim/stream/checkTaskStatus.sim -sql_error create stream streams2 trigger force_window_close IGNORE EXPIRED 0 into streamt2 as select _irowts, _isfilled as a1, interp(a) as a2 from st partition by tbname every(1s) fill(prev); -sql_error create stream streams3 trigger force_window_close IGNORE UPDATE 0 into streamt3 as select _irowts, _isfilled as a1, interp(a) as a2 from st partition by tbname every(1s) fill(prev); +sql create stream streams2 trigger force_window_close IGNORE 
EXPIRED 0 into streamt2 as select _irowts, _isfilled as a1, interp(a) as a2 from st partition by tbname every(1s) fill(prev); +sql create stream streams3 trigger force_window_close IGNORE UPDATE 0 into streamt3 as select _irowts, _isfilled as a1, interp(a) as a2 from st partition by tbname every(1s) fill(prev); sql create stream streams4 trigger force_window_close IGNORE EXPIRED 1 into streamt4 as select _irowts, _isfilled as a1, interp(a) as a2 from st partition by tbname every(1s) fill(prev); diff --git a/tests/script/tsim/stream/streamTwaError.sim b/tests/script/tsim/stream/streamTwaError.sim index cda5fa9c4b..67757eaa7e 100644 --- a/tests/script/tsim/stream/streamTwaError.sim +++ b/tests/script/tsim/stream/streamTwaError.sim @@ -26,7 +26,7 @@ sql_error create stream streams8 trigger force_window_close IGNORE EXPIRED 1 IGN sql_error create stream streams9 trigger at_once IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt9 as select _wstart, elapsed(ts) from st partition by tbname,ta interval(2s) fill(prev); -sql_error create stream streams10 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt10 as select _wstart, sum(a) from st partition by tbname,ta interval(2s) SLIDING(1s); +sql create stream streams10 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt10 as select _wstart, sum(a) from st partition by tbname,ta interval(2s) SLIDING(1s); sql create stream streams11 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt11 as select _wstart, avg(a) from st partition by tbname,ta interval(2s) SLIDING(2s); sql_error create stream streams10 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt10 as select _wstart, sum(a) from st interval(2s); diff --git a/tests/system-test/8-stream/force_window_close_interp.py b/tests/system-test/8-stream/force_window_close_interp.py index f78330411b..e2a412e629 100644 --- a/tests/system-test/8-stream/force_window_close_interp.py +++ b/tests/system-test/8-stream/force_window_close_interp.py @@ -81,29 +81,29 @@ class TDTestCase: # create error stream tdLog.info("create error stream") sleep(10) - tdSql.error( - f"create stream itp_force_error_1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 0 into itp_force_error_1 as select _irowts,tbname,_isfilled,interp(c1,1) from {self.stb_name} partition by tbname every(5s) fill(prev) ;" + tdSql.execute( + f"create stream itp_force_error_0 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 0 into itp_force_error_0_s as select _irowts,tbname,_isfilled,interp(c1,1) from {self.stb_name} partition by tbname every(5s) fill(prev) ;" + ) + tdSql.execute( + f"create stream itp_force_error_1 trigger force_window_close IGNORE EXPIRED 0 IGNORE UPDATE 1 into itp_force_error_1_s as select _irowts,tbname,_isfilled,interp(c1,1) from {self.stb_name} partition by tbname every(5s) fill(prev) ;" ) tdSql.error( - f"create stream itp_force_error_1 trigger force_window_close IGNORE EXPIRED 0 IGNORE UPDATE 1 into itp_force_error_1 as select _irowts,tbname,_isfilled,interp(c1,1) from {self.stb_name} partition by tbname every(5s) fill(prev) ;" + f"create stream itp_force_error_2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 1 into itp_force_error_2_s as select _irowts,tbname,_isfilled,interp(c1,1) from {self.stb_name} partition by tbname every(5s) fill(prev) ;" ) tdSql.error( - f"create stream itp_force_error_1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 1 into itp_force_error_1 as select _irowts,tbname,_isfilled,interp(c1,1) from {self.stb_name} 
partition by tbname every(5s) fill(prev) ;" + f"create stream itp_force_error_3 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 0 into itp_force_error_3_s as select _irowts,tbname,_isfilled,interp(c11,1) from {self.stb_name} partition by tbname every(5s) fill(prev) ;" ) tdSql.error( - f"create stream itp_force_error_1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 0 into itp_force_error_1 as select _irowts,tbname,_isfilled,interp(c11,1) from {self.stb_name} partition by tbname every(5s) fill(prev) ;" - ) - tdSql.error( - f"create stream itp_1d_next_error_1 trigger force_window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into itp_1d_next_error_t1 as select _irowts,tbname,_isfilled,interp(current) from {self.stb_name} where groupid=100 partition by every(5s) fill(next) ;" + f"create stream itp_1d_next_error_0 trigger force_window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into itp_1d_next_error_t0 as select _irowts,tbname,_isfilled,interp(current) from {self.stb_name} where groupid=100 partition by every(5s) fill(next) ;" ) tdSql.error( f"create stream itp_1d_next_error_1 trigger at_once FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into itp_1d_next_error_t1 as select _irowts,tbname,_isfilled,interp(current) from {self.stb_name} where groupid=100 partition by every(5s) fill(next) ;" ) tdSql.error( - f"create stream itp_1d_next_error_1 trigger window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into itp_1d_next_error_t1 as select _irowts,tbname,_isfilled,interp(current) from {self.stb_name} where groupid=100 partition by every(5s) fill(next) ;" + f"create stream itp_1d_next_error_2 trigger window_close FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into itp_1d_next_error_t2 as select _irowts,tbname,_isfilled,interp(current) from {self.stb_name} where groupid=100 partition by every(5s) fill(next) ;" ) tdSql.error( - f"create stream itp_1d_next_error_1 trigger max_delay 5s FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into itp_1d_next_error_t1 as select _irowts,tbname,_isfilled,interp(current) from {self.stb_name} where groupid=100 partition by every(5s) fill(next) ;" + f"create stream itp_1d_next_error_3 trigger max_delay 5s FILL_HISTORY 1 IGNORE EXPIRED 1 IGNORE UPDATE 1 into itp_1d_next_error_t3 as select _irowts,tbname,_isfilled,interp(current) from {self.stb_name} where groupid=100 partition by every(5s) fill(next) ;" ) # function name : interp From 1099d91280cf76229ee52d51eadd18892c3537f1 Mon Sep 17 00:00:00 2001 From: Simon Guan Date: Wed, 19 Mar 2025 10:52:02 +0800 Subject: [PATCH 21/23] fix: update error codes while balance leader (#30261) --- docs/en/14-reference/09-error-code.md | 1 + docs/zh/14-reference/09-error-code.md | 1 + source/util/src/terror.c | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/en/14-reference/09-error-code.md b/docs/en/14-reference/09-error-code.md index 965dc2e40c..e801580583 100644 --- a/docs/en/14-reference/09-error-code.md +++ b/docs/en/14-reference/09-error-code.md @@ -168,6 +168,7 @@ This document details the server error codes that may be encountered when using | 0x8000038B | Index not exist | Does not exist | Confirm if the operation is correct | | 0x80000396 | Database in creating status | Database is being created | Retry | | 0x8000039A | Invalid system table name | Internal error | Report issue | +| 0x8000039F | No VGroup's leader need to be balanced | Perform balance leader operation on VGroup | There is no VGroup's leader needs to be balanced | | 0x800003A0 | Mnode 
already exists | Already exists | Confirm if the operation is correct | | 0x800003A1 | Mnode not there | Already exists | Confirm if the operation is correct | | 0x800003A2 | Qnode already exists | Already exists | Confirm if the operation is correct | diff --git a/docs/zh/14-reference/09-error-code.md b/docs/zh/14-reference/09-error-code.md index 15daa2c279..9bf3ceb933 100644 --- a/docs/zh/14-reference/09-error-code.md +++ b/docs/zh/14-reference/09-error-code.md @@ -174,6 +174,7 @@ description: TDengine 服务端的错误码列表和详细说明 | 0x8000038B | Index not exist | 不存在 | 确认操作是否正确 | | 0x80000396 | Database in creating status | 数据库正在被创建 | 重试 | | 0x8000039A | Invalid system table name | 内部错误 | 上报 issue | +| 0x8000039F | No VGroup's leader need to be balanced | 执行 balance vgroup leader 操作 | 没有需要进行 balance leader 操作的 VGroup | | 0x800003A0 | Mnode already exists | 已存在 | 确认操作是否正确 | | 0x800003A1 | Mnode not there | 已存在 | 确认操作是否正确 | | 0x800003A2 | Qnode already exists | 已存在 | 确认操作是否正确 | diff --git a/source/util/src/terror.c b/source/util/src/terror.c index f64bd7c6ea..57040fc95c 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -277,7 +277,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_ENCRYPT_NOT_ALLOW_CHANGE, "Encryption is not all TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_WAL_LEVEL, "Invalid option, wal_level 0 should be used with replica 1") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DNODE_LIST_FMT, "Invalid dnode list format") TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_LIST_REPEAT, "Duplicate items in the dnode list") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_VGROUP_ON_DB, "No enough vgroup on db to execute") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_VGROUP_ON_DB, "No VGroup's leader need to be balanced") // mnode-node TAOS_DEFINE_ERROR(TSDB_CODE_MND_MNODE_ALREADY_EXIST, "Mnode already exists") From d7c75f3b262058e6270d772dd655d980767a0398 Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Wed, 19 Mar 2025 11:00:00 +0800 Subject: [PATCH 22/23] docs: fix password tutorial display (#30263) --- docs/en/27-train-faq/03-password.md | 19 +++++++++---------- docs/zh/27-train-faq/03-password.md | 19 +++++++++---------- 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/docs/en/27-train-faq/03-password.md b/docs/en/27-train-faq/03-password.md index f6029b5092..5acfe1e34d 100644 --- a/docs/en/27-train-faq/03-password.md +++ b/docs/en/27-train-faq/03-password.md @@ -56,7 +56,7 @@ taosdump -u user1 -pAb1\!\@\#\$\%\^\&\*\(\)\-\_\+\=\[\]\{\} -D test ``` - + In [taosBenchmark](../../tdengine-reference/tools/taosbenchmark/), note the following: @@ -79,15 +79,12 @@ When using `taosBenchmark -f `, there are no restrictions on the password Example of exporting data with user `user1`: ```shell -taosx -f 'taos://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6030?query=select * from test.t1' -t 'csv:./test.csv' +taosx -f 'taos://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6030?query=select * from test.t1' \ + -t 'csv:./test.csv' ``` Note that if the password can be URL decoded, the URL decoded result will be used as the password. For example: `taos+ws://user1:Ab1%21%40%23%24%25%5E%26%2A%28%29-_%2B%3D%5B%5D%7B%7D@localhost:6041` is equivalent to `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6041`. - - - - No special handling is required in [Explorer](../../tdengine-reference/components/taosexplorer/), just use it directly. 
@@ -246,7 +243,7 @@ async function createConnect() { console.log("Connected to " + dsn + " successfully."); return conn; } catch (err) { - console.log("Failed to connect to " + dsn + ", ErrCode: " + err.code + ", ErrMessage: " + err.message); + console.log("Connection failed with code: " + err.code + ", message: " + err.message); throw err; } } @@ -278,7 +275,7 @@ TAOS *taos = taos_connect("localhost", "user1", "Ab1!@#$%^&*()-_+=[]{}", NULL, 6 ``` - + When using passwords in REST API, note the following: @@ -288,8 +285,10 @@ When using passwords in REST API, note the following: The following two methods are equivalent: ```shell -curl -u'user1:Ab1!@#$%^&*()-_+=[]{}' -d 'show databases' http://localhost:6041/rest/sql -curl -H 'Authorization: Basic dXNlcjE6QWIxIUAjJCVeJiooKS1fKz1bXXt9' -d 'show databases' http://localhost:6041/rest/sql +curl -u'user1:Ab1!@#$%^&*()-_+=[]{}' \ + -d 'show databases' http://localhost:6041/rest/sql +curl -H 'Authorization: Basic dXNlcjE6QWIxIUAjJCVeJiooKS1fKz1bXXt9' \ + -d 'show databases' http://localhost:6041/rest/sql ``` diff --git a/docs/zh/27-train-faq/03-password.md b/docs/zh/27-train-faq/03-password.md index 7ea215999d..55f336798a 100644 --- a/docs/zh/27-train-faq/03-password.md +++ b/docs/zh/27-train-faq/03-password.md @@ -56,7 +56,7 @@ taosdump -u user1 -pAb1\!\@\#\$\%\^\&\*\(\)\-\_\+\=\[\]\{\} -D test ``` - + 在 [taosBenchmark](../../reference/tools/taosbenchmark/) 中使用需要注意以下几点: @@ -79,15 +79,12 @@ taosBenchmark -u user1 -p'Ab1!@#$%^&*()-_+=[]{}' -d test -y 使用用户 `user1` 导出数据示例如下: ```shell -taosx -f 'taos://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6030?query=select * from test.t1' -t 'csv:./test.csv' +taosx -f 'taos://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6030?query=select * from test.t1' \ + -t 'csv:./test.csv' ``` 需要注意的是,如果密码可被 URL decode,则会使用 URL decoded 结果作为密码。如:`taos+ws://user1:Ab1%21%40%23%24%25%5E%26%2A%28%29-_%2B%3D%5B%5D%7B%7D@localhost:6041` 与 `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6041` 是等价的。 - - - - 在 [Explorer](../../reference/components/explorer/) 中无需特殊处理,直接使用即可。 @@ -246,7 +243,7 @@ async function createConnect() { console.log("Connected to " + dsn + " successfully."); return conn; } catch (err) { - console.log("Failed to connect to " + dsn + ", ErrCode: " + err.code + ", ErrMessage: " + err.message); + console.log("Connection failed with code: " + err.code + ", message: " + err.message); throw err; } } @@ -278,7 +275,7 @@ TAOS *taos = taos_connect("localhost", "user1", "Ab1!@#$%^&*()-_+=[]{}", NULL, 6 ``` - + REST API 中使用密码时,需要注意以下几点: @@ -288,8 +285,10 @@ REST API 中使用密码时,需要注意以下几点: 以下两种方式等价: ```shell -curl -u'user1:Ab1!@#$%^&*()-_+=[]{}' -d 'show databases' http://localhost:6041/rest/sql -curl -H 'Authorization: Basic dXNlcjE6QWIxIUAjJCVeJiooKS1fKz1bXXt9' -d 'show databases' http://localhost:6041/rest/sql +curl -u'user1:Ab1!@#$%^&*()-_+=[]{}' \ + -d 'show databases' http://localhost:6041/rest/sql +curl -H 'Authorization: Basic dXNlcjE6QWIxIUAjJCVeJiooKS1fKz1bXXt9' \ + -d 'show databases' http://localhost:6041/rest/sql ``` From be8a0cc62775cfbd4c2195f0e69808336fe2dcb0 Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Wed, 19 Mar 2025 13:35:15 +0800 Subject: [PATCH 23/23] docs: add notes for password handling in Node.js (#30267) --- docs/en/27-train-faq/03-password.md | 2 ++ docs/zh/27-train-faq/03-password.md | 2 ++ 2 files changed, 4 insertions(+) diff --git a/docs/en/27-train-faq/03-password.md b/docs/en/27-train-faq/03-password.md index 5acfe1e34d..0a46dd967b 100644 --- a/docs/en/27-train-faq/03-password.md +++ 
b/docs/en/27-train-faq/03-password.md @@ -229,6 +229,8 @@ let connection = TaosBuilder::from_dsn(&dsn)?.build().await?; +Starting from version 3.1.5, the Node.js connector supports passwords containing all valid characters. + ```js const taos = require("@tdengine/websocket"); diff --git a/docs/zh/27-train-faq/03-password.md b/docs/zh/27-train-faq/03-password.md index 55f336798a..39fbaff0f3 100644 --- a/docs/zh/27-train-faq/03-password.md +++ b/docs/zh/27-train-faq/03-password.md @@ -229,6 +229,8 @@ let connection = TaosBuilder::from_dsn(&dsn)?.build().await?; +从 3.1.5 版本开始,Node.js 连接器支持密码中包含特殊字符无需特殊处理。 + ```js const taos = require("@tdengine/websocket");