diff --git a/.gitignore b/.gitignore index 07003bda4c..ff27b53139 100644 --- a/.gitignore +++ b/.gitignore @@ -156,6 +156,9 @@ pcre2_grep_test.sh pcre2_chartables.c geos-config config.h +!contrib/xml2-cmake +!contrib/xml2-cmake/linux_x86_64/include/config.h +!contrib/xml2-cmake/CMakeLists.txt pcre2.h zconf.h version.h diff --git a/README.md b/README.md index e4814cee67..f540b1cc43 100644 --- a/README.md +++ b/README.md @@ -26,24 +26,33 @@ English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine # Table of Contents -1. [What is TDengine?](#1-what-is-tdengine) -2. [Documentation](#2-documentation) -3. [Building](#3-building) - 1. [Install build tools](#31-install-build-tools) - 1. [Get the source codes](#32-get-the-source-codes) - 1. [Special Note](#33-special-note) - 1. [Build TDengine](#34-build-tdengine) -4. [Installing](#4-installing) - 1. [On Linux platform](#41-on-linux-platform) - 1. [On Windows platform](#42-on-windows-platform) - 1. [On macOS platform](#43-on-macos-platform) - 1. [Quick Run](#44-quick-run) -5. [Try TDengine](#5-try-tdengine) -6. [Developing with TDengine](#6-developing-with-tdengine) -7. [Contribute to TDengine](#7-contribute-to-tdengine) -8. [Join the TDengine Community](#8-join-the-tdengine-community) +1. [Introduction](#1-introduction) +1. [Documentation](#2-documentation) +1. [Prerequisites](#3-prerequisites) + - [3.1 Prerequisites On Linux](#31-on-linux) + - [3.2 Prerequisites On macOS](#32-on-macos) + - [3.3 Prerequisites On Windows](#33-on-windows) + - [3.4 Clone the repo](#34-clone-the-repo) +1. [Building](#4-building) + - [4.1 Build on Linux](#41-build-on-linux) + - [4.2 Build on macOS](#42-build-on-macos) + - [4.3 Build On Windows](#43-build-on-windows) +1. [Packaging](#5-packaging) +1. [Installation](#6-installation) + - [6.1 Install on Linux](#61-install-on-linux) + - [6.2 Install on macOS](#62-install-on-macos) + - [6.3 Install on Windows](#63-install-on-windows) +1. [Running](#7-running) + - [7.1 Run TDengine on Linux](#71-run-tdengine-on-linux) + - [7.2 Run TDengine on macOS](#72-run-tdengine-on-macos) + - [7.3 Run TDengine on Windows](#73-run-tdengine-on-windows) +1. [Testing](#8-testing) +1. [Releasing](#9-releasing) +1. [Workflow](#10-workflow) +1. [Coverage](#11-coverage) +1. [Contributing](#12-contributing) -# 1. What is TDengine? +# 1. Introduction TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-series databases with the following advantages: @@ -65,132 +74,91 @@ For a full list of TDengine competitive advantages, please [check here](https:// For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) ([TDengine 文档](https://docs.taosdata.com)) -# 3. Building +# 3. Prerequisites -At the moment, TDengine server supports running on Linux/Windows/macOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support build with cross-compiling environment. +## 3.1 On Linux -You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to installing from source. +
-TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to make them be compiled with TDengine. +Install required tools on Linux -To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or higher versions in the project directory. - -## 3.1 Install build tools - -### Ubuntu 18.04 and above or Debian +### For Ubuntu 18.04、20.04、22.04 ```bash -sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev +sudo apt-get udpate +sudo apt-get install -y gcc cmake build-essential git libjansson-dev \ + libsnappy-dev liblzma-dev zlib1g-dev pkg-config ``` -#### Install build dependencies for taosTools - -To build the [taosTools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed. +### For CentOS 8 ```bash -sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config -``` - -### CentOS 7.9 - -```bash -sudo yum install epel-release sudo yum update -sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel -sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake +yum install -y epel-release gcc gcc-c++ make cmake git perl dnf-plugins-core +yum config-manager --set-enabled powertools +yum install -y zlib-static xz-devel snappy-devel jansson-devel pkgconfig libatomic-static libstdc++-static ``` -### CentOS 8/Fedora/Rocky Linux +
+ +## 3.2 On macOS + +
+ +Install required tools on macOS + +Please intall the dependencies with [brew](https://brew.sh/). ```bash -sudo dnf install -y gcc gcc-c++ make cmake epel-release gflags git openssl-devel -``` - -#### Install build dependencies for taosTools on CentOS - -#### CentOS 7.9 - -``` -sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel -``` - -#### CentOS 8/Fedora/Rocky Linux - -``` -sudo yum install -y epel-release -sudo yum install -y dnf-plugins-core -sudo yum config-manager --set-enabled powertools -sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel -``` - -Note: Since snappy lacks pkg-config support (refer to [link](https://github.com/google/snappy/pull/86)), it leads a cmake prompt libsnappy not found. But snappy still works well. - -If the PowerTools installation fails, you can try to use: - -``` -sudo yum config-manager --set-enabled powertools -``` - -#### For CentOS + devtoolset - -Besides above dependencies, please run following commands: - -``` -sudo yum install centos-release-scl -sudo yum install devtoolset-9 devtoolset-9-libatomic-devel -scl enable devtoolset-9 -- bash -``` - -### macOS - -``` brew install argp-standalone gflags pkgconfig ``` -### Setup golang environment +
-TDengine includes a few components like taosAdapter developed by Go language. Please refer to golang.org official documentation for golang environment setup. +## 3.3 On Windows -Please use version 1.20+. For the user in China, we recommend using a proxy to accelerate package downloading. +
-``` -go env -w GO111MODULE=on -go env -w GOPROXY=https://goproxy.cn,direct -``` +Install required tools on Windows -The default will not build taosAdapter, but you can use the following command to build taosAdapter as the service for RESTful interface. +Work in Progress. -``` -cmake .. -DBUILD_HTTP=false -``` +
-### Setup rust environment +## 3.4 Clone the repo -TDengine includes a few components developed by Rust language. Please refer to rust-lang.org official documentation for rust environment setup. +
-## 3.2 Get the source codes +Clone the repo -First of all, you may clone the source codes from github: +Clone the repository to the target machine: ```bash git clone https://github.com/taosdata/TDengine.git cd TDengine ``` -You can modify the file ~/.gitconfig to use ssh protocol instead of https for better download speed. You will need to upload ssh public key to GitHub first. Please refer to GitHub official documentation for detail. -``` -[url "git@github.com:"] - insteadOf = https://github.com/ -``` +> **NOTE:** +> TDengine Connectors can be found in following repositories: [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go), [Python Connector](https://github.com/taosdata/taos-connector-python), [Node.js Connector](https://github.com/taosdata/taos-connector-node), [C# Connector](https://github.com/taosdata/taos-connector-dotnet), [Rust Connector](https://github.com/taosdata/taos-connector-rust). -## 3.3 Special Note +
-[JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go),[Python Connector](https://github.com/taosdata/taos-connector-python),[Node.js Connector](https://github.com/taosdata/taos-connector-node),[C# Connector](https://github.com/taosdata/taos-connector-dotnet) ,[Rust Connector](https://github.com/taosdata/taos-connector-rust) and [Grafana plugin](https://github.com/taosdata/grafanaplugin) has been moved to standalone repository. +# 4. Building -## 3.4 Build TDengine +At the moment, TDengine server supports running on Linux/Windows/MacOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service. TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support build with cross-compiling environment. -### On Linux platform +You can choose to install through source code, [container](https://docs.tdengine.com/get-started/deploy-in-docker/), [installation package](https://docs.tdengine.com/get-started/deploy-from-package/) or [Kubernetes](https://docs.tdengine.com/operations-and-maintenance/deploy-your-cluster/#kubernetes-deployment). This quick guide only applies to install from source. + +TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to make them be compiled with TDengine. + +To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or higher versions in the project directory. + +## 4.1 Build on Linux + +
+ +Detailed steps to build on Linux You can run the bash script `build.sh` to build both TDengine and taosTools including taosBenchmark and taosdump as below: @@ -201,29 +169,46 @@ You can run the bash script `build.sh` to build both TDengine and taosTools incl It equals to execute following commands: ```bash -mkdir debug -cd debug +mkdir debug && cd debug cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true make ``` You can use Jemalloc as memory allocator instead of glibc: -``` -apt install autoconf +```bash cmake .. -DJEMALLOC_ENABLED=true ``` -TDengine build script can detect the host machine's architecture on X86-64, X86, arm64 platform. -You can also specify CPUTYPE option like aarch64 too if the detection result is not correct: - -aarch64: +TDengine build script can auto-detect the host machine's architecture on x86, x86-64, arm64 platform. +You can also specify architecture manually by CPUTYPE option: ```bash cmake .. -DCPUTYPE=aarch64 && cmake --build . ``` -### On Windows platform +
+ +## 4.2 Build on macOS + +
+ +Detailed steps to build on macOS + +Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur. + +```shell +mkdir debug && cd debug +cmake .. && cmake --build . +``` + +
+ +## 4.3 Build on Windows + +
+ +Detailed steps to build on Windows If you use the Visual Studio 2013, please open a command window by executing "cmd.exe". Please specify "amd64" for 64 bits Windows or specify "x86" for 32 bits Windows when you execute vcvarsall.bat. @@ -254,31 +239,67 @@ mkdir debug && cd debug cmake .. -G "NMake Makefiles" nmake ``` +
-### On macOS platform +# 5. Packaging -Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur. +The TDengine community installer can NOT be created by this repository only, due to some component dependencies. We are still working on this improvement. -```shell -mkdir debug && cd debug -cmake .. && cmake --build . -``` +# 6. Installation -# 4. Installing +## 6.1 Install on Linux -## 4.1 On Linux platform +
-After building successfully, TDengine can be installed by +Detailed steps to install on Linux + +After building successfully, TDengine can be installed by: ```bash sudo make install ``` -Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section. +Installing from source code will also configure service management for TDengine. Users can also choose to [install from packages](https://docs.tdengine.com/get-started/deploy-from-package/) for it. -Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it. +
-To start the service after installation, in a terminal, use: +## 6.2 Install on macOS + +
+ +Detailed steps to install on macOS + +After building successfully, TDengine can be installed by: + +```bash +sudo make install +``` + +
+ +## 6.3 Install on Windows + +
+ +Detailed steps to install on windows + +After building successfully, TDengine can be installed by: + +```cmd +nmake install +``` + +
+ +# 7. Running + +## 7.1 Run TDengine on Linux + +
+ +Detailed steps to run on Linux + +To start the service after installation on linux, in a terminal, use: ```bash sudo systemctl start taosd @@ -292,27 +313,29 @@ taos If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown. -## 4.2 On Windows platform - -After building successfully, TDengine can be installed by: - -```cmd -nmake install -``` - -## 4.3 On macOS platform - -After building successfully, TDengine can be installed by: +If you don't want to run TDengine as a service, you can run it in current shell. For example, to quickly start a TDengine server after building, run the command below in terminal: (We take Linux as an example, command on Windows will be `taosd.exe`) ```bash -sudo make install +./build/bin/taosd -c test/cfg ``` -Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section. +In another terminal, use the TDengine CLI to connect the server: -Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it. +```bash +./build/bin/taos -c test/cfg +``` -To start the service after installation, double-click the /applications/TDengine to start the program, or in a terminal, use: +Option `-c test/cfg` specifies the system configuration file directory. + +
+ +## 7.2 Run TDengine on macOS + +
+ +Detailed steps to run on macOS + +To start the service after installation on macOS, double-click the /applications/TDengine to start the program, or in a terminal, use: ```bash sudo launchctl start com.tdengine.taosd @@ -326,64 +349,63 @@ taos If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown. -## 4.4 Quick Run +
-If you don't want to run TDengine as a service, you can run it in current shell. For example, to quickly start a TDengine server after building, run the command below in terminal: (We take Linux as an example, command on Windows will be `taosd.exe`) -```bash -./build/bin/taosd -c test/cfg +## 7.3 Run TDengine on Windows + +
+ +Detailed steps to run on windows + +You can start TDengine server on Windows platform with below commands: + +```cmd +.\build\bin\taosd.exe -c test\cfg ``` In another terminal, use the TDengine CLI to connect the server: -```bash -./build/bin/taos -c test/cfg +```cmd +.\build\bin\taos.exe -c test\cfg ``` option "-c test/cfg" specifies the system configuration file directory. -# 5. Try TDengine +
-It is easy to run SQL commands from TDengine CLI which is the same as other SQL databases. +# 8. Testing -```sql -CREATE DATABASE demo; -USE demo; -CREATE TABLE t (ts TIMESTAMP, speed INT); -INSERT INTO t VALUES('2019-07-15 00:00:00', 10); -INSERT INTO t VALUES('2019-07-15 01:00:00', 20); -SELECT * FROM t; - ts | speed | -=================================== - 19-07-15 00:00:00.000| 10| - 19-07-15 01:00:00.000| 20| -Query OK, 2 row(s) in set (0.001700s) +For how to run different types of tests on TDengine, please see [Testing TDengine](./tests/README.md). + +# 9. Releasing + +For the complete list of TDengine Releases, please see [Releases](https://github.com/taosdata/TDengine/releases). + +# 10. Workflow + +TDengine build check workflow can be found in this [Github Action](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml). More workflows will be available soon. + +# 11. Coverage + +Latest TDengine test coverage report can be found on [coveralls.io](https://coveralls.io/github/taosdata/TDengine) + +
+ +How to run the coverage report locally? +To create the test coverage report (in HTML format) locally, please run following commands: + +```bash +cd tests +bash setup-lcov.sh -v 1.16 && ./run_local_coverage.sh -b main -c task +# on main branch and run cases in longtimeruning_cases.task +# for more infomation about options please refer to ./run_local_coverage.sh -h ``` +> **NOTE:** +> Please note that the -b and -i options will recompile TDengine with the -DCOVER=true option, which may take a amount of time. -# 6. Developing with TDengine +
-## Official Connectors +# 12. Contributing -TDengine provides abundant developing tools for users to develop on TDengine. Follow the links below to find your desired connectors and relevant documentation. - -- [Java](https://docs.tdengine.com/reference/connectors/java/) -- [C/C++](https://docs.tdengine.com/reference/connectors/cpp/) -- [Python](https://docs.tdengine.com/reference/connectors/python/) -- [Go](https://docs.tdengine.com/reference/connectors/go/) -- [Node.js](https://docs.tdengine.com/reference/connectors/node/) -- [Rust](https://docs.tdengine.com/reference/connectors/rust/) -- [C#](https://docs.tdengine.com/reference/connectors/csharp/) -- [RESTful API](https://docs.tdengine.com/reference/connectors/rest-api/) - -# 7. Contribute to TDengine - -Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to the project. - -# 8. Join the TDengine Community - -For more information about TDengine, you can follow us on social media and join our Discord server: - -- [Discord](https://discord.com/invite/VZdSuUg4pS) -- [Twitter](https://twitter.com/TDengineDB) -- [LinkedIn](https://www.linkedin.com/company/tdengine/) -- [YouTube](https://www.youtube.com/@tdengine) +Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to TDengine. diff --git a/cmake/addr2line_CMakeLists.txt.in b/cmake/addr2line_CMakeLists.txt.in index 93fb9bb96c..7cfcb46718 100644 --- a/cmake/addr2line_CMakeLists.txt.in +++ b/cmake/addr2line_CMakeLists.txt.in @@ -2,7 +2,7 @@ # addr2line ExternalProject_Add(addr2line GIT_REPOSITORY https://github.com/davea42/libdwarf-addr2line.git - GIT_TAG master + GIT_TAG main SOURCE_DIR "${TD_CONTRIB_DIR}/addr2line" BINARY_DIR "${TD_CONTRIB_DIR}/addr2line" CONFIGURE_COMMAND "" diff --git a/cmake/azure_CMakeLists.txt.in b/cmake/azure_CMakeLists.txt.in index 5aa32b70e5..d9e47ce6b1 100644 --- a/cmake/azure_CMakeLists.txt.in +++ b/cmake/azure_CMakeLists.txt.in @@ -2,6 +2,7 @@ ExternalProject_Add(azure URL https://github.com/Azure/azure-sdk-for-cpp/archive/refs/tags/azure-storage-blobs_12.13.0-beta.1.tar.gz URL_HASH SHA256=3eca486fd60e3522d0a633025ecd652a71515b1e944799b2e8ee31fd590305a9 + DEPENDS xml2 DOWNLOAD_NO_PROGRESS 1 DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" SOURCE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1" diff --git a/cmake/cmake.version b/cmake/cmake.version index 13fac68e3a..ad78dbbc1e 100644 --- a/cmake/cmake.version +++ b/cmake/cmake.version @@ -2,7 +2,7 @@ IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "3.3.5.0.alpha") + SET(TD_VER_NUMBER "3.3.5.2.alpha") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 6494177faf..2a14018810 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -12,7 +12,7 @@ ExternalProject_Add(curl2 BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl #--enable-debug + CONFIGURE_COMMAND ${CONTRIB_CONFIG_ENV} ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-websockets --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl #--enable-debug BUILD_COMMAND make -j INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/ssl_CMakeLists.txt.in b/cmake/ssl_CMakeLists.txt.in index 1098593943..81e1cb15e9 100644 --- a/cmake/ssl_CMakeLists.txt.in +++ b/cmake/ssl_CMakeLists.txt.in @@ -6,9 +6,9 @@ ExternalProject_Add(openssl DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" SOURCE_DIR "${TD_CONTRIB_DIR}/openssl" BUILD_IN_SOURCE TRUE - #BUILD_ALWAYS 1 - #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./Configure --prefix=$ENV{HOME}/.cos-local.2 no-shared + BUILD_ALWAYS 1 + UPDATE_COMMAND "" + CONFIGURE_COMMAND ${CONTRIB_CONFIG_ENV} ./Configure --prefix=$ENV{HOME}/.cos-local.2 no-shared BUILD_COMMAND make -j INSTALL_COMMAND make install_sw -j TEST_COMMAND "" diff --git a/cmake/xml2_CMakeLists.txt.in b/cmake/xml2_CMakeLists.txt.in index 0e7492aea7..8dcd89efc0 100644 --- a/cmake/xml2_CMakeLists.txt.in +++ b/cmake/xml2_CMakeLists.txt.in @@ -1,19 +1,16 @@ # xml2 ExternalProject_Add(xml2 - URL https://download.gnome.org/sources/libxml2/2.11/libxml2-2.11.5.tar.xz - URL_HASH SHA256=3727b078c360ec69fa869de14bd6f75d7ee8d36987b071e6928d4720a28df3a6 - #https://github.com/GNOME/libxml2/archive/refs/tags/v2.11.5.tar.gz - #GIT_REPOSITORY https://github.com/GNOME/libxml2 - #GIT_TAG v2.11.5 + URL https://github.com/GNOME/libxml2/archive/refs/tags/v2.10.4.tar.gz + URL_HASH SHA256=6f6fb27f91bb65f9d7196e3c616901b3e18a7dea31ccc2ae857940b125faa780 DOWNLOAD_NO_PROGRESS 1 DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" - SOURCE_DIR "${TD_CONTRIB_DIR}/xml2" + SOURCE_DIR "${TD_CONTRIB_DIR}/libxml2" #BINARY_DIR "" BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --enable-shared=no --enable-static=yes --without-python --without-lzma - BUILD_COMMAND make -j - INSTALL_COMMAND make install && ln -sf $ENV{HOME}/.cos-local.2/include/libxml2/libxml $ENV{HOME}/.cos-local.2/include/libxml + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" TEST_COMMAND "" GIT_SHALLOW true ) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 78eded3928..767df03d22 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -17,7 +17,6 @@ elseif(${BUILD_WITH_COS}) file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) - cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) endif(${BUILD_WITH_COS}) configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") @@ -146,11 +145,16 @@ if(${BUILD_WITH_SQLITE}) cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif(${BUILD_WITH_SQLITE}) +# libcurl +if(NOT ${TD_WINDOWS}) + file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.2/) + cat("${TD_SUPPORT_DIR}/ssl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) +endif(NOT ${TD_WINDOWS}) + # s3 if(${BUILD_WITH_S3}) - cat("${TD_SUPPORT_DIR}/ssl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/xml2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/libs3_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/azure_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_S3) @@ -160,7 +164,6 @@ elseif(${BUILD_WITH_COS}) # cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) # cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) # cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - # cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) endif() @@ -199,6 +202,11 @@ endif() # lemon cat("${TD_SUPPORT_DIR}/lemon_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) +# Force specify CC=cc on MacOS. Because the default CC setting in the generated Makefile has issues finding standard library headers +IF(${TD_DARWIN}) + SET(CONTRIB_CONFIG_ENV "CC=cc") +ENDIF() + # download dependencies configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . @@ -652,7 +660,12 @@ if(${BUILD_PCRE2}) endif(${BUILD_PCRE2}) if(${TD_LINUX} AND ${BUILD_WITH_S3}) - add_subdirectory(azure-cmake EXCLUDE_FROM_ALL) + set(ORIG_CMAKE_C_FLAGS ${CMAKE_C_FLAGS}) + string(REPLACE " -Werror " " " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") + add_subdirectory(xml2-cmake) + set(CMAKE_C_FLAGS ${ORIG_CMAKE_C_FLAGS}) + + add_subdirectory(azure-cmake) endif() IF(TD_LINUX) diff --git a/contrib/azure-cmake/CMakeLists.txt b/contrib/azure-cmake/CMakeLists.txt index aaa5617860..eaf4c569e7 100644 --- a/contrib/azure-cmake/CMakeLists.txt +++ b/contrib/azure-cmake/CMakeLists.txt @@ -36,10 +36,6 @@ target_include_directories( ) find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) -find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) - -# find_library(CURL_LIBRARY curl) -# find_library(XML2_LIBRARY xml2) find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) @@ -50,9 +46,8 @@ target_link_libraries( PRIVATE ${CURL_LIBRARY} PRIVATE ${SSL_LIBRARY} PRIVATE ${CRYPTO_LIBRARY} - PRIVATE ${XML2_LIBRARY} - # PRIVATE xml2 + PRIVATE _libxml2 PRIVATE zlib # PRIVATE ${CoreFoundation_Library} diff --git a/contrib/xml2-cmake/CMakeLists.txt b/contrib/xml2-cmake/CMakeLists.txt new file mode 100644 index 0000000000..9067c0e6e7 --- /dev/null +++ b/contrib/xml2-cmake/CMakeLists.txt @@ -0,0 +1,58 @@ +set(LIBXML2_SOURCE_DIR "${TD_CONTRIB_DIR}/libxml2") + +set(SRCS + "${LIBXML2_SOURCE_DIR}/SAX.c" + "${LIBXML2_SOURCE_DIR}/entities.c" + "${LIBXML2_SOURCE_DIR}/encoding.c" + "${LIBXML2_SOURCE_DIR}/error.c" + "${LIBXML2_SOURCE_DIR}/parserInternals.c" + "${LIBXML2_SOURCE_DIR}/parser.c" + "${LIBXML2_SOURCE_DIR}/tree.c" + "${LIBXML2_SOURCE_DIR}/hash.c" + "${LIBXML2_SOURCE_DIR}/list.c" + "${LIBXML2_SOURCE_DIR}/xmlIO.c" + "${LIBXML2_SOURCE_DIR}/xmlmemory.c" + "${LIBXML2_SOURCE_DIR}/uri.c" + "${LIBXML2_SOURCE_DIR}/valid.c" + "${LIBXML2_SOURCE_DIR}/xlink.c" + "${LIBXML2_SOURCE_DIR}/HTMLparser.c" + "${LIBXML2_SOURCE_DIR}/HTMLtree.c" + "${LIBXML2_SOURCE_DIR}/debugXML.c" + "${LIBXML2_SOURCE_DIR}/xpath.c" + "${LIBXML2_SOURCE_DIR}/xpointer.c" + "${LIBXML2_SOURCE_DIR}/xinclude.c" + "${LIBXML2_SOURCE_DIR}/nanohttp.c" + "${LIBXML2_SOURCE_DIR}/nanoftp.c" + "${LIBXML2_SOURCE_DIR}/catalog.c" + "${LIBXML2_SOURCE_DIR}/globals.c" + "${LIBXML2_SOURCE_DIR}/threads.c" + "${LIBXML2_SOURCE_DIR}/c14n.c" + "${LIBXML2_SOURCE_DIR}/xmlstring.c" + "${LIBXML2_SOURCE_DIR}/buf.c" + "${LIBXML2_SOURCE_DIR}/xmlregexp.c" + "${LIBXML2_SOURCE_DIR}/xmlschemas.c" + "${LIBXML2_SOURCE_DIR}/xmlschemastypes.c" + "${LIBXML2_SOURCE_DIR}/xmlunicode.c" + "${LIBXML2_SOURCE_DIR}/triostr.c" + "${LIBXML2_SOURCE_DIR}/xmlreader.c" + "${LIBXML2_SOURCE_DIR}/relaxng.c" + "${LIBXML2_SOURCE_DIR}/dict.c" + "${LIBXML2_SOURCE_DIR}/SAX2.c" + "${LIBXML2_SOURCE_DIR}/xmlwriter.c" + "${LIBXML2_SOURCE_DIR}/legacy.c" + "${LIBXML2_SOURCE_DIR}/chvalid.c" + "${LIBXML2_SOURCE_DIR}/pattern.c" + "${LIBXML2_SOURCE_DIR}/xmlsave.c" + "${LIBXML2_SOURCE_DIR}/xmlmodule.c" + "${LIBXML2_SOURCE_DIR}/schematron.c" + "${LIBXML2_SOURCE_DIR}/xzlib.c" +) +add_library(_libxml2 ${SRCS}) + +#target_link_libraries(_libxml2 PRIVATE td_contrib::zlib) +target_link_libraries(_libxml2 PRIVATE zlib) + +target_include_directories(_libxml2 BEFORE PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include") +target_include_directories(_libxml2 BEFORE PUBLIC "${LIBXML2_SOURCE_DIR}/include") + +add_library(td_contrib::libxml2 ALIAS _libxml2) diff --git a/contrib/xml2-cmake/linux_x86_64/include/config.h b/contrib/xml2-cmake/linux_x86_64/include/config.h new file mode 100644 index 0000000000..7969b377dc --- /dev/null +++ b/contrib/xml2-cmake/linux_x86_64/include/config.h @@ -0,0 +1,285 @@ +/* config.h. Generated from config.h.in by configure. */ +/* config.h.in. Generated from configure.ac by autoheader. */ + +/* Type cast for the gethostbyname() argument */ +#define GETHOSTBYNAME_ARG_CAST /**/ + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_INET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ARPA_NAMESER_H 1 + +/* Whether struct sockaddr::__ss_family exists */ +/* #undef HAVE_BROKEN_SS_FAMILY */ + +/* Define to 1 if you have the header file. */ +#define HAVE_CTYPE_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DIRENT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Have dlopen based dso */ +#define HAVE_DLOPEN /**/ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_ERRNO_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_FCNTL_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_FLOAT_H 1 + +/* Define to 1 if you have the `fprintf' function. */ +#define HAVE_FPRINTF 1 + +/* Define to 1 if you have the `ftime' function. */ +#define HAVE_FTIME 1 + +/* Define if getaddrinfo is there */ +#define HAVE_GETADDRINFO /**/ + +/* Define to 1 if you have the `gettimeofday' function. */ +#define HAVE_GETTIMEOFDAY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if you have the `isascii' function. */ +#define HAVE_ISASCII 1 + +/* Define if isinf is there */ +#define HAVE_ISINF /**/ + +/* Define if isnan is there */ +#define HAVE_ISNAN /**/ + +/* Define if history library is there (-lhistory) */ +/* #undef HAVE_LIBHISTORY */ + +/* Define if pthread library is there (-lpthread) */ +#define HAVE_LIBPTHREAD /**/ + +/* Define if readline library is there (-lreadline) */ +/* #undef HAVE_LIBREADLINE */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIMITS_H 1 + +/* Define to 1 if you have the `localtime' function. */ +#define HAVE_LOCALTIME 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LZMA_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_MALLOC_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MATH_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `mmap' function. */ +#define HAVE_MMAP 1 + +/* Define to 1 if you have the `munmap' function. */ +#define HAVE_MUNMAP 1 + +/* mmap() is no good without munmap() */ +#if defined(HAVE_MMAP) && !defined(HAVE_MUNMAP) +# undef /**/ HAVE_MMAP +#endif + +/* Define to 1 if you have the header file, and it defines `DIR'. */ +/* #undef HAVE_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_NETDB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_NETINET_IN_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_POLL_H 1 + +/* Define to 1 if you have the `printf' function. */ +#define HAVE_PRINTF 1 + +/* Define if is there */ +#define HAVE_PTHREAD_H /**/ + +/* Define to 1 if you have the `putenv' function. */ +#define HAVE_PUTENV 1 + +/* Define to 1 if you have the `rand' function. */ +#define HAVE_RAND 1 + +/* Define to 1 if you have the `rand_r' function. */ +#define HAVE_RAND_R 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_RESOLV_H 1 + +/* Have shl_load based dso */ +/* #undef HAVE_SHLLOAD */ + +/* Define to 1 if you have the `signal' function. */ +#define HAVE_SIGNAL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SIGNAL_H 1 + +/* Define to 1 if you have the `snprintf' function. */ +#define HAVE_SNPRINTF 1 + +/* Define to 1 if you have the `sprintf' function. */ +#define HAVE_SPRINTF 1 + +/* Define to 1 if you have the `srand' function. */ +#define HAVE_SRAND 1 + +/* Define to 1 if you have the `sscanf' function. */ +#define HAVE_SSCANF 1 + +/* Define to 1 if you have the `stat' function. */ +#define HAVE_STAT 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDARG_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strftime' function. */ +#define HAVE_STRFTIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_DIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_MMAN_H 1 + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SELECT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SOCKET_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIMEB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the `time' function. */ +#define HAVE_TIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Whether va_copy() is available */ +#define HAVE_VA_COPY 1 + +/* Define to 1 if you have the `vfprintf' function. */ +#define HAVE_VFPRINTF 1 + +/* Define to 1 if you have the `vsnprintf' function. */ +#define HAVE_VSNPRINTF 1 + +/* Define to 1 if you have the `vsprintf' function. */ +#define HAVE_VSPRINTF 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_ZLIB_H */ + +/* Whether __va_copy() is available */ +/* #undef HAVE___VA_COPY */ + +/* Define as const if the declaration of iconv() needs const. */ +#define ICONV_CONST + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "libxml2" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "" + +/* Type cast for the send() function 2nd arg */ +#define SEND_ARG2_CAST /**/ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Support for IPv6 */ +#define SUPPORT_IP6 /**/ + +/* Define if va_list is an array type */ +#define VA_LIST_IS_ARRAY 1 + +/* Version number of package */ +#define VERSION "2.9.8" + +/* Determine what socket length (socklen_t) data type is */ +#define XML_SOCKLEN_T socklen_t + +/* Define for Solaris 2.5.1 so the uint32_t typedef from , + , or is not used. If the typedef were allowed, the + #define below would cause a syntax error. */ +/* #undef _UINT32_T */ + +/* ss_family is not defined here, use __ss_family instead */ +/* #undef ss_family */ + +/* Define to the type of an unsigned integer type of width exactly 32 bits if + such a type exists and the standard includes do not define it. */ +/* #undef uint32_t */ diff --git a/contrib/xml2-cmake/linux_x86_64/include/libxml/xmlversion.h b/contrib/xml2-cmake/linux_x86_64/include/libxml/xmlversion.h new file mode 100644 index 0000000000..c2faeb47cb --- /dev/null +++ b/contrib/xml2-cmake/linux_x86_64/include/libxml/xmlversion.h @@ -0,0 +1,501 @@ +/* + * Summary: compile-time version information + * Description: compile-time version information for the XML library + * + * Copy: See Copyright for the status of this software. + * + * Author: Daniel Veillard + */ + +#ifndef __XML_VERSION_H__ +#define __XML_VERSION_H__ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * use those to be sure nothing nasty will happen if + * your library and includes mismatch + */ +#ifndef LIBXML2_COMPILING_MSCCDEF +XMLPUBFUN void XMLCALL xmlCheckVersion(int version); +#endif /* LIBXML2_COMPILING_MSCCDEF */ + +/** + * LIBXML_DOTTED_VERSION: + * + * the version string like "1.2.3" + */ +#define LIBXML_DOTTED_VERSION "2.10.3" + +/** + * LIBXML_VERSION: + * + * the version number: 1.2.3 value is 10203 + */ +#define LIBXML_VERSION 21003 + +/** + * LIBXML_VERSION_STRING: + * + * the version number string, 1.2.3 value is "10203" + */ +#define LIBXML_VERSION_STRING "21003" + +/** + * LIBXML_VERSION_EXTRA: + * + * extra version information, used to show a git commit description + */ +#define LIBXML_VERSION_EXTRA "" + +/** + * LIBXML_TEST_VERSION: + * + * Macro to check that the libxml version in use is compatible with + * the version the software has been compiled against + */ +#define LIBXML_TEST_VERSION xmlCheckVersion(21003); + +#ifndef VMS +#if 0 +/** + * WITH_TRIO: + * + * defined if the trio support need to be configured in + */ +#define WITH_TRIO +#else +/** + * WITHOUT_TRIO: + * + * defined if the trio support should not be configured in + */ +#define WITHOUT_TRIO +#endif +#else /* VMS */ +/** + * WITH_TRIO: + * + * defined if the trio support need to be configured in + */ +#define WITH_TRIO 1 +#endif /* VMS */ + +/** + * LIBXML_THREAD_ENABLED: + * + * Whether the thread support is configured in + */ +#if 1 +#define LIBXML_THREAD_ENABLED +#endif + +/** + * LIBXML_THREAD_ALLOC_ENABLED: + * + * Whether the allocation hooks are per-thread + */ +#if 0 +#define LIBXML_THREAD_ALLOC_ENABLED +#endif + +/** + * LIBXML_TREE_ENABLED: + * + * Whether the DOM like tree manipulation API support is configured in + */ +#if 1 +#define LIBXML_TREE_ENABLED +#endif + +/** + * LIBXML_OUTPUT_ENABLED: + * + * Whether the serialization/saving support is configured in + */ +#if 1 +#define LIBXML_OUTPUT_ENABLED +#endif + +/** + * LIBXML_PUSH_ENABLED: + * + * Whether the push parsing interfaces are configured in + */ +#if 1 +#define LIBXML_PUSH_ENABLED +#endif + +/** + * LIBXML_READER_ENABLED: + * + * Whether the xmlReader parsing interface is configured in + */ +#if 1 +#define LIBXML_READER_ENABLED +#endif + +/** + * LIBXML_PATTERN_ENABLED: + * + * Whether the xmlPattern node selection interface is configured in + */ +#if 1 +#define LIBXML_PATTERN_ENABLED +#endif + +/** + * LIBXML_WRITER_ENABLED: + * + * Whether the xmlWriter saving interface is configured in + */ +#if 1 +#define LIBXML_WRITER_ENABLED +#endif + +/** + * LIBXML_SAX1_ENABLED: + * + * Whether the older SAX1 interface is configured in + */ +#if 1 +#define LIBXML_SAX1_ENABLED +#endif + +/** + * LIBXML_FTP_ENABLED: + * + * Whether the FTP support is configured in + */ +#if 0 +#define LIBXML_FTP_ENABLED +#endif + +/** + * LIBXML_HTTP_ENABLED: + * + * Whether the HTTP support is configured in + */ +#if 1 +#define LIBXML_HTTP_ENABLED +#endif + +/** + * LIBXML_VALID_ENABLED: + * + * Whether the DTD validation support is configured in + */ +#if 1 +#define LIBXML_VALID_ENABLED +#endif + +/** + * LIBXML_HTML_ENABLED: + * + * Whether the HTML support is configured in + */ +#if 1 +#define LIBXML_HTML_ENABLED +#endif + +/** + * LIBXML_LEGACY_ENABLED: + * + * Whether the deprecated APIs are compiled in for compatibility + */ +#if 0 +#define LIBXML_LEGACY_ENABLED +#endif + +/** + * LIBXML_C14N_ENABLED: + * + * Whether the Canonicalization support is configured in + */ +#if 1 +#define LIBXML_C14N_ENABLED +#endif + +/** + * LIBXML_CATALOG_ENABLED: + * + * Whether the Catalog support is configured in + */ +#if 1 +#define LIBXML_CATALOG_ENABLED +#endif + +/** + * LIBXML_XPATH_ENABLED: + * + * Whether XPath is configured in + */ +#if 1 +#define LIBXML_XPATH_ENABLED +#endif + +/** + * LIBXML_XPTR_ENABLED: + * + * Whether XPointer is configured in + */ +#if 1 +#define LIBXML_XPTR_ENABLED +#endif + +/** + * LIBXML_XPTR_LOCS_ENABLED: + * + * Whether support for XPointer locations is configured in + */ +#if 0 +#define LIBXML_XPTR_LOCS_ENABLED +#endif + +/** + * LIBXML_XINCLUDE_ENABLED: + * + * Whether XInclude is configured in + */ +#if 1 +#define LIBXML_XINCLUDE_ENABLED +#endif + +/** + * LIBXML_ICONV_ENABLED: + * + * Whether iconv support is available + */ +#if 0 +#define LIBXML_ICONV_ENABLED +#endif + +/** + * LIBXML_ICU_ENABLED: + * + * Whether icu support is available + */ +#if 0 +#define LIBXML_ICU_ENABLED +#endif + +/** + * LIBXML_ISO8859X_ENABLED: + * + * Whether ISO-8859-* support is made available in case iconv is not + */ +#if 1 +#define LIBXML_ISO8859X_ENABLED +#endif + +/** + * LIBXML_DEBUG_ENABLED: + * + * Whether Debugging module is configured in + */ +#if 1 +#define LIBXML_DEBUG_ENABLED +#endif + +/** + * DEBUG_MEMORY_LOCATION: + * + * Whether the memory debugging is configured in + */ +#if 0 +#define DEBUG_MEMORY_LOCATION +#endif + +/** + * LIBXML_DEBUG_RUNTIME: + * + * Whether the runtime debugging is configured in + */ +#if 0 +#define LIBXML_DEBUG_RUNTIME +#endif + +/** + * LIBXML_UNICODE_ENABLED: + * + * Whether the Unicode related interfaces are compiled in + */ +#if 1 +#define LIBXML_UNICODE_ENABLED +#endif + +/** + * LIBXML_REGEXP_ENABLED: + * + * Whether the regular expressions interfaces are compiled in + */ +#if 1 +#define LIBXML_REGEXP_ENABLED +#endif + +/** + * LIBXML_AUTOMATA_ENABLED: + * + * Whether the automata interfaces are compiled in + */ +#if 1 +#define LIBXML_AUTOMATA_ENABLED +#endif + +/** + * LIBXML_EXPR_ENABLED: + * + * Whether the formal expressions interfaces are compiled in + * + * This code is unused and disabled unconditionally for now. + */ +#if 0 +#define LIBXML_EXPR_ENABLED +#endif + +/** + * LIBXML_SCHEMAS_ENABLED: + * + * Whether the Schemas validation interfaces are compiled in + */ +#if 1 +#define LIBXML_SCHEMAS_ENABLED +#endif + +/** + * LIBXML_SCHEMATRON_ENABLED: + * + * Whether the Schematron validation interfaces are compiled in + */ +#if 1 +#define LIBXML_SCHEMATRON_ENABLED +#endif + +/** + * LIBXML_MODULES_ENABLED: + * + * Whether the module interfaces are compiled in + */ +#if 1 +#define LIBXML_MODULES_ENABLED +/** + * LIBXML_MODULE_EXTENSION: + * + * the string suffix used by dynamic modules (usually shared libraries) + */ +#define LIBXML_MODULE_EXTENSION ".so" +#endif + +/** + * LIBXML_ZLIB_ENABLED: + * + * Whether the Zlib support is compiled in + */ +#if 1 +#define LIBXML_ZLIB_ENABLED +#endif + +/** + * LIBXML_LZMA_ENABLED: + * + * Whether the Lzma support is compiled in + */ +#if 0 +#define LIBXML_LZMA_ENABLED +#endif + +#ifdef __GNUC__ + +/** + * ATTRIBUTE_UNUSED: + * + * Macro used to signal to GCC unused function parameters + */ + +#ifndef ATTRIBUTE_UNUSED +# if ((__GNUC__ > 2) || ((__GNUC__ == 2) && (__GNUC_MINOR__ >= 7))) +# define ATTRIBUTE_UNUSED __attribute__((unused)) +# else +# define ATTRIBUTE_UNUSED +# endif +#endif + +/** + * LIBXML_ATTR_ALLOC_SIZE: + * + * Macro used to indicate to GCC this is an allocator function + */ + +#ifndef LIBXML_ATTR_ALLOC_SIZE +# if (!defined(__clang__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)))) +# define LIBXML_ATTR_ALLOC_SIZE(x) __attribute__((alloc_size(x))) +# else +# define LIBXML_ATTR_ALLOC_SIZE(x) +# endif +#else +# define LIBXML_ATTR_ALLOC_SIZE(x) +#endif + +/** + * LIBXML_ATTR_FORMAT: + * + * Macro used to indicate to GCC the parameter are printf like + */ + +#ifndef LIBXML_ATTR_FORMAT +# if ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3))) +# define LIBXML_ATTR_FORMAT(fmt,args) __attribute__((__format__(__printf__,fmt,args))) +# else +# define LIBXML_ATTR_FORMAT(fmt,args) +# endif +#else +# define LIBXML_ATTR_FORMAT(fmt,args) +#endif + +#ifndef XML_DEPRECATED +# ifdef IN_LIBXML +# define XML_DEPRECATED +# else +/* Available since at least GCC 3.1 */ +# define XML_DEPRECATED __attribute__((deprecated)) +# endif +#endif + +#else /* ! __GNUC__ */ +/** + * ATTRIBUTE_UNUSED: + * + * Macro used to signal to GCC unused function parameters + */ +#define ATTRIBUTE_UNUSED +/** + * LIBXML_ATTR_ALLOC_SIZE: + * + * Macro used to indicate to GCC this is an allocator function + */ +#define LIBXML_ATTR_ALLOC_SIZE(x) +/** + * LIBXML_ATTR_FORMAT: + * + * Macro used to indicate to GCC the parameter are printf like + */ +#define LIBXML_ATTR_FORMAT(fmt,args) +/** + * XML_DEPRECATED: + * + * Macro used to indicate that a function, variable, type or struct member + * is deprecated. + */ +#ifndef XML_DEPRECATED +#define XML_DEPRECATED +#endif +#endif /* __GNUC__ */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif diff --git a/docs/en/07-develop/01-connect.md b/docs/en/07-develop/01-connect.md index c14eed311a..b6725ed7a4 100644 --- a/docs/en/07-develop/01-connect.md +++ b/docs/en/07-develop/01-connect.md @@ -109,7 +109,7 @@ If you are using Maven to manage your project, simply add the following dependen com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 ``` diff --git a/docs/en/07-develop/05-stmt.md b/docs/en/07-develop/05-stmt.md index 11b055bcf9..16fe156cc3 100644 --- a/docs/en/07-develop/05-stmt.md +++ b/docs/en/07-develop/05-stmt.md @@ -15,6 +15,19 @@ When inserting data using parameter binding, it can avoid the resource consumpti **Tips: It is recommended to use parameter binding for data insertion** + :::note + We only recommend using the following two forms of SQL for parameter binding data insertion: + + ```sql + a. Subtables already exists: + 1. INSERT INTO meters (tbname, ts, current, voltage, phase) VALUES(?, ?, ?, ?, ?) + b. Automatic table creation on insert: + 1. INSERT INTO meters (tbname, ts, current, voltage, phase, location, group_id) VALUES(?, ?, ?, ?, ?, ?, ?) + 2. INSERT INTO ? USING meters TAGS (?, ?) VALUES (?, ?, ?, ?) + ``` + + ::: + Next, we continue to use smart meters as an example to demonstrate the efficient writing functionality of parameter binding with various language connectors: 1. Prepare a parameterized SQL insert statement for inserting data into the supertable `meters`. This statement allows dynamically specifying subtable names, tags, and column values. diff --git a/docs/en/10-third-party/01-collection/flink.md b/docs/en/10-third-party/01-collection/flink.md index 12468b4f6c..19a767f1f6 100644 --- a/docs/en/10-third-party/01-collection/flink.md +++ b/docs/en/10-third-party/01-collection/flink.md @@ -26,7 +26,8 @@ Flink Connector supports all platforms that can run Flink 1.19 and above version | Flink Connector Version | Major Changes | TDengine Version| |-------------------------| ------------------------------------ | ---------------- | -| 2.0.0 | 1.Support SQL queries on data in TDengine database.
2. Support CDC subscription to data in TDengine database.
3. Supports reading and writing to TDengine database using Table SQL. | 3.3.5.0 and higher| +| 2.0.1 | Sink supports writing types from Rowdata implementations.| - | +| 2.0.0 | 1.Support SQL queries on data in TDengine database.
2. Support CDC subscription to data in TDengine database.
3. Supports reading and writing to TDengine database using Table SQL. | 3.3.5.1 and higher| | 1.0.0 | Support Sink function to write data from other sources to TDengine in the future.| 3.3.2.0 and higher| ## Exception and error codes @@ -114,7 +115,7 @@ If using Maven to manage a project, simply add the following dependencies in pom com.taosdata.flink flink-connector-tdengine - 2.0.0 + 2.0.1 ``` diff --git a/docs/en/14-reference/01-components/01-taosd.md b/docs/en/14-reference/01-components/01-taosd.md index 55db20bef0..7456593ddb 100644 --- a/docs/en/14-reference/01-components/01-taosd.md +++ b/docs/en/14-reference/01-components/01-taosd.md @@ -77,12 +77,7 @@ After modifying configuration file parameters, you need to restart the *taosd* s |minReservedMemorySize | |Not supported |The minimum reserved system available memory size, all memory except reserved can be used for queries, unit: MB, default reserved size is 20% of system physical memory, value range 1024-1000000000| |singleQueryMaxMemorySize| |Not supported |The memory limit that a single query can use on a single node (dnode), exceeding this limit will return an error, unit: MB, default value: 0 (no limit), value range 0-1000000000| |filterScalarMode | |Not supported |Force scalar filter mode, 0: off; 1: on, default value 0| -|queryPlannerTrace | |Supported, effective immediately |Internal parameter, whether the query plan outputs detailed logs| -|queryNodeChunkSize | |Supported, effective immediately |Internal parameter, chunk size of the query plan| -|queryUseNodeAllocator | |Supported, effective immediately |Internal parameter, allocation method of the query plan| -|queryMaxConcurrentTables| |Not supported |Internal parameter, concurrency number of the query plan| |queryRsmaTolerance | |Not supported |Internal parameter, tolerance time for determining which level of rsma data to query, in milliseconds| -|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages| |pqSortMemThreshold | |Not supported |Internal parameter, memory threshold for sorting| ### Region Related diff --git a/docs/en/14-reference/02-tools/10-taosbenchmark.md b/docs/en/14-reference/02-tools/10-taosbenchmark.md index d1a18b5d1c..1b9e148d6f 100644 --- a/docs/en/14-reference/02-tools/10-taosbenchmark.md +++ b/docs/en/14-reference/02-tools/10-taosbenchmark.md @@ -403,7 +403,7 @@ Specify the configuration parameters for tag and data columns in `super_tables` - **min**: The minimum value for the data type of the column/tag. Generated values will be greater than or equal to the minimum value. -- **max**: The maximum value for the data type of the column/tag. Generated values will be less than the minimum value. +- **max**: The maximum value for the data type of the column/tag. Generated values will be less than the maximum value. - **scalingFactor**: Floating-point precision enhancement factor, only effective when the data type is float/double, valid values range from 1 to 1000000 positive integers. Used to enhance the precision of generated floating points, especially when min or max values are small. This attribute enhances the precision after the decimal point by powers of 10: a scalingFactor of 10 means enhancing the precision by 1 decimal place, 100 means 2 places, and so on. diff --git a/docs/en/14-reference/03-taos-sql/02-database.md b/docs/en/14-reference/03-taos-sql/02-database.md index 54548fe297..c6865fd162 100644 --- a/docs/en/14-reference/03-taos-sql/02-database.md +++ b/docs/en/14-reference/03-taos-sql/02-database.md @@ -65,7 +65,7 @@ database_option: { - MINROWS: The minimum number of records in a file block, default is 100. - KEEP: Indicates the number of days data files are kept, default value is 3650, range [1, 365000], and must be greater than or equal to 3 times the DURATION parameter value. The database will automatically delete data that has been saved for longer than the KEEP value to free up storage space. KEEP can use unit-specified formats, such as KEEP 100h, KEEP 10d, etc., supports m (minutes), h (hours), and d (days) three units. It can also be written without a unit, like KEEP 50, where the default unit is days. The enterprise version supports multi-tier storage feature, thus, multiple retention times can be set (multiple separated by commas, up to 3, satisfying keep 0 \<= keep 1 \<= keep 2, such as KEEP 100h,100d,3650d); the community version does not support multi-tier storage feature (even if multiple retention times are configured, it will not take effect, KEEP will take the longest retention time). - KEEP_TIME_OFFSET: Effective from version 3.2.0.0. The delay execution time for deleting or migrating data that has been saved for longer than the KEEP value, default value is 0 (hours). After the data file's save time exceeds KEEP, the deletion or migration operation will not be executed immediately, but will wait an additional interval specified by this parameter, to avoid peak business periods. -- STT_TRIGGER: Indicates the number of file merges triggered by disk files. The open-source version is fixed at 1, the enterprise version can be set from 1 to 16. For scenarios with few tables and high-frequency writing, this parameter is recommended to use the default configuration; for scenarios with many tables and low-frequency writing, this parameter is recommended to be set to a larger value. +- STT_TRIGGER: Indicates the number of file merges triggered by disk files. For scenarios with few tables and high-frequency writing, this parameter is recommended to use the default configuration; for scenarios with many tables and low-frequency writing, this parameter is recommended to be set to a larger value. - SINGLE_STABLE: Indicates whether only one supertable can be created in this database, used in cases where the supertable has a very large number of columns. - 0: Indicates that multiple supertables can be created. - 1: Indicates that only one supertable can be created. @@ -144,10 +144,6 @@ You can view cacheload through show \.vgroups; If cacheload is very close to cachesize, then cachesize may be too small. If cacheload is significantly less than cachesize, then cachesize is sufficient. You can decide whether to modify cachesize based on this principle. The specific modification value can be determined based on the available system memory, whether to double it or increase it several times. -4. stt_trigger - -Please stop database writing before modifying the stt_trigger parameter. - :::note Other parameters are not supported for modification in version 3.0.0.0 diff --git a/docs/en/14-reference/03-taos-sql/10-function.md b/docs/en/14-reference/03-taos-sql/10-function.md index ab5c48bce2..8397c59177 100644 --- a/docs/en/14-reference/03-taos-sql/10-function.md +++ b/docs/en/14-reference/03-taos-sql/10-function.md @@ -2171,7 +2171,7 @@ ignore_negative: { **Usage Instructions**: -- Can be used with the columns associated with the selection. For example: select _rowts, DERIVATIVE() from. +- Can be used with the columns associated with the selection. For example: select _rowts, DERIVATIVE(col1, 1s, 1) from tb1. ### DIFF diff --git a/docs/en/14-reference/05-connector/14-java.md b/docs/en/14-reference/05-connector/14-java.md index c28702440a..0c7fbbdda4 100644 --- a/docs/en/14-reference/05-connector/14-java.md +++ b/docs/en/14-reference/05-connector/14-java.md @@ -33,6 +33,7 @@ The JDBC driver implementation for TDengine strives to be consistent with relati | taos-jdbcdriver Version | Major Changes | TDengine Version | | ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------ | +| 3.5.2 | Fixed WebSocket result set free bug. | - | | 3.5.1 | Fixed the getObject issue in data subscription. | - | | 3.5.0 | 1. Optimized the performance of WebSocket connection parameter binding, supporting parameter binding queries using binary data.
2. Optimized the performance of small queries in WebSocket connection.
3. Added support for setting time zone and app info on WebSocket connection. | 3.3.5.0 and higher | | 3.4.0 | 1. Replaced fastjson library with jackson.
2. WebSocket uses a separate protocol identifier.
3. Optimized background thread usage to avoid user misuse leading to timeouts. | - | diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index 12cf5484d4..9f4246c7a0 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -25,6 +25,10 @@ Download links for TDengine 3.x version installation packages are as follows: import Release from "/components/ReleaseV3"; +## 3.3.5.2 + + + ## 3.3.5.0 diff --git a/docs/en/28-releases/03-notes/3.3.5.2.md b/docs/en/28-releases/03-notes/3.3.5.2.md new file mode 100755 index 0000000000..ce0141cccf --- /dev/null +++ b/docs/en/28-releases/03-notes/3.3.5.2.md @@ -0,0 +1,43 @@ +--- +title: TDengine 3.3.5.2 Release Notes +sidebar_label: 3.3.5.2 +description: Version 3.3.5.2 Notes +slug: /release-history/release-notes/3.3.5.2 +--- + +## Features + 1. feat: taosX now support multiple stables with template for MQTT + +## Enhancements + 1. enh: improve taosX error message if database is invalid + 2. enh: use poetry group depencencies and reduce dep when install [#251](https://github.com/taosdata/taos-connector-python/issues/251) + 3. enh: improve backup restore using taosX + 4. enh: during the multi-level storage data migration, if the migration time is too long, it may cause the Vnode to switch leader + 5. enh: adjust the systemctl strategy for managing the taosd process, if three consecutive restarts fail within 60 seconds, the next restart will be delayed until 900 seconds later + +## Fixes + 1. fix: the maxRetryWaitTime parameter is used to control the maximum reconnection timeout time for the client when the cluster is unable to provide services, but it does not take effect when encountering a Sync timeout error + 2. fix: supports immediate subscription to the new tag value after modifying the tag value of the sub-table + 3. fix: the tmq_consumer_poll function for data subscription does not return an error code when the call fails + 4. fix: taosd may crash when more than 100 views are created and the show views command is executed + 5. fix: when using stmt2 to insert data, if not all data columns are bound, the insertion operation will fail + 6. fix: when using stmt2 to insert data, if the database name or table name is enclosed in backticks, the insertion operation will fail + 7. fix: when closing a vnode, if there are ongoing file merge tasks, taosd may crash + 8. fix: frequent execution of the “drop table with tb_uid” statement may lead to a deadlock in taosd + 9. fix: the potential deadlock during the switching of log files + 10. fix: prohibit the creation of databases with the same names as system databases (information_schema, performance_schema) + 11. fix: when the inner query of a nested query come from a super table, the sorting information cannot be pushed up + 12. fix: incorrect error reporting when attempting to write Geometry data types that do not conform to topological specifications through the STMT interface + 13. fix: when using the percentile function and session window in a query statement, if an error occurs, taosd may crash + 14. fix: the issue of being unable to dynamically modify system parameters + 15. fix: random error of tranlict transaction in replication + 16. fix: the same consumer executes the unsubscribe operation and immediately attempts to subscribe to other different topics, the subscription API will return an error + 17. fix: fix CVE-2022-28948 security issue in go connector + 18. fix: when a subquery in a view contains an ORDER BY clause with an alias, and the query function itself also has an alias, querying the view will result in an error + 19. fix: when changing the database from a single replica to a mulit replica, if there are some metadata generated by earlier versions that are no longer used in the new version, the modification operation will fail + 20. fix: column names were not correctly copied when using SELECT * FROM subqueries + 21. fix: when performing max/min function on string type data, the results are inaccurate and taosd will crash + 22. fix: stream computing does not support the use of the HAVING clause, but no error is reported during creation + 23. fix: the version information displayed by taos shell for the server is inaccurate, such as being unable to correctly distinguish between the community edition and the enterprise edition + 24. fix: in certain specific query scenarios, when JOIN and CAST are used together, taosd may crash + diff --git a/docs/en/28-releases/03-notes/index.md b/docs/en/28-releases/03-notes/index.md index e54862e105..5ff7350e6c 100644 --- a/docs/en/28-releases/03-notes/index.md +++ b/docs/en/28-releases/03-notes/index.md @@ -5,6 +5,7 @@ slug: /release-history/release-notes [3.3.5.0](./3-3-5-0/) +[3.3.5.2](./3.3.5.2) [3.3.4.8](./3-3-4-8/) [3.3.4.3](./3-3-4-3/) diff --git a/docs/examples/JDBC/JDBCDemo/pom.xml b/docs/examples/JDBC/JDBCDemo/pom.xml index 78262712e9..a80f7a9cdf 100644 --- a/docs/examples/JDBC/JDBCDemo/pom.xml +++ b/docs/examples/JDBC/JDBCDemo/pom.xml @@ -19,7 +19,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 org.locationtech.jts diff --git a/docs/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java b/docs/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java index 0de386447c..0a63504b91 100644 --- a/docs/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java +++ b/docs/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java @@ -1,6 +1,4 @@ package com.taosdata.example; - -import com.alibaba.fastjson.JSON; import com.taosdata.jdbc.AbstractStatement; import java.sql.*; diff --git a/docs/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java b/docs/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java index ec4adf8db9..7fba500c49 100644 --- a/docs/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java +++ b/docs/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java @@ -104,8 +104,9 @@ public class JdbcDemo { private void executeQuery(String sql) { long start = System.currentTimeMillis(); - try (Statement statement = connection.createStatement()) { - ResultSet resultSet = statement.executeQuery(sql); + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(sql)) { + long end = System.currentTimeMillis(); printSql(sql, true, (end - start)); Util.printResult(resultSet); diff --git a/docs/examples/JDBC/SpringJdbcTemplate/pom.xml b/docs/examples/JDBC/SpringJdbcTemplate/pom.xml index 7ff4a72f5e..20da9bfae8 100644 --- a/docs/examples/JDBC/SpringJdbcTemplate/pom.xml +++ b/docs/examples/JDBC/SpringJdbcTemplate/pom.xml @@ -47,7 +47,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 diff --git a/docs/examples/JDBC/connectionPools/pom.xml b/docs/examples/JDBC/connectionPools/pom.xml index 70be6ed527..86e6fb04a4 100644 --- a/docs/examples/JDBC/connectionPools/pom.xml +++ b/docs/examples/JDBC/connectionPools/pom.xml @@ -18,7 +18,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 diff --git a/docs/examples/JDBC/consumer-demo/pom.xml b/docs/examples/JDBC/consumer-demo/pom.xml index c9537a93bf..e439c22224 100644 --- a/docs/examples/JDBC/consumer-demo/pom.xml +++ b/docs/examples/JDBC/consumer-demo/pom.xml @@ -17,7 +17,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 com.google.guava diff --git a/docs/examples/JDBC/mybatisplus-demo/pom.xml b/docs/examples/JDBC/mybatisplus-demo/pom.xml index effb13cfe8..8b4777bfb0 100644 --- a/docs/examples/JDBC/mybatisplus-demo/pom.xml +++ b/docs/examples/JDBC/mybatisplus-demo/pom.xml @@ -47,7 +47,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 diff --git a/docs/examples/JDBC/springbootdemo/pom.xml b/docs/examples/JDBC/springbootdemo/pom.xml index 25b503b0e6..825f6fb9c2 100644 --- a/docs/examples/JDBC/springbootdemo/pom.xml +++ b/docs/examples/JDBC/springbootdemo/pom.xml @@ -70,7 +70,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 diff --git a/docs/examples/JDBC/taosdemo/pom.xml b/docs/examples/JDBC/taosdemo/pom.xml index a80deeff94..c8f9c73d9d 100644 --- a/docs/examples/JDBC/taosdemo/pom.xml +++ b/docs/examples/JDBC/taosdemo/pom.xml @@ -67,7 +67,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 diff --git a/docs/examples/flink/Main.java b/docs/examples/flink/Main.java index 12d79126cf..50a507d1de 100644 --- a/docs/examples/flink/Main.java +++ b/docs/examples/flink/Main.java @@ -263,7 +263,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location") Class> typeClass = (Class>) (Class) SourceRecords.class; SourceSplitSql sql = new SourceSplitSql("select ts, `current`, voltage, phase, tbname from meters"); TDengineSource> source = new TDengineSource<>(connProps, sql, typeClass); - DataStreamSource> input = env.fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source"); + DataStreamSource> input = env.fromSource(source, WatermarkStrategy.noWatermarks(), "tdengine-source"); DataStream resultStream = input.map((MapFunction, String>) records -> { StringBuilder sb = new StringBuilder(); Iterator iterator = records.iterator(); @@ -304,7 +304,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location") config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "RowData"); config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER_ENCODING, "UTF-8"); TDengineCdcSource tdengineSource = new TDengineCdcSource<>("topic_meters", config, RowData.class); - DataStreamSource input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "kafka-source"); + DataStreamSource input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source"); DataStream resultStream = input.map((MapFunction) rowData -> { StringBuilder sb = new StringBuilder(); sb.append("tsxx: " + rowData.getTimestamp(0, 0) + @@ -343,7 +343,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location") Class> typeClass = (Class>) (Class) ConsumerRecords.class; TDengineCdcSource> tdengineSource = new TDengineCdcSource<>("topic_meters", config, typeClass); - DataStreamSource> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "kafka-source"); + DataStreamSource> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source"); DataStream resultStream = input.map((MapFunction, String>) records -> { Iterator> iterator = records.iterator(); StringBuilder sb = new StringBuilder(); @@ -388,7 +388,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location") config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "com.taosdata.flink.entity.ResultDeserializer"); config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER_ENCODING, "UTF-8"); TDengineCdcSource tdengineSource = new TDengineCdcSource<>("topic_meters", config, ResultBean.class); - DataStreamSource input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "kafka-source"); + DataStreamSource input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source"); DataStream resultStream = input.map((MapFunction) rowData -> { StringBuilder sb = new StringBuilder(); sb.append("ts: " + rowData.getTs() + diff --git a/docs/examples/java/pom.xml b/docs/examples/java/pom.xml index 63ce3159e6..4569742ab4 100644 --- a/docs/examples/java/pom.xml +++ b/docs/examples/java/pom.xml @@ -22,7 +22,7 @@ com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 diff --git a/docs/zh/07-develop/01-connect/index.md b/docs/zh/07-develop/01-connect/index.md index fa22f750f5..a6e30ccb9c 100644 --- a/docs/zh/07-develop/01-connect/index.md +++ b/docs/zh/07-develop/01-connect/index.md @@ -89,7 +89,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速 com.taosdata.jdbc taos-jdbcdriver - 3.5.1 + 3.5.2 ``` diff --git a/docs/zh/07-develop/05-stmt.md b/docs/zh/07-develop/05-stmt.md index 1917a86e74..5f218689be 100644 --- a/docs/zh/07-develop/05-stmt.md +++ b/docs/zh/07-develop/05-stmt.md @@ -15,6 +15,19 @@ import TabItem from "@theme/TabItem"; **Tips: 数据写入推荐使用参数绑定方式** + :::note + 我们只推荐使用下面两种形式的 SQL 进行参数绑定写入: + + ```sql + 一、确定子表存在: + 1. INSERT INTO meters (tbname, ts, current, voltage, phase) VALUES(?, ?, ?, ?, ?) + 二、自动建表: + 1. INSERT INTO meters (tbname, ts, current, voltage, phase, location, group_id) VALUES(?, ?, ?, ?, ?, ?, ?) + 2. INSERT INTO ? USING meters TAGS (?, ?) VALUES (?, ?, ?, ?) + ``` + + ::: + 下面我们继续以智能电表为例,展示各语言连接器使用参数绑定高效写入的功能: 1. 准备一个参数化的 SQL 插入语句,用于向超级表 `meters` 中插入数据。这个语句允许动态地指定子表名、标签和列值。 2. 循环生成多个子表及其对应的数据行。对于每个子表: diff --git a/docs/zh/10-third-party/01-collection/12-flink.md b/docs/zh/10-third-party/01-collection/12-flink.md index e085d2fd53..0f8bde5260 100644 --- a/docs/zh/10-third-party/01-collection/12-flink.md +++ b/docs/zh/10-third-party/01-collection/12-flink.md @@ -24,7 +24,8 @@ Flink Connector 支持所有能运行 Flink 1.19 及以上版本的平台。 ## 版本历史 | Flink Connector 版本 | 主要变化 | TDengine 版本 | | ------------------| ------------------------------------ | ---------------- | -| 2.0.0 | 1. 支持 SQL 查询 TDengine 数据库中的数据
2. 支持 CDC 订阅 TDengine 数据库中的数据
3. 支持 Table SQL 方式读取和写入 TDengine 数据库| 3.3.5.0 及以上版本 | +| 2.0.1 | Sink 支持对所有继承自 RowData 并已实现的类型进行数据写入| - | +| 2.0.0 | 1. 支持 SQL 查询 TDengine 数据库中的数据
2. 支持 CDC 订阅 TDengine 数据库中的数据
3. 支持 Table SQL 方式读取和写入 TDengine 数据库| 3.3.5.1 及以上版本 | | 1.0.0 | 支持 Sink 功能,将来着其他数据源的数据写入到 TDengine| 3.3.2.0 及以上版本| ## 异常和错误码 @@ -111,7 +112,7 @@ env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.AT_LEAST_ONCE); com.taosdata.flink flink-connector-tdengine - 2.0.0 + 2.0.1 ``` diff --git a/docs/zh/14-reference/01-components/01-taosd.md b/docs/zh/14-reference/01-components/01-taosd.md index ff1d7617d8..0b7189897c 100644 --- a/docs/zh/14-reference/01-components/01-taosd.md +++ b/docs/zh/14-reference/01-components/01-taosd.md @@ -73,12 +73,7 @@ taosd 命令行参数如下 |minReservedMemorySize | |不支持动态修改 |最小预留的系统可用内存数量,除预留外的内存都可以被用于查询,单位:MB,默认预留大小为系统物理内存的 20%,取值范围 1024 - 1000000000| |singleQueryMaxMemorySize| |不支持动态修改 |单个查询在单个节点(dnode)上可以使用的内存上限,超过该上限将返回错误,单位:MB,默认值:0(无上限),取值范围 0 - 1000000000| |filterScalarMode | |不支持动态修改 |强制使用标量过滤模式,0:关闭;1:开启,默认值 0| -|queryPlannerTrace | |支持动态修改 立即生效 |内部参数,查询计划是否输出详细日志| -|queryNodeChunkSize | |支持动态修改 立即生效 |内部参数,查询计划的块大小| -|queryUseNodeAllocator | |支持动态修改 立即生效 |内部参数,查询计划的分配方法| -|queryMaxConcurrentTables| |不支持动态修改 |内部参数,查询计划的并发数目| |queryRsmaTolerance | |不支持动态修改 |内部参数,用于判定查询哪一级 rsma 数据时的容忍时间,单位为毫秒| -|enableQueryHb | |支持动态修改 立即生效 |内部参数,是否发送查询心跳消息| |pqSortMemThreshold | |不支持动态修改 |内部参数,排序使用的内存阈值| ### 区域相关 @@ -194,7 +189,7 @@ charset 的有效值是 UTF-8。 |numOfQnodeQueryThreads | |支持动态修改 重启生效 |qnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)| |numOfSnodeSharedThreads | |支持动态修改 重启生效 |snode 的共享线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)| |numOfSnodeUniqueThreads | |支持动态修改 重启生效 |snode 的独占线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)| -|ratioOfVnodeStreamThreads | |支持动态修改 重启生效 |流计算使用 vnode 线程的比例,取值范围 0.01-4,默认值 4| +|ratioOfVnodeStreamThreads | |支持动态修改 重启生效 |流计算使用 vnode 线程的比例,取值范围 0.01-4,默认值 0.5| |ttlUnit | |不支持动态修改 |ttl 参数的单位,取值范围 1-31572500,单位为秒,默认值 86400| |ttlPushInterval | |支持动态修改 立即生效 |ttl 检测超时频率,取值范围 1-100000,单位为秒,默认值 10| |ttlChangeOnWrite | |支持动态修改 立即生效 |ttl 到期时间是否伴随表的修改操作改变;0:不改变,1:改变;默认值为 0| diff --git a/docs/zh/14-reference/02-tools/10-taosbenchmark.md b/docs/zh/14-reference/02-tools/10-taosbenchmark.md index 44dab0ad5f..2b60288385 100644 --- a/docs/zh/14-reference/02-tools/10-taosbenchmark.md +++ b/docs/zh/14-reference/02-tools/10-taosbenchmark.md @@ -319,7 +319,7 @@ INFO: Consumed total msgs: 3000, total rows: 30000000 - **min** : 数据类型的 列/标签 的最小值。生成的值将大于或等于最小值。 -- **max** : 数据类型的 列/标签 的最大值。生成的值将小于最小值。 +- **max** : 数据类型的 列/标签 的最大值。生成的值将小于最大值。 - **scalingFactor** : 浮点数精度增强因子,仅当数据类型是 float/double 时生效,有效值范围为 1 至 1000000 的正整数。用于增强生成浮点数的精度,特别是在 min 或 max 值较小的情况下。此属性按 10 的幂次增强小数点后的精度:scalingFactor 为 10 表示增强 1 位小数精度,100 表示增强 2 位,依此类推。 diff --git a/docs/zh/14-reference/03-taos-sql/02-database.md b/docs/zh/14-reference/03-taos-sql/02-database.md index 32df6c60c1..53d52a3e96 100644 --- a/docs/zh/14-reference/03-taos-sql/02-database.md +++ b/docs/zh/14-reference/03-taos-sql/02-database.md @@ -67,7 +67,7 @@ database_option: { - KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于 3 倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据从而释放存储空间。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](../../operation/planning/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 \<= keep 1 \<= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。了解更多,请点击 [关于主键时间戳](https://docs.taosdata.com/reference/taos-sql/insert/) - KEEP_TIME_OFFSET:自 3.2.0.0 版本生效。删除或迁移保存时间超过 KEEP 值的数据的延迟执行时间,默认值为 0 (小时)。在数据文件保存时间超过 KEEP 后,删除或迁移操作不会立即执行,而会额外等待本参数指定的时间间隔,以实现与业务高峰期错开的目的。 -- STT_TRIGGER:表示落盘文件触发文件合并的个数。开源版本固定为 1,企业版本可设置范围为 1 到 16。对于少表高频写入场景,此参数建议使用默认配置;而对于多表低频写入场景,此参数建议配置较大的值。 +- STT_TRIGGER:表示落盘文件触发文件合并的个数。对于少表高频写入场景,此参数建议使用默认配置;而对于多表低频写入场景,此参数建议配置较大的值。 - SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。 - 0:表示可以创建多张超级表。 - 1:表示只可以创建一张超级表。 @@ -146,10 +146,6 @@ alter_database_option: { 如果 cacheload 非常接近 cachesize,则 cachesize 可能过小。 如果 cacheload 明显小于 cachesize 则 cachesize 是够用的。可以根据这个原则判断是否需要修改 cachesize 。具体修改值可以根据系统可用内存情况来决定是加倍或者是提高几倍。 -4. stt_trigger - -在修改 stt_trigger 参数之前请先停止数据库写入。 - :::note 其它参数在 3.0.0.0 中暂不支持修改 @@ -209,7 +205,7 @@ REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3 BALANCE VGROUP LEADER ``` -触发集群所有 vgroup 中的 leader 重新选主,对集群各节点进行负载再均衡操作。 +触发集群所有 vgroup 中的 leader 重新选主,对集群各节点进行负载再均衡操作。(企业版功能) ## 查看数据库工作状态 diff --git a/docs/zh/14-reference/03-taos-sql/10-function.md b/docs/zh/14-reference/03-taos-sql/10-function.md index eb3a4bb0ed..c0e80e80df 100644 --- a/docs/zh/14-reference/03-taos-sql/10-function.md +++ b/docs/zh/14-reference/03-taos-sql/10-function.md @@ -2099,7 +2099,7 @@ ignore_negative: { **使用说明**: -- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。 +- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE(col1, 1s, 1) from tb1。 ### DIFF diff --git a/docs/zh/14-reference/05-connector/14-java.mdx b/docs/zh/14-reference/05-connector/14-java.mdx index 7d5096bb66..9325396eb8 100644 --- a/docs/zh/14-reference/05-connector/14-java.mdx +++ b/docs/zh/14-reference/05-connector/14-java.mdx @@ -33,6 +33,7 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致 | taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 | | ------------------| ---------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | +| 3.5.2 | 解决了 WebSocket 查询结果集释放 bug | - | | 3.5.1 | 解决了数据订阅获取时间戳对象类型问题 | - | | 3.5.0 | 1. 优化了 WebSocket 连接参数绑定性能,支持参数绑定查询使用二进制数据
2. 优化了 WebSocket 连接在小查询上的性能
3. WebSocket 连接上支持设置时区和应用信息 | 3.3.5.0 及更高版本 | | 3.4.0 | 1. 使用 jackson 库替换 fastjson 库
2. WebSocket 采用独立协议标识
3. 优化后台拉取线程使用,避免用户误用导致超时 | - | diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md index 356777acdc..88c07a89f4 100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -24,6 +24,10 @@ TDengine 3.x 各版本安装包下载链接如下: import Release from "/components/ReleaseV3"; +## 3.3.5.2 + + + ## 3.3.5.0 diff --git a/docs/zh/28-releases/03-notes/3.3.5.2.md b/docs/zh/28-releases/03-notes/3.3.5.2.md new file mode 100755 index 0000000000..dc2734c50b --- /dev/null +++ b/docs/zh/28-releases/03-notes/3.3.5.2.md @@ -0,0 +1,42 @@ +--- +title: 3.3.5.2 版本说明 +sidebar_label: 3.3.5.2 +description: 3.3.5.2 版本说明 +--- + +## 特性 + 1. 特性:taosX MQTT 数据源支持根据模板创建多个超级表 + +## 优化 + 1. 优化:改进 taosX 数据库不可用时的错误信息 + 2. 优化:使用 Poetry 标准管理依赖项并减少 Python 连接器安装依赖项 [#251](https://github.com/taosdata/taos-connector-python/issues/251) + 3. 优化:taosX 增量备份和恢复优化 + 4. 优化:在多级存储数据迁移过程中,如果迁移时间过长,可能会导致 Vnode 切主 + 5. 优化:调整 systemctl 守护 taosd 进程的策略,如果 60 秒内连续三次重启失败,下次重启将推迟至 900 秒后 + +## 修复 + 1. 修复:maxRetryWaitTime 参数用于控制当集群无法提供服务时客户端的最大重连超时时间,但在遇到 Sync timeout 错误时,该参数不生效 + 2. 修复:支持在修改子表的 tag 值后,即时订阅到更新后的 tag 值 + 3. 修复:数据订阅的 tmq_consumer_poll 函数调用失败时没有返回错误码 + 4. 修复:当创建超过 100 个视图并执行 show views 命令时,taosd 可能会发生崩溃 + 5. 修复:当使用 stmt2 写入数据时,如果未绑定所有的数据列,写入操作将会失败 + 6. 修复:当使用 stmt2 写入数据时,如果数据库名或表名使用了反引号,写入操作将会失败 + 7. 修复:关闭 vnode 时如果有正在进行的文件合并任务,taosd 可能会崩溃 + 8. 修复:频繁执行 drop table with `tb_uid` 语句可能导致 taosd 死锁 + 9. 修复:日志文件切换过程中可能出现的死锁问题 + 10. 修复:禁止创建与系统库(information_schema, performance_schema)同名的数据库 + 11. 修复:当嵌套查询的内层查询来源于超级表时,排序信息无法被上推 + 12. 修复:通过 STMT 接口尝试写入不符合拓扑规范的 Geometry 数据类型时误报错误 + 13. 修复:在查询语句中使用 percentile 函数和会话窗口时,如果出现错误,taosd 可能会崩溃 + 14. 修复:无法动态修改系统参数的问题 + 15. 修复:订阅同步偶发 Translict transaction 错误 + 16. 修复:同一消费者在执行取消订阅操作后,立即尝试订阅其他不同的主题时,会返回错误 + 17. 修复:Go 连接器安全修复 CVE-2022-28948 + 18. 修复:当视图中的子查询包含带别名的 ORDER BY 子句,并且查询函数自身也带有别名时,查询该视图会引发错误 + 19. 修复:在将数据库从单副本修改为多副本时,如果存在一些由较早版本生成且在新版本中已不再使用的元数据,会导致修改操作失败 + 20. 修复:在使用 SELECT * FROM 子查询时,列名未能正确复制到外层查询 + 21. 修复:对字符串类型数据执行 max/min 函数时,结果不准确且 taosd 可能会崩溃 + 22. 修复:流式计算不支持使用 HAVING 语句,但在创建时未报告错误 + 23. 修复:taos shell 显示的服务端版本信息不准确,例如无法正确区分社区版和企业版 + 24. 修复:在某些特定的查询场景下,当 JOIN 和 CAST 联合使用时,taosd 可能会崩溃 + diff --git a/docs/zh/28-releases/03-notes/index.md b/docs/zh/28-releases/03-notes/index.md index 27898aa2df..420ab4a54d 100644 --- a/docs/zh/28-releases/03-notes/index.md +++ b/docs/zh/28-releases/03-notes/index.md @@ -4,6 +4,7 @@ sidebar_label: 版本说明 description: 各版本版本说明 --- +[3.3.5.2](./3.3.5.2) [3.3.5.0](./3.3.5.0) [3.3.4.8](./3.3.4.8) [3.3.4.3](./3.3.4.3) diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 0450766535..c30f2ab4ec 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -160,6 +160,7 @@ typedef enum EStreamType { STREAM_PARTITION_DELETE_DATA, STREAM_GET_RESULT, STREAM_DROP_CHILD_TABLE, + STREAM_NOTIFY_EVENT, } EStreamType; #pragma pack(push, 1) @@ -408,6 +409,9 @@ typedef struct STUidTagInfo { #define UD_GROUPID_COLUMN_INDEX 1 #define UD_TAG_COLUMN_INDEX 2 +// stream notify event block column +#define NOTIFY_EVENT_STR_COLUMN_INDEX 0 + int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t startTime); int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol); diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index 1103b89ccb..96478047ca 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -285,6 +285,8 @@ bool isAutoTableName(char* ctbName); int32_t buildCtbNameAddGroupId(const char* stbName, char* ctbName, uint64_t groupId, size_t cap); int32_t buildCtbNameByGroupId(const char* stbName, uint64_t groupId, char** pName); int32_t buildCtbNameByGroupIdImpl(const char* stbName, uint64_t groupId, char* pBuf); +int32_t buildSinkDestTableName(char* parTbName, const char* stbFullName, uint64_t gid, bool newSubTableRule, + char** dstTableName); int32_t trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList); diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 6beb7c8860..4e9a9bd801 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -34,6 +34,9 @@ extern "C" { #define GLOBAL_CONFIG_FILE_VERSION 1 #define LOCAL_CONFIG_FILE_VERSION 1 +#define RPC_MEMORY_USAGE_RATIO 0.1 +#define QUEUE_MEMORY_USAGE_RATIO 0.6 + typedef enum { DND_CA_SM4 = 1, } EEncryptAlgor; @@ -110,6 +113,7 @@ extern int32_t tsNumOfQnodeFetchThreads; extern int32_t tsNumOfSnodeStreamThreads; extern int32_t tsNumOfSnodeWriteThreads; extern int64_t tsQueueMemoryAllowed; +extern int64_t tsApplyMemoryAllowed; extern int32_t tsRetentionSpeedLimitMB; extern int32_t tsNumOfCompactThreads; diff --git a/include/common/tmsg.h b/include/common/tmsg.h index aebe09b563..82eaa2359e 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -269,6 +269,7 @@ typedef enum ENodeType { QUERY_NODE_TSMA_OPTIONS, QUERY_NODE_ANOMALY_WINDOW, QUERY_NODE_RANGE_AROUND, + QUERY_NODE_STREAM_NOTIFY_OPTIONS, // Statement nodes are used in parser and planner module. QUERY_NODE_SET_OPERATOR = 100, @@ -2956,6 +2957,11 @@ typedef struct { // 3.3.0.0 SArray* pCols; // array of SField int64_t smaId; + // 3.3.6.0 + SArray* pNotifyAddrUrls; + int32_t notifyEventTypes; + int32_t notifyErrorHandle; + int8_t notifyHistory; } SCMCreateStreamReq; typedef struct { diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index 883c5f7b99..9a7c3912b0 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -98,6 +98,9 @@ int32_t qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId); int32_t qSetStreamOpOpen(qTaskInfo_t tinfo); +int32_t qSetStreamNotifyInfo(qTaskInfo_t tinfo, int32_t eventTypes, const SSchemaWrapper* pSchemaWrapper, + const char* stbFullName, bool newSubTableRule); + /** * Set multiple input data blocks for the stream scan. * @param tinfo diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h index 12d77bd0c2..26482a87d4 100644 --- a/include/libs/nodes/cmdnodes.h +++ b/include/libs/nodes/cmdnodes.h @@ -566,19 +566,44 @@ typedef struct SStreamOptions { int64_t setFlag; } SStreamOptions; +typedef enum EStreamNotifyOptionSetFlag { + SNOTIFY_OPT_ERROR_HANDLE_SET = BIT_FLAG_MASK(0), + SNOTIFY_OPT_NOTIFY_HISTORY_SET = BIT_FLAG_MASK(1), +} EStreamNotifyOptionSetFlag; + +typedef enum EStreamNotifyEventType { + SNOTIFY_EVENT_WINDOW_OPEN = BIT_FLAG_MASK(0), + SNOTIFY_EVENT_WINDOW_CLOSE = BIT_FLAG_MASK(1), +} EStreamNotifyEventType; + +typedef enum EStreamNotifyErrorHandleType { + SNOTIFY_ERROR_HANDLE_PAUSE, + SNOTIFY_ERROR_HANDLE_DROP, +} EStreamNotifyErrorHandleType; + +typedef struct SStreamNotifyOptions { + ENodeType type; + SNodeList* pAddrUrls; + EStreamNotifyEventType eventTypes; + EStreamNotifyErrorHandleType errorHandle; + bool notifyHistory; + EStreamNotifyOptionSetFlag setFlag; +} SStreamNotifyOptions; + typedef struct SCreateStreamStmt { - ENodeType type; - char streamName[TSDB_TABLE_NAME_LEN]; - char targetDbName[TSDB_DB_NAME_LEN]; - char targetTabName[TSDB_TABLE_NAME_LEN]; - bool ignoreExists; - SStreamOptions* pOptions; - SNode* pQuery; - SNode* pPrevQuery; - SNodeList* pTags; - SNode* pSubtable; - SNodeList* pCols; - SCMCreateStreamReq* pReq; + ENodeType type; + char streamName[TSDB_TABLE_NAME_LEN]; + char targetDbName[TSDB_DB_NAME_LEN]; + char targetTabName[TSDB_TABLE_NAME_LEN]; + bool ignoreExists; + SStreamOptions* pOptions; + SNode* pQuery; + SNode* pPrevQuery; + SNodeList* pTags; + SNode* pSubtable; + SNodeList* pCols; + SStreamNotifyOptions* pNotifyOptions; + SCMCreateStreamReq* pReq; } SCreateStreamStmt; typedef struct SDropStreamStmt { diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index a4d89dcdcc..9cd6dd13ca 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -65,10 +65,14 @@ typedef struct SStreamTaskSM SStreamTaskSM; typedef struct SStreamQueueItem SStreamQueueItem; typedef struct SActiveCheckpointInfo SActiveCheckpointInfo; -#define SSTREAM_TASK_VER 4 -#define SSTREAM_TASK_INCOMPATIBLE_VER 1 -#define SSTREAM_TASK_NEED_CONVERT_VER 2 -#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3 +#define SSTREAM_TASK_VER 5 +#define SSTREAM_TASK_INCOMPATIBLE_VER 1 +#define SSTREAM_TASK_NEED_CONVERT_VER 2 +#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3 // Append subtable name with groupId +#define SSTREAM_TASK_APPEND_STABLE_NAME_VER 4 // Append subtable name with stableName and groupId +#define SSTREAM_TASK_ADD_NOTIFY_VER 5 // Support event notification at window open/close + +#define IS_NEW_SUBTB_RULE(_t) (((_t)->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) && ((_t)->subtableWithoutMd5 != 1)) extern int32_t streamMetaRefPool; extern int32_t streamTaskRefPool; @@ -427,6 +431,15 @@ typedef struct STaskCheckInfo { TdThreadMutex checkInfoLock; } STaskCheckInfo; +typedef struct SNotifyInfo { + SArray* pNotifyAddrUrls; + int32_t notifyEventTypes; + int32_t notifyErrorHandle; + char* streamName; + char* stbFullName; + SSchemaWrapper* pSchemaWrapper; +} SNotifyInfo; + struct SStreamTask { int64_t ver; SStreamTaskId id; @@ -449,6 +462,7 @@ struct SStreamTask { SStreamState* pState; // state backend SUpstreamInfo upstreamInfo; STaskCheckInfo taskCheckInfo; + SNotifyInfo notifyInfo; // the followings attributes don't be serialized SScanhistorySchedInfo schedHistoryInfo; diff --git a/include/util/tdef.h b/include/util/tdef.h index 8de5c82fdb..f08697b0d4 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -245,6 +245,7 @@ typedef enum ELogicConditionType { #define TSDB_OFFSET_LEN 64 // it is a null-terminated string #define TSDB_USER_CGROUP_LEN (TSDB_USER_LEN + TSDB_CGROUP_LEN) // it is a null-terminated string #define TSDB_STREAM_NAME_LEN 193 // it is a null-terminated string +#define TSDB_STREAM_NOTIFY_URL_LEN 128 // it includes the terminating '\0' #define TSDB_DB_NAME_LEN 65 #define TSDB_DB_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_NAME_DELIMITER_LEN) #define TSDB_PRIVILEDGE_CONDITION_LEN 48 * 1024 @@ -460,13 +461,13 @@ typedef enum ELogicConditionType { #define TSDB_DB_SCHEMALESS_OFF 0 #define TSDB_DEFAULT_DB_SCHEMALESS TSDB_DB_SCHEMALESS_OFF #define TSDB_MIN_STT_TRIGGER 1 -#ifdef TD_ENTERPRISE +// #ifdef TD_ENTERPRISE #define TSDB_MAX_STT_TRIGGER 16 #define TSDB_DEFAULT_SST_TRIGGER 2 -#else -#define TSDB_MAX_STT_TRIGGER 1 -#define TSDB_DEFAULT_SST_TRIGGER 1 -#endif +// #else +// #define TSDB_MAX_STT_TRIGGER 1 +// #define TSDB_DEFAULT_SST_TRIGGER 1 +// #endif #define TSDB_STT_TRIGGER_ARRAY_SIZE 16 // maximum of TSDB_MAX_STT_TRIGGER of TD_ENTERPRISE and TD_COMMUNITY #define TSDB_MIN_HASH_PREFIX (2 - TSDB_TABLE_NAME_LEN) #define TSDB_MAX_HASH_PREFIX (TSDB_TABLE_NAME_LEN - 2) diff --git a/include/util/tlog.h b/include/util/tlog.h index f573d61e73..60ddc29288 100644 --- a/include/util/tlog.h +++ b/include/util/tlog.h @@ -79,6 +79,9 @@ void taosResetLog(); void taosDumpData(uint8_t *msg, int32_t len); void taosSetNoNewFile(); +// Fast uint64_t to string conversion, equivalent to sprintf(buf, "%lu", val) but with 10x better performance. +char *u64toaFastLut(uint64_t val, char *buf); + void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *format, ...) #ifdef __GNUC__ __attribute__((format(printf, 4, 5))) diff --git a/include/util/tqueue.h b/include/util/tqueue.h index 5ae642b69f..1d634ce742 100644 --- a/include/util/tqueue.h +++ b/include/util/tqueue.h @@ -55,6 +55,7 @@ typedef struct { typedef enum { DEF_QITEM = 0, RPC_QITEM = 1, + APPLY_QITEM = 2, } EQItype; typedef void (*FItem)(SQueueInfo *pInfo, void *pItem); diff --git a/packaging/setup_env.sh b/packaging/setup_env.sh index e1a7a26579..32451072ab 100644 --- a/packaging/setup_env.sh +++ b/packaging/setup_env.sh @@ -174,6 +174,7 @@ help() { echo " config_qemu_guest_agent - Configure QEMU guest agent" echo " deploy_docker - Deploy Docker" echo " deploy_docker_compose - Deploy Docker Compose" + echo " install_trivy - Install Trivy" echo " clone_enterprise - Clone the enterprise repository" echo " clone_community - Clone the community repository" echo " clone_taosx - Clone TaosX repository" @@ -316,6 +317,17 @@ add_config_if_not_exist() { grep -qF -- "$config" "$file" || echo "$config" >> "$file" } +# Function to check if a tool is installed +check_installed() { + local command_name="$1" + if command -v "$command_name" >/dev/null 2>&1; then + echo "$command_name is already installed. Skipping installation." + return 0 + else + echo "$command_name is not installed." + return 1 + fi +} # General error handling function check_status() { local message_on_failure="$1" @@ -584,9 +596,12 @@ centos_skip_check() { # Deploy cmake deploy_cmake() { # Check if cmake is installed - if command -v cmake >/dev/null 2>&1; then - echo "Cmake is already installed. Skipping installation." - cmake --version + # if command -v cmake >/dev/null 2>&1; then + # echo "Cmake is already installed. Skipping installation." + # cmake --version + # return + # fi + if check_installed "cmake"; then return fi install_package "cmake3" @@ -1058,11 +1073,13 @@ deploy_go() { GOPATH_DIR="/root/go" # Check if Go is installed - if command -v go >/dev/null 2>&1; then - echo "Go is already installed. Skipping installation." + # if command -v go >/dev/null 2>&1; then + # echo "Go is already installed. Skipping installation." + # return + # fi + if check_installed "go"; then return fi - # Fetch the latest version number of Go GO_LATEST_DATA=$(curl --retry 10 --retry-delay 5 --retry-max-time 120 -s https://golang.google.cn/VERSION?m=text) GO_LATEST_VERSION=$(echo "$GO_LATEST_DATA" | grep -oP 'go[0-9]+\.[0-9]+\.[0-9]+') @@ -1731,6 +1748,42 @@ deploy_docker_compose() { fi } +# Instal trivy +install_trivy() { + echo -e "${YELLOW}Installing Trivy...${NO_COLOR}" + # Check if Trivy is already installed + # if command -v trivy >/dev/null 2>&1; then + # echo "Trivy is already installed. Skipping installation." + # trivy --version + # return + # fi + if check_installed "trivy"; then + return + fi + # Install jq + install_package jq + # Get latest version + LATEST_VERSION=$(curl -s https://api.github.com/repos/aquasecurity/trivy/releases/latest | jq -r .tag_name) + # Download + if [ -f /etc/debian_version ]; then + wget https://github.com/aquasecurity/trivy/releases/download/"${LATEST_VERSION}"/trivy_"${LATEST_VERSION#v}"_Linux-64bit.deb + # Install + dpkg -i trivy_"${LATEST_VERSION#v}"_Linux-64bit.deb + + elif [ -f /etc/redhat-release ]; then + wget https://github.com/aquasecurity/trivy/releases/download/"${LATEST_VERSION}"/trivy_"${LATEST_VERSION#v}"_Linux-64bit.rpm + # Install + rpm -ivh trivy_"${LATEST_VERSION#v}"_Linux-64bit.rpm + else + echo "Unsupported Linux distribution." + exit 1 + fi + # Check + trivy --version + check_status "Failed to install Trivy" "Trivy installed successfully." $? + rm -rf trivy_"${LATEST_VERSION#v}"_Linux-64bit.deb trivy_"${LATEST_VERSION#v}"_Linux-64bit.rpm +} + # Reconfigure cloud-init reconfig_cloud_init() { echo "Reconfiguring cloud-init..." @@ -2004,6 +2057,7 @@ deploy_dev() { install_nginx deploy_docker deploy_docker_compose + install_trivy check_status "Failed to deploy some tools" "Deploy all tools successfully" $? } @@ -2159,6 +2213,9 @@ main() { deploy_docker_compose) deploy_docker_compose ;; + install_trivy) + install_trivy + ;; clone_enterprise) clone_enterprise ;; diff --git a/packaging/smokeTest/test_smoking_selfhost.sh b/packaging/smokeTest/test_smoking_selfhost.sh index a25c5a6d90..6ed0b9c715 100755 --- a/packaging/smokeTest/test_smoking_selfhost.sh +++ b/packaging/smokeTest/test_smoking_selfhost.sh @@ -6,12 +6,6 @@ SUCCESS_FILE="success.txt" FAILED_FILE="failed.txt" REPORT_FILE="report.txt" -# Initialize/clear result files -> "$SUCCESS_FILE" -> "$FAILED_FILE" -> "$LOG_FILE" -> "$REPORT_FILE" - # Switch to the target directory TARGET_DIR="../../tests/system-test/" @@ -24,6 +18,12 @@ else exit 1 fi +# Initialize/clear result files +> "$SUCCESS_FILE" +> "$FAILED_FILE" +> "$LOG_FILE" +> "$REPORT_FILE" + # Define the Python commands to execute commands=( "python3 ./test.py -f 2-query/join.py" @@ -102,4 +102,4 @@ fi echo "Detailed logs can be found in: $(realpath "$LOG_FILE")" echo "Successful commands can be found in: $(realpath "$SUCCESS_FILE")" echo "Failed commands can be found in: $(realpath "$FAILED_FILE")" -echo "Test report can be found in: $(realpath "$REPORT_FILE")" \ No newline at end of file +echo "Test report can be found in: $(realpath "$REPORT_FILE")" diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 5bbfd2a0de..43c2de4ba4 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -90,7 +90,7 @@ fi kill_service_of() { _service=$1 - pid=$(ps -C $_service | grep -v $uninstallScript | awk '{print $2}') + pid=$(ps -C $_service | grep -w $_service | grep -v $uninstallScript | awk '{print $1}') if [ -n "$pid" ]; then ${csudo}kill -9 $pid || : fi @@ -140,9 +140,8 @@ clean_service_of() { clean_service_on_systemd_of $_service elif ((${service_mod} == 1)); then clean_service_on_sysvinit_of $_service - else - kill_service_of $_service fi + kill_service_of $_service } remove_service_of() { diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index 7798bbf16a..1d2965f66b 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -40,7 +40,7 @@ if command -v sudo > /dev/null; then fi function kill_client() { - pid=$(ps -C ${clientName2} | grep -v $uninstallScript2 | awk '{print $2}') + pid=$(ps -C ${clientName2} | grep -w ${clientName2} | grep -v $uninstallScript2 | awk '{print $1}') if [ -n "$pid" ]; then ${csudo}kill -9 $pid || : fi diff --git a/source/client/inc/clientStmt.h b/source/client/inc/clientStmt.h index 3540dc5c68..35bfa66f72 100644 --- a/source/client/inc/clientStmt.h +++ b/source/client/inc/clientStmt.h @@ -131,6 +131,8 @@ typedef struct SStmtQueue { SStmtQNode* head; SStmtQNode* tail; uint64_t qRemainNum; + TdThreadMutex mutex; + TdThreadCond waitCond; } SStmtQueue; typedef struct STscStmt { diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 83aff351dd..190a724151 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -253,7 +253,7 @@ void taos_cleanup(void) { taosCloseRef(id); nodesDestroyAllocatorSet(); - // cleanupAppInfo(); + cleanupAppInfo(); rpcCleanup(); tscDebug("rpc cleanup"); diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c index 4b993ccc1e..4f912ec077 100644 --- a/source/client/src/clientStmt.c +++ b/source/client/src/clientStmt.c @@ -39,31 +39,39 @@ static FORCE_INLINE int32_t stmtAllocQNodeFromBuf(STableBufInfo* pTblBuf, void** } bool stmtDequeue(STscStmt* pStmt, SStmtQNode** param) { - while (0 == atomic_load_64(&pStmt->queue.qRemainNum)) { - taosUsleep(1); - return false; + (void)taosThreadMutexLock(&pStmt->queue.mutex); + while (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) { + (void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex); + if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) { + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); + return false; + } } - SStmtQNode* orig = pStmt->queue.head; - SStmtQNode* node = pStmt->queue.head->next; pStmt->queue.head = pStmt->queue.head->next; - - // taosMemoryFreeClear(orig); - *param = node; - (void)atomic_sub_fetch_64(&pStmt->queue.qRemainNum, 1); + (void)atomic_sub_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1); + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); + + + *param = node; return true; } void stmtEnqueue(STscStmt* pStmt, SStmtQNode* param) { + (void)taosThreadMutexLock(&pStmt->queue.mutex); + pStmt->queue.tail->next = param; pStmt->queue.tail = param; pStmt->stat.bindDataNum++; (void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1); + (void)taosThreadCondSignal(&(pStmt->queue.waitCond)); + + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); } static int32_t stmtCreateRequest(STscStmt* pStmt) { @@ -415,9 +423,11 @@ void stmtResetQueueTableBuf(STableBufInfo* pTblBuf, SStmtQueue* pQueue) { pTblBuf->buffIdx = 1; pTblBuf->buffOffset = sizeof(*pQueue->head); + (void)taosThreadMutexLock(&pQueue->mutex); pQueue->head = pQueue->tail = pTblBuf->pCurBuff; pQueue->qRemainNum = 0; pQueue->head->next = NULL; + (void)taosThreadMutexUnlock(&pQueue->mutex); } int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) { @@ -809,6 +819,8 @@ int32_t stmtStartBindThread(STscStmt* pStmt) { } int32_t stmtInitQueue(STscStmt* pStmt) { + (void)taosThreadCondInit(&pStmt->queue.waitCond, NULL); + (void)taosThreadMutexInit(&pStmt->queue.mutex, NULL); STMT_ERR_RET(stmtAllocQNodeFromBuf(&pStmt->sql.siInfo.tbBuf, (void**)&pStmt->queue.head)); pStmt->queue.tail = pStmt->queue.head; @@ -1619,11 +1631,18 @@ int stmtClose(TAOS_STMT* stmt) { pStmt->queue.stopQueue = true; + (void)taosThreadMutexLock(&pStmt->queue.mutex); + (void)taosThreadCondSignal(&(pStmt->queue.waitCond)); + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); + if (pStmt->bindThreadInUse) { (void)taosThreadJoin(pStmt->bindThread, NULL); pStmt->bindThreadInUse = false; } + (void)taosThreadCondDestroy(&pStmt->queue.waitCond); + (void)taosThreadMutexDestroy(&pStmt->queue.mutex); + STMT_DLOG("stmt %p closed, stbInterlaceMode: %d, statInfo: ctgGetTbMetaNum=>%" PRId64 ", getCacheTbInfo=>%" PRId64 ", parseSqlNum=>%" PRId64 ", pStmt->stat.bindDataNum=>%" PRId64 ", settbnameAPI:%u, bindAPI:%u, addbatchAPI:%u, execAPI:%u" @@ -1757,7 +1776,9 @@ _return: } int stmtGetParamNum(TAOS_STMT* stmt, int* nums) { + int code = 0; STscStmt* pStmt = (STscStmt*)stmt; + int32_t preCode = pStmt->errCode; STMT_DLOG_E("start to get param num"); @@ -1765,7 +1786,7 @@ int stmtGetParamNum(TAOS_STMT* stmt, int* nums) { return pStmt->errCode; } - STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); + STMT_ERRI_JRET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 && STMT_TYPE_MULTI_INSERT != pStmt->sql.type) { @@ -1777,23 +1798,29 @@ int stmtGetParamNum(TAOS_STMT* stmt, int* nums) { pStmt->exec.pRequest = NULL; } - STMT_ERR_RET(stmtCreateRequest(pStmt)); + STMT_ERRI_JRET(stmtCreateRequest(pStmt)); if (pStmt->bInfo.needParse) { - STMT_ERR_RET(stmtParseSql(pStmt)); + STMT_ERRI_JRET(stmtParseSql(pStmt)); } if (STMT_TYPE_QUERY == pStmt->sql.type) { *nums = taosArrayGetSize(pStmt->sql.pQuery->pPlaceholderValues); } else { - STMT_ERR_RET(stmtFetchColFields(stmt, nums, NULL)); + STMT_ERRI_JRET(stmtFetchColFields(stmt, nums, NULL)); } - return TSDB_CODE_SUCCESS; +_return: + + pStmt->errCode = preCode; + + return code; } int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) { + int code = 0; STscStmt* pStmt = (STscStmt*)stmt; + int32_t preCode = pStmt->errCode; STMT_DLOG_E("start to get param"); @@ -1802,10 +1829,10 @@ int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) { } if (STMT_TYPE_QUERY == pStmt->sql.type) { - STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR); + STMT_ERRI_JRET(TSDB_CODE_TSC_STMT_API_ERROR); } - STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); + STMT_ERRI_JRET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 && STMT_TYPE_MULTI_INSERT != pStmt->sql.type) { @@ -1817,27 +1844,29 @@ int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) { pStmt->exec.pRequest = NULL; } - STMT_ERR_RET(stmtCreateRequest(pStmt)); + STMT_ERRI_JRET(stmtCreateRequest(pStmt)); if (pStmt->bInfo.needParse) { - STMT_ERR_RET(stmtParseSql(pStmt)); + STMT_ERRI_JRET(stmtParseSql(pStmt)); } int32_t nums = 0; TAOS_FIELD_E* pField = NULL; - STMT_ERR_RET(stmtFetchColFields(stmt, &nums, &pField)); + STMT_ERRI_JRET(stmtFetchColFields(stmt, &nums, &pField)); if (idx >= nums) { tscError("idx %d is too big", idx); - taosMemoryFree(pField); - STMT_ERR_RET(TSDB_CODE_INVALID_PARA); + STMT_ERRI_JRET(TSDB_CODE_INVALID_PARA); } *type = pField[idx].type; *bytes = pField[idx].bytes; - taosMemoryFree(pField); +_return: - return TSDB_CODE_SUCCESS; + taosMemoryFree(pField); + pStmt->errCode = preCode; + + return code; } TAOS_RES* stmtUseResult(TAOS_STMT* stmt) { diff --git a/source/client/src/clientStmt2.c b/source/client/src/clientStmt2.c index d9b2b4b2c5..df92ea6dfd 100644 --- a/source/client/src/clientStmt2.c +++ b/source/client/src/clientStmt2.c @@ -39,31 +39,35 @@ static FORCE_INLINE int32_t stmtAllocQNodeFromBuf(STableBufInfo* pTblBuf, void** } static bool stmtDequeue(STscStmt2* pStmt, SStmtQNode** param) { + (void)taosThreadMutexLock(&pStmt->queue.mutex); while (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) { - taosUsleep(1); - return false; + (void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex); + if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) { + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); + return false; + } } - SStmtQNode* orig = pStmt->queue.head; - SStmtQNode* node = pStmt->queue.head->next; pStmt->queue.head = pStmt->queue.head->next; - - // taosMemoryFreeClear(orig); - *param = node; (void)atomic_sub_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1); + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); return true; } static void stmtEnqueue(STscStmt2* pStmt, SStmtQNode* param) { + (void)taosThreadMutexLock(&pStmt->queue.mutex); + pStmt->queue.tail->next = param; pStmt->queue.tail = param; - pStmt->stat.bindDataNum++; (void)atomic_add_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1); + (void)taosThreadCondSignal(&(pStmt->queue.waitCond)); + + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); } static int32_t stmtCreateRequest(STscStmt2* pStmt) { @@ -339,9 +343,11 @@ static void stmtResetQueueTableBuf(STableBufInfo* pTblBuf, SStmtQueue* pQueue) { pTblBuf->buffIdx = 1; pTblBuf->buffOffset = sizeof(*pQueue->head); + (void)taosThreadMutexLock(&pQueue->mutex); pQueue->head = pQueue->tail = pTblBuf->pCurBuff; pQueue->qRemainNum = 0; pQueue->head->next = NULL; + (void)taosThreadMutexUnlock(&pQueue->mutex); } static int32_t stmtCleanExecInfo(STscStmt2* pStmt, bool keepTable, bool deepClean) { @@ -735,6 +741,8 @@ static int32_t stmtStartBindThread(STscStmt2* pStmt) { } static int32_t stmtInitQueue(STscStmt2* pStmt) { + (void)taosThreadCondInit(&pStmt->queue.waitCond, NULL); + (void)taosThreadMutexInit(&pStmt->queue.mutex, NULL); STMT_ERR_RET(stmtAllocQNodeFromBuf(&pStmt->sql.siInfo.tbBuf, (void**)&pStmt->queue.head)); pStmt->queue.tail = pStmt->queue.head; @@ -1066,13 +1074,16 @@ static int stmtFetchColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_E } static int stmtFetchStbColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_ALL** fields) { + int32_t code = 0; + int32_t preCode = pStmt->errCode; + if (pStmt->errCode != TSDB_CODE_SUCCESS) { return pStmt->errCode; } if (STMT_TYPE_QUERY == pStmt->sql.type) { tscError("invalid operation to get query column fileds"); - STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR); + STMT_ERRI_JRET(TSDB_CODE_TSC_STMT_API_ERROR); } STableDataCxt** pDataBlock = NULL; @@ -1084,21 +1095,25 @@ static int stmtFetchStbColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIEL (STableDataCxt**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName)); if (NULL == pDataBlock) { tscError("table %s not found in exec blockHash", pStmt->bInfo.tbFName); - STMT_ERR_RET(TSDB_CODE_APP_ERROR); + STMT_ERRI_JRET(TSDB_CODE_APP_ERROR); } } - STMT_ERR_RET(qBuildStmtStbColFields(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.preCtbname, fieldNum, fields)); + STMT_ERRI_JRET(qBuildStmtStbColFields(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.preCtbname, fieldNum, fields)); if (pStmt->bInfo.tbType == TSDB_SUPER_TABLE) { pStmt->bInfo.needParse = true; qDestroyStmtDataBlock(*pDataBlock); if (taosHashRemove(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName)) != 0) { tscError("get fileds %s remove exec blockHash fail", pStmt->bInfo.tbFName); - STMT_ERR_RET(TSDB_CODE_APP_ERROR); + STMT_ERRI_JRET(TSDB_CODE_APP_ERROR); } } - return TSDB_CODE_SUCCESS; +_return: + + pStmt->errCode = preCode; + + return code; } /* SArray* stmtGetFreeCol(STscStmt2* pStmt, int32_t* idx) { @@ -1752,11 +1767,18 @@ int stmtClose2(TAOS_STMT2* stmt) { pStmt->queue.stopQueue = true; + (void)taosThreadMutexLock(&pStmt->queue.mutex); + (void)taosThreadCondSignal(&(pStmt->queue.waitCond)); + (void)taosThreadMutexUnlock(&pStmt->queue.mutex); + if (pStmt->bindThreadInUse) { (void)taosThreadJoin(pStmt->bindThread, NULL); pStmt->bindThreadInUse = false; } + (void)taosThreadCondDestroy(&pStmt->queue.waitCond); + (void)taosThreadMutexDestroy(&pStmt->queue.mutex); + if (pStmt->options.asyncExecFn && !pStmt->semWaited) { if (tsem_wait(&pStmt->asyncQuerySem) != 0) { tscError("failed to wait asyncQuerySem"); @@ -1828,7 +1850,7 @@ int stmtParseColFields2(TAOS_STMT2* stmt) { if (pStmt->exec.pRequest && STMT_TYPE_QUERY == pStmt->sql.type && pStmt->sql.runTimes) { taos_free_result(pStmt->exec.pRequest); pStmt->exec.pRequest = NULL; - STMT_ERR_RET(stmtCreateRequest(pStmt)); + STMT_ERRI_JRET(stmtCreateRequest(pStmt)); } STMT_ERRI_JRET(stmtCreateRequest(pStmt)); @@ -1854,7 +1876,9 @@ int stmtGetStbColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_ALL** fields) { } int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) { + int32_t code = 0; STscStmt2* pStmt = (STscStmt2*)stmt; + int32_t preCode = pStmt->errCode; STMT_DLOG_E("start to get param num"); @@ -1862,7 +1886,7 @@ int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) { return pStmt->errCode; } - STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); + STMT_ERRI_JRET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS)); if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 && STMT_TYPE_MULTI_INSERT != pStmt->sql.type) { @@ -1874,19 +1898,23 @@ int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) { pStmt->exec.pRequest = NULL; } - STMT_ERR_RET(stmtCreateRequest(pStmt)); + STMT_ERRI_JRET(stmtCreateRequest(pStmt)); if (pStmt->bInfo.needParse) { - STMT_ERR_RET(stmtParseSql(pStmt)); + STMT_ERRI_JRET(stmtParseSql(pStmt)); } if (STMT_TYPE_QUERY == pStmt->sql.type) { *nums = taosArrayGetSize(pStmt->sql.pQuery->pPlaceholderValues); } else { - STMT_ERR_RET(stmtFetchColFields2(stmt, nums, NULL)); + STMT_ERRI_JRET(stmtFetchColFields2(stmt, nums, NULL)); } - return TSDB_CODE_SUCCESS; +_return: + + pStmt->errCode = preCode; + + return code; } TAOS_RES* stmtUseResult2(TAOS_STMT2* stmt) { diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index f4426fc94a..007a23720c 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -74,8 +74,9 @@ enum { }; typedef struct { - tmr_h timer; - int32_t rsetId; + tmr_h timer; + int32_t rsetId; + TdThreadMutex lock; } SMqMgmt; struct tmq_list_t { @@ -1603,13 +1604,21 @@ static void tmqMgmtInit(void) { tmqMgmt.timer = taosTmrInit(1000, 100, 360000, "TMQ"); if (tmqMgmt.timer == NULL) { - tmqInitRes = terrno; + goto END; } tmqMgmt.rsetId = taosOpenRef(10000, tmqFreeImpl); if (tmqMgmt.rsetId < 0) { - tmqInitRes = terrno; + goto END; } + + if (taosThreadMutexInit(&tmqMgmt.lock, NULL) != 0){ + goto END; + } + return; + +END: + tmqInitRes = terrno; } void tmqMgmtClose(void) { @@ -1618,10 +1627,28 @@ void tmqMgmtClose(void) { tmqMgmt.timer = NULL; } + (void) taosThreadMutexLock(&tmqMgmt.lock); if (tmqMgmt.rsetId >= 0) { + tmq_t *tmq = taosIterateRef(tmqMgmt.rsetId, 0); + int64_t refId = 0; + + while (tmq) { + refId = tmq->refId; + if (refId == 0) { + break; + } + atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__CLOSED); + + if (taosRemoveRef(tmqMgmt.rsetId, tmq->refId) != 0) { + qWarn("taosRemoveRef tmq refId:%" PRId64 " failed, error:%s", refId, tstrerror(terrno)); + } + + tmq = taosIterateRef(tmqMgmt.rsetId, refId); + } taosCloseRef(tmqMgmt.rsetId); tmqMgmt.rsetId = -1; } + (void)taosThreadMutexUnlock(&tmqMgmt.lock); } tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) { @@ -2617,8 +2644,13 @@ int32_t tmq_unsubscribe(tmq_t* tmq) { int32_t tmq_consumer_close(tmq_t* tmq) { if (tmq == NULL) return TSDB_CODE_INVALID_PARA; + int32_t code = 0; + (void) taosThreadMutexLock(&tmqMgmt.lock); + if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__CLOSED){ + goto end; + } tqInfoC("consumer:0x%" PRIx64 " start to close consumer, status:%d", tmq->consumerId, tmq->status); - int32_t code = tmq_unsubscribe(tmq); + code = tmq_unsubscribe(tmq); if (code == 0) { atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__CLOSED); code = taosRemoveRef(tmqMgmt.rsetId, tmq->refId); @@ -2626,6 +2658,9 @@ int32_t tmq_consumer_close(tmq_t* tmq) { tqErrorC("tmq close failed to remove ref:%" PRId64 ", code:%d", tmq->refId, code); } } + +end: + (void)taosThreadMutexUnlock(&tmqMgmt.lock); return code; } diff --git a/source/client/test/stmt2Test.cpp b/source/client/test/stmt2Test.cpp index 52c89e97ab..91f884941f 100644 --- a/source/client/test/stmt2Test.cpp +++ b/source/client/test/stmt2Test.cpp @@ -735,7 +735,7 @@ TEST(stmt2Case, insert_ntb_get_fields_Test) { { const char* sql = "insert into stmt2_testdb_4.? values(?,?)"; printf("case 2 : %s\n", sql); - getFieldsError(taos, sql, TSDB_CODE_PAR_TABLE_NOT_EXIST); + getFieldsError(taos, sql, TSDB_CODE_TSC_STMT_TBNAME_ERROR); } // case 3 : wrong para nums @@ -1496,8 +1496,51 @@ TEST(stmt2Case, geometry) { checkError(stmt, code); ASSERT_EQ(affected_rows, 3); + // test wrong wkb input + unsigned char wkb2[3][61] = { + { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xF0, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, + }, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40}}; + params[1].buffer = wkb2; + code = taos_stmt2_bind_param(stmt, &bindv, -1); + ASSERT_EQ(code, TSDB_CODE_FUNC_FUNTION_PARA_VALUE); + taos_stmt2_close(stmt); do_query(taos, "DROP DATABASE IF EXISTS stmt2_testdb_13"); taos_close(taos); } + +// TD-33582 +TEST(stmt2Case, errcode) { + TAOS* taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT_NE(taos, nullptr); + do_query(taos, "DROP DATABASE IF EXISTS stmt2_testdb_14"); + do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt2_testdb_14"); + do_query(taos, "use stmt2_testdb_14"); + + TAOS_STMT2_OPTION option = {0}; + TAOS_STMT2* stmt = taos_stmt2_init(taos, &option); + ASSERT_NE(stmt, nullptr); + char* sql = "select * from t where ts > ? and name = ? foo = ?"; + int code = taos_stmt2_prepare(stmt, sql, 0); + checkError(stmt, code); + + int fieldNum = 0; + TAOS_FIELD_ALL* pFields = NULL; + code = taos_stmt2_get_fields(stmt, &fieldNum, &pFields); + ASSERT_EQ(code, TSDB_CODE_PAR_SYNTAX_ERROR); + + // get fail dont influence the next stmt prepare + sql = "nsert into ? (ts, name) values (?, ?)"; + code = taos_stmt_prepare(stmt, sql, 0); + checkError(stmt, code); +} #pragma GCC diagnostic pop diff --git a/source/client/test/stmtTest.cpp b/source/client/test/stmtTest.cpp index 77130e41db..9a716d7f19 100644 --- a/source/client/test/stmtTest.cpp +++ b/source/client/test/stmtTest.cpp @@ -212,15 +212,6 @@ void insertData(TAOS *taos, TAOS_STMT_OPTIONS *option, const char *sql, int CTB_ void getFields(TAOS *taos, const char *sql, int expectedALLFieldNum, TAOS_FIELD_E *expectedTagFields, int expectedTagFieldNum, TAOS_FIELD_E *expectedColFields, int expectedColFieldNum) { - // create database and table - do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_3"); - do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt_testdb_3"); - do_query(taos, "USE stmt_testdb_3"); - do_query( - taos, - "CREATE STABLE IF NOT EXISTS stmt_testdb_3.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS " - "(groupId INT, location BINARY(24))"); - TAOS_STMT *stmt = taos_stmt_init(taos); ASSERT_NE(stmt, nullptr); int code = taos_stmt_prepare(stmt, sql, 0); @@ -267,6 +258,24 @@ void getFields(TAOS *taos, const char *sql, int expectedALLFieldNum, TAOS_FIELD_ taos_stmt_close(stmt); } +void getFieldsError(TAOS *taos, const char *sql, int expectedErrocode) { + TAOS_STMT *stmt = taos_stmt_init(taos); + ASSERT_NE(stmt, nullptr); + STscStmt *pStmt = (STscStmt *)stmt; + + int code = taos_stmt_prepare(stmt, sql, 0); + + int fieldNum = 0; + TAOS_FIELD_E *pFields = NULL; + code = taos_stmt_get_tag_fields(stmt, &fieldNum, &pFields); + ASSERT_EQ(code, expectedErrocode); + ASSERT_EQ(pStmt->errCode, TSDB_CODE_SUCCESS); + + taosMemoryFree(pFields); + + taos_stmt_close(stmt); +} + } // namespace int main(int argc, char **argv) { @@ -298,6 +307,15 @@ TEST(stmtCase, get_fields) { TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); ASSERT_NE(taos, nullptr); + // create database and table + do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_3"); + do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt_testdb_3"); + do_query(taos, "USE stmt_testdb_3"); + do_query( + taos, + "CREATE STABLE IF NOT EXISTS stmt_testdb_3.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS " + "(groupId INT, location BINARY(24))"); + // nomarl test { TAOS_FIELD_E tagFields[2] = {{"groupid", TSDB_DATA_TYPE_INT, 0, 0, sizeof(int)}, {"location", TSDB_DATA_TYPE_BINARY, 0, 0, 24}}; @@ -307,6 +325,12 @@ TEST(stmtCase, get_fields) { {"phase", TSDB_DATA_TYPE_FLOAT, 0, 0, sizeof(float)}}; getFields(taos, "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)", 7, &tagFields[0], 2, &colFields[0], 4); } + // error case [TD-33570] + { getFieldsError(taos, "INSERT INTO ? VALUES (?,?,?,?)", TSDB_CODE_TSC_STMT_TBNAME_ERROR); } + + { getFieldsError(taos, "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)", TSDB_CODE_TSC_STMT_TBNAME_ERROR); } + + do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_3"); taos_close(taos); } @@ -520,9 +544,6 @@ TEST(stmtCase, geometry) { int code = taos_stmt_prepare(stmt, stmt_sql, 0); checkError(stmt, code); - // code = taos_stmt_set_tbname(stmt, "tb1"); - // checkError(stmt, code); - code = taos_stmt_bind_param_batch(stmt, params); checkError(stmt, code); @@ -532,11 +553,58 @@ TEST(stmtCase, geometry) { code = taos_stmt_execute(stmt); checkError(stmt, code); + //test wrong wkb input + unsigned char wkb2[3][61] = { + { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xF0, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, + }, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40}}; + params[1].buffer = wkb2; + code = taos_stmt_bind_param_batch(stmt, params); + ASSERT_EQ(code, TSDB_CODE_FUNC_FUNTION_PARA_VALUE); + taosMemoryFree(t64_len); taosMemoryFree(wkb_len); taos_stmt_close(stmt); do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_5"); taos_close(taos); } +//TD-33582 +TEST(stmtCase, errcode) { + TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + ASSERT_NE(taos, nullptr); + do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_4"); + do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt_testdb_4"); + do_query(taos, "USE stmt_testdb_4"); + do_query( + taos, + "CREATE STABLE IF NOT EXISTS stmt_testdb_4.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS " + "(groupId INT, location BINARY(24))"); + + TAOS_STMT *stmt = taos_stmt_init(taos); + ASSERT_NE(stmt, nullptr); + char *sql = "select * from t where ts > ? and name = ? foo = ?"; + int code = taos_stmt_prepare(stmt, sql, 0); + checkError(stmt, code); + + int fieldNum = 0; + TAOS_FIELD_E *pFields = NULL; + code = stmtGetParamNum(stmt, &fieldNum); + ASSERT_EQ(code, TSDB_CODE_PAR_SYNTAX_ERROR); + + code = taos_stmt_get_tag_fields(stmt, &fieldNum, &pFields); + ASSERT_EQ(code, TSDB_CODE_PAR_SYNTAX_ERROR); + // get fail dont influence the next stmt prepare + sql = "nsert into ? (ts, name) values (?, ?)"; + code = taos_stmt_prepare(stmt, sql, 0); + checkError(stmt, code); +} #pragma GCC diagnostic pop \ No newline at end of file diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt index 39380a0644..8dccdaa016 100644 --- a/source/common/CMakeLists.txt +++ b/source/common/CMakeLists.txt @@ -54,6 +54,23 @@ target_link_libraries( INTERFACE api ) +if(NOT ${TD_WINDOWS}) + target_include_directories( + common + PUBLIC "$ENV{HOME}/.cos-local.2/include" + ) + + find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) + find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) + find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) + target_link_libraries( + common + PUBLIC ${CURL_LIBRARY} + PUBLIC ${SSL_LIBRARY} + PUBLIC ${CRYPTO_LIBRARY} + ) +endif() + if(${BUILD_S3}) if(${BUILD_WITH_S3}) target_include_directories( @@ -65,10 +82,6 @@ if(${BUILD_S3}) set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.2) find_library(S3_LIBRARY s3) - find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) - find_library(XML2_LIBRARY xml2) - find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) - find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH) target_link_libraries( common @@ -77,7 +90,7 @@ if(${BUILD_S3}) PUBLIC ${CURL_LIBRARY} PUBLIC ${SSL_LIBRARY} PUBLIC ${CRYPTO_LIBRARY} - PUBLIC ${XML2_LIBRARY} + PUBLIC _libxml2 ) add_definitions(-DUSE_S3) @@ -88,7 +101,6 @@ if(${BUILD_S3}) find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) find_library(MINIXML_LIBRARY mxml) - find_library(CURL_LIBRARY curl) target_link_libraries( common diff --git a/source/common/src/msg/tmsg.c b/source/common/src/msg/tmsg.c index a3989012f6..7a51669d46 100644 --- a/source/common/src/msg/tmsg.c +++ b/source/common/src/msg/tmsg.c @@ -9959,6 +9959,16 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS } TAOS_CHECK_EXIT(tEncodeI64(&encoder, pReq->smaId)); + + int32_t addrSize = taosArrayGetSize(pReq->pNotifyAddrUrls); + TAOS_CHECK_EXIT(tEncodeI32(&encoder, addrSize)); + for (int32_t i = 0; i < addrSize; ++i) { + const char *url = taosArrayGetP(pReq->pNotifyAddrUrls, i); + TAOS_CHECK_EXIT((tEncodeCStr(&encoder, url))); + } + TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->notifyEventTypes)); + TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->notifyErrorHandle)); + TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->notifyHistory)); tEndEncode(&encoder); _exit: @@ -10093,6 +10103,30 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea TAOS_CHECK_EXIT(tDecodeI64(&decoder, &pReq->smaId)); } + if (!tDecodeIsEnd(&decoder)) { + int32_t addrSize = 0; + TAOS_CHECK_EXIT(tDecodeI32(&decoder, &addrSize)); + pReq->pNotifyAddrUrls = taosArrayInit(addrSize, POINTER_BYTES); + if (pReq->pNotifyAddrUrls == NULL) { + TAOS_CHECK_EXIT(terrno); + } + for (int32_t i = 0; i < addrSize; ++i) { + char *url = NULL; + TAOS_CHECK_EXIT(tDecodeCStr(&decoder, &url)); + url = taosStrndup(url, TSDB_STREAM_NOTIFY_URL_LEN); + if (url == NULL) { + TAOS_CHECK_EXIT(terrno); + } + if (taosArrayPush(pReq->pNotifyAddrUrls, &url) == NULL) { + taosMemoryFree(url); + TAOS_CHECK_EXIT(terrno); + } + } + TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->notifyEventTypes)); + TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->notifyErrorHandle)); + TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->notifyHistory)); + } + tEndDecode(&decoder); _exit: tDecoderClear(&decoder); @@ -10155,6 +10189,7 @@ void tFreeSCMCreateStreamReq(SCMCreateStreamReq *pReq) { taosArrayDestroy(pReq->fillNullCols); taosArrayDestroy(pReq->pVgroupVerList); taosArrayDestroy(pReq->pCols); + taosArrayDestroyP(pReq->pNotifyAddrUrls, NULL); } int32_t tEncodeSRSmaParam(SEncoder *pCoder, const SRSmaParam *pRSmaParam) { diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index bd18c9ceb9..c3e0fff578 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -3061,6 +3061,33 @@ int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, cha return code; } +int32_t buildSinkDestTableName(char* parTbName, const char* stbFullName, uint64_t gid, bool newSubTableRule, + char** dstTableName) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + if (parTbName[0]) { + if (newSubTableRule && !isAutoTableName(parTbName) && !alreadyAddGroupId(parTbName, gid) && gid != 0 && + stbFullName) { + *dstTableName = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN); + TSDB_CHECK_NULL(*dstTableName, code, lino, _end, terrno); + + tstrncpy(*dstTableName, parTbName, TSDB_TABLE_NAME_LEN); + code = buildCtbNameAddGroupId(stbFullName, *dstTableName, gid, TSDB_TABLE_NAME_LEN); + TSDB_CHECK_CODE(code, lino, _end); + } else { + *dstTableName = taosStrdup(parTbName); + TSDB_CHECK_NULL(*dstTableName, code, lino, _end, terrno); + } + } else { + code = buildCtbNameByGroupId(stbFullName, gid, dstTableName); + TSDB_CHECK_CODE(code, lino, _end); + } + +_end: + return code; +} + // return length of encoded data, return -1 if failed int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataBuflen, int32_t numOfCols) { int32_t code = blockDataCheck(pBlock); diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 68ea3f4a42..83b1845fd4 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -500,7 +500,9 @@ int32_t taosSetS3Cfg(SConfig *pCfg) { TAOS_RETURN(TSDB_CODE_SUCCESS); } -struct SConfig *taosGetCfg() { return tsCfg; } +struct SConfig *taosGetCfg() { + return tsCfg; +} static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile, char *apolloUrl) { @@ -818,8 +820,13 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfSnodeWriteThreads = tsNumOfCores / 4; tsNumOfSnodeWriteThreads = TRANGE(tsNumOfSnodeWriteThreads, 2, 4); - tsQueueMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1; - tsQueueMemoryAllowed = TRANGE(tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL); + tsQueueMemoryAllowed = tsTotalMemoryKB * 1024 * RPC_MEMORY_USAGE_RATIO * QUEUE_MEMORY_USAGE_RATIO; + tsQueueMemoryAllowed = TRANGE(tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * QUEUE_MEMORY_USAGE_RATIO * 10LL, + TSDB_MAX_MSG_SIZE * QUEUE_MEMORY_USAGE_RATIO * 10000LL); + + tsApplyMemoryAllowed = tsTotalMemoryKB * 1024 * RPC_MEMORY_USAGE_RATIO * (1 - QUEUE_MEMORY_USAGE_RATIO); + tsApplyMemoryAllowed = TRANGE(tsApplyMemoryAllowed, TSDB_MAX_MSG_SIZE * (1 - QUEUE_MEMORY_USAGE_RATIO) * 10LL, + TSDB_MAX_MSG_SIZE * (1 - QUEUE_MEMORY_USAGE_RATIO) * 10000LL); tsLogBufferMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1; tsLogBufferMemoryAllowed = TRANGE(tsLogBufferMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL); @@ -857,7 +864,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_LOCAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "numOfSnodeUniqueThreads", tsNumOfSnodeWriteThreads, 2, 1024, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_LOCAL)); - TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); + TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * RPC_MEMORY_USAGE_RATIO * 10L, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncElectInterval", tsElectInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL)); @@ -1569,7 +1576,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsNumOfSnodeWriteThreads = pItem->i32; TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "rpcQueueMemoryAllowed"); - tsQueueMemoryAllowed = pItem->i64; + tsQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * QUEUE_MEMORY_USAGE_RATIO; + tsApplyMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * (1 - QUEUE_MEMORY_USAGE_RATIO); TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "simdEnable"); tsSIMDEnable = (bool)pItem->bval; @@ -2392,6 +2400,12 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { code = TSDB_CODE_SUCCESS; goto _exit; } + if (strcasecmp("rpcQueueMemoryAllowed", name) == 0) { + tsQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * QUEUE_MEMORY_USAGE_RATIO; + tsApplyMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * (1 - QUEUE_MEMORY_USAGE_RATIO); + code = TSDB_CODE_SUCCESS; + goto _exit; + } if (strcasecmp(name, "numOfCompactThreads") == 0) { #ifdef TD_ENTERPRISE @@ -2497,7 +2511,6 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { {"experimental", &tsExperimental}, {"numOfRpcSessions", &tsNumOfRpcSessions}, - {"rpcQueueMemoryAllowed", &tsQueueMemoryAllowed}, {"shellActivityTimer", &tsShellActivityTimer}, {"readTimeout", &tsReadTimeout}, {"safetyCheckLevel", &tsSafetyCheckLevel}, diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index 423268a8cc..0a3543ac07 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -181,7 +181,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) { req.numOfSupportVnodes = tsNumOfSupportVnodes; req.numOfDiskCfg = tsDiskCfgNum; req.memTotal = tsTotalMemoryKB * 1024; - req.memAvail = req.memTotal - tsQueueMemoryAllowed - 16 * 1024 * 1024; + req.memAvail = req.memTotal - tsQueueMemoryAllowed - tsApplyMemoryAllowed - 16 * 1024 * 1024; tstrncpy(req.dnodeEp, tsLocalEp, TSDB_EP_LEN); tstrncpy(req.machineId, pMgmt->pData->machineId, TSDB_MACHINE_ID_LEN + 1); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index c22adec9b4..334c213945 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -323,7 +323,7 @@ int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) { return TSDB_CODE_INVALID_MSG; } - EQItype itype = APPLY_QUEUE == qtype ? DEF_QITEM : RPC_QITEM; + EQItype itype = APPLY_QUEUE == qtype ? APPLY_QITEM : RPC_QITEM; SRpcMsg *pMsg; code = taosAllocateQitem(sizeof(SRpcMsg), itype, pRpc->contLen, (void **)&pMsg); if (code) { diff --git a/source/dnode/mgmt/test/sut/src/sut.cpp b/source/dnode/mgmt/test/sut/src/sut.cpp index 13c8c73f44..a1fdebb636 100644 --- a/source/dnode/mgmt/test/sut/src/sut.cpp +++ b/source/dnode/mgmt/test/sut/src/sut.cpp @@ -36,7 +36,8 @@ void Testbase::InitLog(const char* path) { tstrncpy(tsLogDir, path, PATH_MAX); taosGetSystemInfo(); - tsQueueMemoryAllowed = tsTotalMemoryKB * 0.1; + tsQueueMemoryAllowed = tsTotalMemoryKB * 0.06; + tsApplyMemoryAllowed = tsTotalMemoryKB * 0.04; if (taosInitLog("taosdlog", 1, false) != 0) { printf("failed to init log file\n"); } diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 50018e867f..c1cf41103b 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -753,6 +753,77 @@ static int32_t doStreamCheck(SMnode *pMnode, SStreamObj *pStreamObj) { return TSDB_CODE_SUCCESS; } +static void *notifyAddrDup(void *p) { return taosStrdup((char *)p); } + +static int32_t addStreamTaskNotifyInfo(const SCMCreateStreamReq *createReq, const SStreamObj *pStream, + SStreamTask *pTask) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + TSDB_CHECK_NULL(createReq, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(pTask, code, lino, _end, TSDB_CODE_INVALID_PARA); + + pTask->notifyInfo.pNotifyAddrUrls = taosArrayDup(createReq->pNotifyAddrUrls, notifyAddrDup); + TSDB_CHECK_NULL(pTask->notifyInfo.pNotifyAddrUrls, code, lino, _end, terrno); + pTask->notifyInfo.notifyEventTypes = createReq->notifyEventTypes; + pTask->notifyInfo.notifyErrorHandle = createReq->notifyErrorHandle; + pTask->notifyInfo.streamName = taosStrdup(createReq->name); + TSDB_CHECK_NULL(pTask->notifyInfo.streamName, code, lino, _end, terrno); + pTask->notifyInfo.stbFullName = taosStrdup(createReq->targetStbFullName); + TSDB_CHECK_NULL(pTask->notifyInfo.stbFullName, code, lino, _end, terrno); + pTask->notifyInfo.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema); + TSDB_CHECK_NULL(pTask->notifyInfo.pSchemaWrapper, code, lino, _end, terrno); + +_end: + if (code != TSDB_CODE_SUCCESS) { + mError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +static int32_t addStreamNotifyInfo(SCMCreateStreamReq *createReq, SStreamObj *pStream) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + int32_t level = 0; + int32_t nTasks = 0; + SArray *pLevel = NULL; + + TSDB_CHECK_NULL(createReq, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(pStream, code, lino, _end, TSDB_CODE_INVALID_PARA); + + if (taosArrayGetSize(createReq->pNotifyAddrUrls) == 0) { + goto _end; + } + + level = taosArrayGetSize(pStream->tasks); + for (int32_t i = 0; i < level; ++i) { + pLevel = taosArrayGetP(pStream->tasks, i); + nTasks = taosArrayGetSize(pLevel); + for (int32_t j = 0; j < nTasks; ++j) { + code = addStreamTaskNotifyInfo(createReq, pStream, taosArrayGetP(pLevel, j)); + TSDB_CHECK_CODE(code, lino, _end); + } + } + + if (pStream->conf.fillHistory && createReq->notifyHistory) { + level = taosArrayGetSize(pStream->pHTasksList); + for (int32_t i = 0; i < level; ++i) { + pLevel = taosArrayGetP(pStream->pHTasksList, i); + nTasks = taosArrayGetSize(pLevel); + for (int32_t j = 0; j < nTasks; ++j) { + code = addStreamTaskNotifyInfo(createReq, pStream, taosArrayGetP(pLevel, j)); + TSDB_CHECK_CODE(code, lino, _end); + } + } + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + mError("%s for stream %s failed at line %d since %s", __func__, pStream->name, lino, tstrerror(code)); + } + return code; +} + static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SStreamObj *pStream = NULL; @@ -850,6 +921,14 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { goto _OVER; } + // add notify info into all stream tasks + code = addStreamNotifyInfo(&createReq, &streamObj); + if (code != TSDB_CODE_SUCCESS) { + mError("stream:%s failed to add stream notify info since %s", createReq.name, tstrerror(code)); + mndTransDrop(pTrans); + goto _OVER; + } + // add stream to trans code = mndPersistStream(pTrans, &streamObj); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_ACTION_IN_PROGRESS) { diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 8f63cc8779..b90e1844ae 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -75,6 +75,7 @@ set( "src/tq/tqSnapshot.c" "src/tq/tqStreamStateSnap.c" "src/tq/tqStreamTaskSnap.c" + "src/tq/tqStreamNotify.c" ) aux_source_directory("src/tsdb/" TSDB_SOURCE_FILES) diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 12a803d1d8..e0bf51b333 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -159,6 +159,11 @@ int32_t buildAutoCreateTableReq(const char* stbFullName, int64_t suid, int32_t n SArray* pTagArray, bool newSubTableRule, SVCreateTbReq** pReq); int32_t tqExtractDropCtbDataBlock(const void* data, int32_t len, int64_t ver, void** pRefBlock, int32_t type); +// tq send notifications +int32_t tqInitNotifyHandleMap(SStreamNotifyHandleMap** ppMap); +void tqDestroyNotifyHandleMap(SStreamNotifyHandleMap** ppMap); +int32_t tqSendAllNotifyEvents(const SArray* pBlocks, SStreamTask* pTask, SVnode* pVnode); + #define TQ_ERR_GO_TO_END(c) \ do { \ code = c; \ diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 940116317c..5a61c1c124 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -81,6 +81,8 @@ typedef struct SCommitInfo SCommitInfo; typedef struct SCompactInfo SCompactInfo; typedef struct SQueryNode SQueryNode; +typedef struct SStreamNotifyHandleMap SStreamNotifyHandleMap; + #define VNODE_META_TMP_DIR "meta.tmp" #define VNODE_META_BACKUP_DIR "meta.backup" @@ -255,6 +257,9 @@ int32_t tqProcessTaskCheckpointReadyRsp(STQ* pTq, SRpcMsg* pMsg); int32_t tqBuildStreamTask(void* pTq, SStreamTask* pTask, int64_t ver); int32_t tqScanWal(STQ* pTq); +// injection error +void streamMetaFreeTQDuringScanWalError(STQ* pTq); + int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd); int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId); // tq-mq @@ -496,6 +501,9 @@ struct SVnode { int64_t blockSeq; SQHandle* pQuery; SVMonitorObj monitor; + + // Notification Handles + SStreamNotifyHandleMap* pNotifyHandleMap; }; #define TD_VID(PVNODE) ((PVNODE)->config.vgId) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 3bfc50fcb2..5b19d4cd87 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -75,12 +75,14 @@ int32_t tqOpen(const char* path, SVnode* pVnode) { if (pTq == NULL) { return terrno; } + pVnode->pTq = pTq; + pTq->pVnode = pVnode; + pTq->path = taosStrdup(path); if (pTq->path == NULL) { return terrno; } - pTq->pVnode = pVnode; pTq->pHandle = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK); if (pTq->pHandle == NULL) { @@ -131,11 +133,19 @@ void tqClose(STQ* pTq) { return; } + int32_t vgId = 0; + if (pTq->pVnode != NULL) { + vgId = TD_VID(pTq->pVnode); + } else if (pTq->pStreamMeta != NULL) { + vgId = pTq->pStreamMeta->vgId; + } + + // close the stream meta firstly + streamMetaClose(pTq->pStreamMeta); + void* pIter = taosHashIterate(pTq->pPushMgr, NULL); while (pIter) { STqHandle* pHandle = *(STqHandle**)pIter; - int32_t vgId = TD_VID(pTq->pVnode); - if (pHandle->msg != NULL) { tqPushEmptyDataRsp(pHandle, vgId); rpcFreeCont(pHandle->msg->pCont); @@ -151,8 +161,12 @@ void tqClose(STQ* pTq) { taosHashCleanup(pTq->pOffset); taosMemoryFree(pTq->path); tqMetaClose(pTq); - qDebug("vgId:%d end to close tq", pTq->pStreamMeta != NULL ? pTq->pStreamMeta->vgId : -1); - streamMetaClose(pTq->pStreamMeta); + qDebug("vgId:%d end to close tq", vgId); + +#if 0 + streamMetaFreeTQDuringScanWalError(pTq); +#endif + taosMemoryFree(pTq); } diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 7ba77cf813..98ea92125c 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -16,8 +16,6 @@ #include "tcommon.h" #include "tq.h" -#define IS_NEW_SUBTB_RULE(_t) (((_t)->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) && ((_t)->subtableWithoutMd5 != 1)) - typedef struct STableSinkInfo { uint64_t uid; tstr name; @@ -983,7 +981,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat tqDebug("s-task:%s append groupId:%" PRId64 " for generated dstTable:%s", id, groupId, dstTableName); if (pTask->ver == SSTREAM_TASK_SUBTABLE_CHANGED_VER) { code = buildCtbNameAddGroupId(NULL, dstTableName, groupId, sizeof(pDataBlock->info.parTbName)); - } else if (pTask->ver > SSTREAM_TASK_SUBTABLE_CHANGED_VER && stbFullName) { + } else if (pTask->ver >= SSTREAM_TASK_APPEND_STABLE_NAME_VER && stbFullName) { code = buildCtbNameAddGroupId(stbFullName, dstTableName, groupId, sizeof(pDataBlock->info.parTbName)); } if (code != TSDB_CODE_SUCCESS) { @@ -1150,6 +1148,12 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) { return; } + code = tqSendAllNotifyEvents(pBlocks, pTask, pVnode); + if (code != TSDB_CODE_SUCCESS) { + tqError("vgId: %d, s-task:%s failed to send all event notifications", vgId, id); + // continue processing even if notification fails + } + bool onlySubmitData = hasOnlySubmitData(pBlocks, numOfBlocks); if (!onlySubmitData || pTask->subtableWithoutMd5 == 1) { tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, has delete block, submit one-by-one", vgId, id, @@ -1173,6 +1177,8 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) { continue; } else if (pDataBlock->info.type == STREAM_DROP_CHILD_TABLE && pTask->subtableWithoutMd5) { code = doBuildAndSendDropTableMsg(pVnode, stbFullName, pDataBlock, pTask, suid); + } else if (pDataBlock->info.type == STREAM_NOTIFY_EVENT) { + continue; } else { code = handleResultBlockMsg(pTask, pDataBlock, i, pVnode, earlyTs); } @@ -1317,6 +1323,10 @@ void rebuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVno continue; } + if (pDataBlock->info.type == STREAM_NOTIFY_EVENT) { + continue; + } + hasSubmit = true; pTask->execInfo.sink.numOfBlocks += 1; uint64_t groupId = pDataBlock->info.id.groupId; diff --git a/source/dnode/vnode/src/tq/tqStreamNotify.c b/source/dnode/vnode/src/tq/tqStreamNotify.c new file mode 100644 index 0000000000..46ee95d3b9 --- /dev/null +++ b/source/dnode/vnode/src/tq/tqStreamNotify.c @@ -0,0 +1,445 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "cmdnodes.h" +#include "tq.h" + +#ifndef WINDOWS +#include "curl/curl.h" +#endif + +#define STREAM_EVENT_NOTIFY_RETRY_MS 50 // 50ms + +typedef struct SStreamNotifyHandle { + TdThreadMutex mutex; +#ifndef WINDOWS + CURL* curl; +#endif + char* url; +} SStreamNotifyHandle; + +struct SStreamNotifyHandleMap { + TdThreadMutex gMutex; + SHashObj* handleMap; +}; + +static void stopStreamNotifyConn(SStreamNotifyHandle* pHandle) { +#ifndef WINDOWS + if (pHandle == NULL || pHandle->curl == NULL) { + return; + } + // status code 1000 means normal closure + size_t len = 0; + uint16_t status = htons(1000); + CURLcode res = curl_ws_send(pHandle->curl, &status, sizeof(status), &len, 0, CURLWS_CLOSE); + if (res != CURLE_OK) { + tqWarn("failed to send ws-close msg to %s for %d", pHandle->url ? pHandle->url : "", res); + } + // TODO: add wait mechanism for peer connection close response + curl_easy_cleanup(pHandle->curl); +#endif +} + +static void destroyStreamNotifyHandle(void* ptr) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamNotifyHandle** ppHandle = ptr; + + if (ppHandle == NULL || *ppHandle == NULL) { + return; + } + code = taosThreadMutexDestroy(&(*ppHandle)->mutex); + stopStreamNotifyConn(*ppHandle); + taosMemoryFreeClear((*ppHandle)->url); + taosMemoryFreeClear(*ppHandle); +} + +static void releaseStreamNotifyHandle(SStreamNotifyHandle** ppHandle) { + if (ppHandle == NULL || *ppHandle == NULL) { + return; + } + (void)taosThreadMutexUnlock(&(*ppHandle)->mutex); + *ppHandle = NULL; +} + +static int32_t acquireStreamNotifyHandle(SStreamNotifyHandleMap* pMap, const char* url, + SStreamNotifyHandle** ppHandle) { +#ifndef WINDOWS + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + bool gLocked = false; + SStreamNotifyHandle** ppFindHandle = NULL; + SStreamNotifyHandle* pNewHandle = NULL; + CURL* newCurl = NULL; + CURLcode res = CURLE_OK; + + TSDB_CHECK_NULL(pMap, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(url, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(ppHandle, code, lino, _end, TSDB_CODE_INVALID_PARA); + + *ppHandle = NULL; + + code = taosThreadMutexLock(&pMap->gMutex); + TSDB_CHECK_CODE(code, lino, _end); + gLocked = true; + + ppFindHandle = taosHashGet(pMap->handleMap, url, strlen(url)); + if (ppFindHandle == NULL) { + pNewHandle = taosMemoryCalloc(1, sizeof(SStreamNotifyHandle)); + TSDB_CHECK_NULL(pNewHandle, code, lino, _end, terrno); + code = taosThreadMutexInit(&pNewHandle->mutex, NULL); + TSDB_CHECK_CODE(code, lino, _end); + code = taosHashPut(pMap->handleMap, url, strlen(url), &pNewHandle, POINTER_BYTES); + TSDB_CHECK_CODE(code, lino, _end); + *ppHandle = pNewHandle; + pNewHandle = NULL; + } else { + *ppHandle = *ppFindHandle; + } + + code = taosThreadMutexLock(&(*ppHandle)->mutex); + TSDB_CHECK_CODE(code, lino, _end); + + (void)taosThreadMutexUnlock(&pMap->gMutex); + gLocked = false; + + if ((*ppHandle)->curl == NULL) { + newCurl = curl_easy_init(); + TSDB_CHECK_NULL(newCurl, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_setopt(newCurl, CURLOPT_URL, url); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_setopt(newCurl, CURLOPT_SSL_VERIFYPEER, 0L); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_setopt(newCurl, CURLOPT_SSL_VERIFYHOST, 0L); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_setopt(newCurl, CURLOPT_TIMEOUT, 3L); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_setopt(newCurl, CURLOPT_CONNECT_ONLY, 2L); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + res = curl_easy_perform(newCurl); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + (*ppHandle)->curl = newCurl; + newCurl = NULL; + } + + if ((*ppHandle)->url == NULL) { + (*ppHandle)->url = taosStrdup(url); + TSDB_CHECK_NULL((*ppHandle)->url, code, lino, _end, terrno); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %d, %s", __func__, lino, res, tstrerror(code)); + if (*ppHandle) { + releaseStreamNotifyHandle(ppHandle); + } + *ppHandle = NULL; + } + if (newCurl) { + curl_easy_cleanup(newCurl); + } + if (pNewHandle) { + destroyStreamNotifyHandle(&pNewHandle); + } + if (gLocked) { + (void)taosThreadMutexUnlock(&pMap->gMutex); + } + return code; +#else + tqError("stream notify events is not supported on windows"); + return TSDB_CODE_NOT_SUPPORTTED_IN_WINDOWS; +#endif +} + +int32_t tqInitNotifyHandleMap(SStreamNotifyHandleMap** ppMap) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SStreamNotifyHandleMap* pMap = NULL; + + TSDB_CHECK_NULL(ppMap, code, lino, _end, TSDB_CODE_INVALID_PARA); + + *ppMap = NULL; + pMap = taosMemoryCalloc(1, sizeof(SStreamNotifyHandleMap)); + TSDB_CHECK_NULL(pMap, code, lino, _end, terrno); + code = taosThreadMutexInit(&pMap->gMutex, NULL); + TSDB_CHECK_CODE(code, lino, _end); + pMap->handleMap = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + TSDB_CHECK_NULL(pMap->handleMap, code, lino, _end, terrno); + taosHashSetFreeFp(pMap->handleMap, destroyStreamNotifyHandle); + *ppMap = pMap; + pMap = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (pMap != NULL) { + tqDestroyNotifyHandleMap(&pMap); + } + return code; +} + +void tqDestroyNotifyHandleMap(SStreamNotifyHandleMap** ppMap) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + if (*ppMap == NULL) { + return; + } + taosHashCleanup((*ppMap)->handleMap); + code = taosThreadMutexDestroy(&(*ppMap)->gMutex); + taosMemoryFreeClear((*ppMap)); +} + +#define JSON_CHECK_ADD_ITEM(obj, str, item) \ + TSDB_CHECK_CONDITION(cJSON_AddItemToObjectCS(obj, str, item), code, lino, _end, TSDB_CODE_OUT_OF_MEMORY) + +static int32_t getStreamNotifyEventHeader(const char* streamName, char** pHeader) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + cJSON* obj = NULL; + cJSON* streams = NULL; + cJSON* stream = NULL; + char msgId[37]; + + TSDB_CHECK_NULL(streamName, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(pHeader, code, lino, _end, TSDB_CODE_INVALID_PARA); + + *pHeader = NULL; + + code = taosGetSystemUUIDLimit36(msgId, sizeof(msgId)); + TSDB_CHECK_CODE(code, lino, _end); + + stream = cJSON_CreateObject(); + TSDB_CHECK_NULL(stream, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + JSON_CHECK_ADD_ITEM(stream, "streamName", cJSON_CreateStringReference(streamName)); + JSON_CHECK_ADD_ITEM(stream, "events", cJSON_CreateArray()); + + streams = cJSON_CreateArray(); + TSDB_CHECK_CONDITION(cJSON_AddItemToArray(streams, stream), code, lino, _end, TSDB_CODE_OUT_OF_MEMORY) + stream = NULL; + + obj = cJSON_CreateObject(); + TSDB_CHECK_NULL(obj, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + JSON_CHECK_ADD_ITEM(obj, "messageId", cJSON_CreateStringReference(msgId)); + JSON_CHECK_ADD_ITEM(obj, "timestamp", cJSON_CreateNumber(taosGetTimestampMs())); + JSON_CHECK_ADD_ITEM(obj, "streams", streams); + streams = NULL; + + *pHeader = cJSON_PrintUnformatted(obj); + TSDB_CHECK_NULL(*pHeader, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (stream != NULL) { + cJSON_Delete(stream); + } + if (streams != NULL) { + cJSON_Delete(streams); + } + if (obj != NULL) { + cJSON_Delete(obj); + } + return code; +} + +static int32_t packupStreamNotifyEvent(const char* streamName, const SArray* pBlocks, char** pMsg, + int32_t* nNotifyEvents) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + int32_t numOfBlocks = 0; + int32_t msgHeaderLen = 0; + int32_t msgTailLen = 0; + int32_t msgLen = 0; + char* msgHeader = NULL; + const char* msgTail = "]}]}"; + char* msg = NULL; + + TSDB_CHECK_NULL(pMsg, code, lino, _end, TSDB_CODE_INVALID_PARA); + + *pMsg = NULL; + numOfBlocks = taosArrayGetSize(pBlocks); + *nNotifyEvents = 0; + + for (int32_t i = 0; i < numOfBlocks; ++i) { + SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); + if (pDataBlock == NULL || pDataBlock->info.type != STREAM_NOTIFY_EVENT) { + continue; + } + + SColumnInfoData* pEventStrCol = taosArrayGet(pDataBlock->pDataBlock, NOTIFY_EVENT_STR_COLUMN_INDEX); + for (int32_t j = 0; j < pDataBlock->info.rows; ++j) { + char* val = colDataGetVarData(pEventStrCol, j); + msgLen += varDataLen(val) + 1; + } + *nNotifyEvents += pDataBlock->info.rows; + } + + if (msgLen == 0) { + // skip since no notification events found + goto _end; + } + + code = getStreamNotifyEventHeader(streamName, &msgHeader); + TSDB_CHECK_CODE(code, lino, _end); + msgHeaderLen = strlen(msgHeader); + msgTailLen = strlen(msgTail); + msgLen += msgHeaderLen; + + msg = taosMemoryMalloc(msgLen); + TSDB_CHECK_NULL(msg, code, lino, _end, terrno); + char* p = msg; + TAOS_STRNCPY(p, msgHeader, msgHeaderLen); + p += msgHeaderLen - msgTailLen; + + for (int32_t i = 0; i < numOfBlocks; ++i) { + SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); + if (pDataBlock == NULL || pDataBlock->info.type != STREAM_NOTIFY_EVENT) { + continue; + } + + SColumnInfoData* pEventStrCol = taosArrayGet(pDataBlock->pDataBlock, NOTIFY_EVENT_STR_COLUMN_INDEX); + for (int32_t j = 0; j < pDataBlock->info.rows; ++j) { + char* val = colDataGetVarData(pEventStrCol, j); + TAOS_STRNCPY(p, varDataVal(val), varDataLen(val)); + p += varDataLen(val); + *(p++) = ','; + } + } + + p -= 1; + TAOS_STRNCPY(p, msgTail, msgTailLen); + *(p + msgTailLen) = '\0'; + + *pMsg = msg; + msg = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (msgHeader != NULL) { + cJSON_free(msgHeader); + } + if (msg != NULL) { + taosMemoryFreeClear(msg); + } + return code; +} + +static int32_t sendSingleStreamNotify(SStreamNotifyHandle* pHandle, char* msg) { +#ifndef WINDOWS + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + CURLcode res = CURLE_OK; + uint64_t sentLen = 0; + uint64_t totalLen = 0; + size_t nbytes = 0; + + TSDB_CHECK_NULL(pHandle, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(pHandle->curl, code, lino, _end, TSDB_CODE_INVALID_PARA); + + totalLen = strlen(msg); + while (sentLen < totalLen) { + res = curl_ws_send(pHandle->curl, msg + sentLen, totalLen - sentLen, &nbytes, 0, CURLWS_TEXT); + TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED); + sentLen += nbytes; + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %d, %s", __func__, lino, res, tstrerror(code)); + stopStreamNotifyConn(pHandle); + } + return code; +#else + tqError("stream notify events is not supported on windows"); + return TSDB_CODE_NOT_SUPPORTTED_IN_WINDOWS; +#endif +} + +int32_t tqSendAllNotifyEvents(const SArray* pBlocks, SStreamTask* pTask, SVnode* pVnode) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + char* msg = NULL; + int32_t nNotifyAddr = 0; + int32_t nNotifyEvents = 0; + SStreamNotifyHandle* pHandle = NULL; + + TSDB_CHECK_NULL(pTask, code, lino, _end, TSDB_CODE_INVALID_PARA); + TSDB_CHECK_NULL(pVnode, code, lino, _end, TSDB_CODE_INVALID_PARA); + + nNotifyAddr = taosArrayGetSize(pTask->notifyInfo.pNotifyAddrUrls); + if (nNotifyAddr == 0) { + goto _end; + } + + code = packupStreamNotifyEvent(pTask->notifyInfo.streamName, pBlocks, &msg, &nNotifyEvents); + TSDB_CHECK_CODE(code, lino, _end); + if (msg == NULL) { + goto _end; + } + + tqDebug("stream task %s prepare to send %d notify events, total msg length: %" PRIu64, pTask->notifyInfo.streamName, + nNotifyEvents, (uint64_t)strlen(msg)); + + for (int32_t i = 0; i < nNotifyAddr; ++i) { + if (streamTaskShouldStop(pTask)) { + break; + } + const char* url = taosArrayGetP(pTask->notifyInfo.pNotifyAddrUrls, i); + code = acquireStreamNotifyHandle(pVnode->pNotifyHandleMap, url, &pHandle); + if (code != TSDB_CODE_SUCCESS) { + tqError("failed to get stream notify handle of %s", url); + if (pTask->notifyInfo.notifyErrorHandle == SNOTIFY_ERROR_HANDLE_PAUSE) { + // retry for event message sending in PAUSE error handling mode + taosMsleep(STREAM_EVENT_NOTIFY_RETRY_MS); + --i; + continue; + } else { + // simply ignore the failure in DROP error handling mode + code = TSDB_CODE_SUCCESS; + continue; + } + } + code = sendSingleStreamNotify(pHandle, msg); + if (code != TSDB_CODE_SUCCESS) { + tqError("failed to send stream notify handle to %s since %s", url, tstrerror(code)); + if (pTask->notifyInfo.notifyErrorHandle == SNOTIFY_ERROR_HANDLE_PAUSE) { + // retry for event message sending in PAUSE error handling mode + taosMsleep(STREAM_EVENT_NOTIFY_RETRY_MS); + --i; + } else { + // simply ignore the failure in DROP error handling mode + code = TSDB_CODE_SUCCESS; + } + } else { + tqDebug("stream task %s send %d notify events to %s successfully", pTask->notifyInfo.streamName, nNotifyEvents, + url); + } + releaseStreamNotifyHandle(&pHandle); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (msg) { + taosMemoryFreeClear(msg); + } + return code; +} diff --git a/source/dnode/vnode/src/tq/tqStreamTask.c b/source/dnode/vnode/src/tq/tqStreamTask.c index bc7e2e28e3..9ea84830f1 100644 --- a/source/dnode/vnode/src/tq/tqStreamTask.c +++ b/source/dnode/vnode/src/tq/tqStreamTask.c @@ -22,6 +22,8 @@ typedef struct SBuildScanWalMsgParam { int64_t metaId; int32_t numOfTasks; + int8_t restored; + SMsgCb msgCb; } SBuildScanWalMsgParam; static int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta); @@ -74,7 +76,6 @@ int32_t tqScanWal(STQ* pTq) { static void doStartScanWal(void* param, void* tmrId) { int32_t vgId = 0; - STQ* pTq = NULL; int32_t code = 0; SBuildScanWalMsgParam* pParam = (SBuildScanWalMsgParam*)param; @@ -86,13 +87,29 @@ static void doStartScanWal(void* param, void* tmrId) { return; } + if (pMeta->closeFlag) { + code = taosReleaseRef(streamMetaRefPool, pParam->metaId); + if (code == TSDB_CODE_SUCCESS) { + tqDebug("vgId:%d jump out of scan wal timer since closed", vgId); + } else { + tqError("vgId:%d failed to release ref for streamMeta, rid:%" PRId64 " code:%s", vgId, pParam->metaId, + tstrerror(code)); + } + + taosMemoryFree(pParam); + return; + } + vgId = pMeta->vgId; - pTq = pMeta->ahandle; tqDebug("vgId:%d create msg to start wal scan, numOfTasks:%d, vnd restored:%d", vgId, pParam->numOfTasks, - pTq->pVnode->restored); + pParam->restored); +#if 0 + // wait for the vnode is freed, and invalid read may occur. + taosMsleep(10000); +#endif - code = streamTaskSchedTask(&pTq->pVnode->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA); + code = streamTaskSchedTask(&pParam->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA); if (code) { tqError("vgId:%d failed sched task to scan wal, code:%s", vgId, tstrerror(code)); } @@ -120,6 +137,8 @@ int32_t tqScanWalInFuture(STQ* pTq, int32_t numOfTasks, int32_t idleDuration) { pParam->metaId = pMeta->rid; pParam->numOfTasks = numOfTasks; + pParam->restored = pTq->pVnode->restored; + pParam->msgCb = pTq->pVnode->msgCb; code = streamTimerGetInstance(&pTimer); if (code) { @@ -330,13 +349,13 @@ int32_t doPutDataIntoInputQ(SStreamTask* pTask, int64_t maxVer, int32_t* numOfIt int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta) { int32_t vgId = pStreamMeta->vgId; + SArray* pTaskList = NULL; int32_t numOfTasks = taosArrayGetSize(pStreamMeta->pTaskList); if (numOfTasks == 0) { return TSDB_CODE_SUCCESS; } // clone the task list, to avoid the task update during scan wal files - SArray* pTaskList = NULL; streamMetaWLock(pStreamMeta); pTaskList = taosArrayDup(pStreamMeta->pTaskList, NULL); streamMetaWUnLock(pStreamMeta); @@ -447,3 +466,11 @@ int32_t doScanWalAsync(STQ* pTq, bool ckPause) { return streamTaskSchedTask(&pTq->pVnode->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA); } + +void streamMetaFreeTQDuringScanWalError(STQ* pTq) { + SBuildScanWalMsgParam* p = taosMemoryCalloc(1, sizeof(SBuildScanWalMsgParam)); + p->metaId = pTq->pStreamMeta->rid; + p->numOfTasks = 0; + + doStartScanWal(p, 0); +} \ No newline at end of file diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index 06b7b33cd8..1880156f61 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -86,6 +86,14 @@ int32_t tqExpandStreamTask(SStreamTask* pTask) { if (code) { return code; } + + code = + qSetStreamNotifyInfo(pTask->exec.pExecutor, pTask->notifyInfo.notifyEventTypes, + pTask->notifyInfo.pSchemaWrapper, pTask->notifyInfo.stbFullName, IS_NEW_SUBTB_RULE(pTask)); + if (code) { + tqError("s-task:%s failed to set stream notify info, code:%s", pTask->id.idStr, tstrerror(code)); + return code; + } } streamSetupScheduleTrigger(pTask); @@ -1357,4 +1365,4 @@ int32_t tqStreamTaskProcessConsenChkptIdReq(SStreamMeta* pMeta, SRpcMsg* pMsg) { streamMetaReleaseTask(pMeta, pTask); return 0; -} \ No newline at end of file +} diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index 6de5298728..280ee527f7 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -15,6 +15,7 @@ #include "sync.h" #include "tcs.h" +#include "tq.h" #include "tsdb.h" #include "vnd.h" @@ -483,6 +484,14 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC ret = taosRealPath(tdir, NULL, sizeof(tdir)); TAOS_UNUSED(ret); + // init handle map for stream event notification + ret = tqInitNotifyHandleMap(&pVnode->pNotifyHandleMap); + if (ret != TSDB_CODE_SUCCESS) { + vError("vgId:%d, failed to init StreamNotifyHandleMap", TD_VID(pVnode)); + terrno = ret; + goto _err; + } + // open query vInfo("vgId:%d, start to open vnode query", TD_VID(pVnode)); if (vnodeQueryOpen(pVnode)) { @@ -555,6 +564,7 @@ void vnodeClose(SVnode *pVnode) { vnodeAWait(&pVnode->commitTask); vnodeSyncClose(pVnode); vnodeQueryClose(pVnode); + tqDestroyNotifyHandleMap(&pVnode->pNotifyHandleMap); tqClose(pVnode->pTq); walClose(pVnode->pWal); if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb); diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 48afa78251..84eba69acb 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -449,9 +449,17 @@ typedef struct STimeWindowAggSupp { SColumnInfoData timeWindowData; // query time window info for scalar function execution. } STimeWindowAggSupp; +typedef struct SStreamNotifyEventSupp { + SArray* pWindowEvents; // Array of SStreamNotifyEvent, storing window events and trigger values. + SHashObj* pTableNameHashMap; // Hash map from groupid to the dest child table name. + SHashObj* pResultHashMap; // Hash map from groupid+skey to the window agg result. + SSDataBlock* pEventBlock; // The datablock contains all window events and results. +} SStreamNotifyEventSupp; + typedef struct SSteamOpBasicInfo { - int32_t primaryPkIndex; - bool updateOperatorInfo; + int32_t primaryPkIndex; + bool updateOperatorInfo; + SStreamNotifyEventSupp windowEventSup; } SSteamOpBasicInfo; typedef struct SStreamFillSupporter { @@ -767,6 +775,8 @@ typedef struct SStreamEventAggOperatorInfo { SSHashObj* pPkDeleted; bool destHasPrimaryKey; struct SOperatorInfo* pOperator; + SNodeList* pStartCondCols; + SNodeList* pEndCondCols; } SStreamEventAggOperatorInfo; typedef struct SStreamCountAggOperatorInfo { diff --git a/source/libs/executor/inc/querytask.h b/source/libs/executor/inc/querytask.h index f726e4300f..86ee6f4124 100644 --- a/source/libs/executor/inc/querytask.h +++ b/source/libs/executor/inc/querytask.h @@ -71,6 +71,10 @@ typedef struct { SVersionRange fillHistoryVer; STimeWindow fillHistoryWindow; SStreamState* pState; + int32_t eventTypes; // event types to notify + SSchemaWrapper* notifyResultSchema; // agg result to notify + char* stbFullName; // used to generate dest child table name + bool newSubTableRule; // used to generate dest child table name } SStreamTaskInfo; struct SExecTaskInfo { diff --git a/source/libs/executor/inc/streamexecutorInt.h b/source/libs/executor/inc/streamexecutorInt.h index 0a69080314..7b3c828351 100644 --- a/source/libs/executor/inc/streamexecutorInt.h +++ b/source/libs/executor/inc/streamexecutorInt.h @@ -19,7 +19,10 @@ extern "C" { #endif +#include "cJSON.h" +#include "cmdnodes.h" #include "executorInt.h" +#include "querytask.h" #include "tutil.h" #define FILL_POS_INVALID 0 @@ -57,7 +60,8 @@ typedef struct SSlicePoint { void setStreamOperatorState(SSteamOpBasicInfo* pBasicInfo, EStreamType type); bool needSaveStreamOperatorInfo(SSteamOpBasicInfo* pBasicInfo); void saveStreamOperatorStateComplete(SSteamOpBasicInfo* pBasicInfo); -void initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo); +int32_t initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo); +void destroyStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo); int64_t getDeleteMarkFromOption(SStreamNodeOption* pOption); void removeDeleteResults(SSHashObj* pUpdatedMap, SArray* pDelWins); @@ -106,6 +110,13 @@ int32_t buildAllResultKey(SStateStore* pStateStore, SStreamState* pState, TSKEY int32_t initOffsetInfo(int32_t** ppOffset, SSDataBlock* pRes); TSKEY compareTs(void* pKey); +int32_t addEventAggNotifyEvent(EStreamNotifyEventType eventType, const SSessionKey* pSessionKey, + const SSDataBlock* pInputBlock, const SNodeList* pCondCols, int32_t ri, + SStreamNotifyEventSupp* sup); +int32_t addAggResultNotifyEvent(const SSDataBlock* pResultBlock, const SSchemaWrapper* pSchemaWrapper, + SStreamNotifyEventSupp* sup); +int32_t buildNotifyEventBlock(const SExecTaskInfo* pTaskInfo, SStreamNotifyEventSupp* sup); + #ifdef __cplusplus } #endif diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index dffab1b163..39bef9c95f 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -250,6 +250,28 @@ int32_t qSetStreamOpOpen(qTaskInfo_t tinfo) { return code; } +int32_t qSetStreamNotifyInfo(qTaskInfo_t tinfo, int32_t eventTypes, const SSchemaWrapper* pSchemaWrapper, + const char* stbFullName, bool newSubTableRule) { + int32_t code = TSDB_CODE_SUCCESS; + SStreamTaskInfo *pStreamInfo = NULL; + + if (tinfo == 0 || eventTypes == 0 || pSchemaWrapper == NULL || stbFullName == NULL) { + goto _end; + } + + pStreamInfo = &((SExecTaskInfo*)tinfo)->streamInfo; + pStreamInfo->eventTypes = eventTypes; + pStreamInfo->notifyResultSchema = tCloneSSchemaWrapper(pSchemaWrapper); + if (pStreamInfo->notifyResultSchema == NULL) { + code = terrno; + } + pStreamInfo->stbFullName = taosStrdup(stbFullName); + pStreamInfo->newSubTableRule = newSubTableRule; + +_end: + return code; +} + int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) { if (tinfo == NULL) { return TSDB_CODE_APP_ERROR; @@ -469,6 +491,13 @@ int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableI } SStreamScanInfo* pScanInfo = pInfo->info; + if (pInfo->pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) { // clear meta cache for subscription if tag is changed + for (int32_t i = 0; i < taosArrayGetSize(tableIdList); ++i) { + int64_t* uid = (int64_t*)taosArrayGet(tableIdList, i); + STableScanInfo* pTableScanInfo = pScanInfo->pTableScanOp->info; + taosLRUCacheErase(pTableScanInfo->base.metaCache.pTableMetaEntryCache, uid, LONG_BYTES); + } + } if (isAdd) { // add new table id SArray* qa = NULL; diff --git a/source/libs/executor/src/querytask.c b/source/libs/executor/src/querytask.c index c6a1900b41..20c80df4fa 100644 --- a/source/libs/executor/src/querytask.c +++ b/source/libs/executor/src/querytask.c @@ -262,6 +262,8 @@ SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode) { static void cleanupStreamInfo(SStreamTaskInfo* pStreamInfo) { tDeleteSchemaWrapper(pStreamInfo->schema); tOffsetDestroy(&pStreamInfo->currentOffset); + tDeleteSchemaWrapper(pStreamInfo->notifyResultSchema); + taosMemoryFree(pStreamInfo->stbFullName); } static void freeBlock(void* pParam) { diff --git a/source/libs/executor/src/streameventwindowoperator.c b/source/libs/executor/src/streameventwindowoperator.c index fa6008eba7..5f4d6b30fa 100644 --- a/source/libs/executor/src/streameventwindowoperator.c +++ b/source/libs/executor/src/streameventwindowoperator.c @@ -12,6 +12,8 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ + +#include "cmdnodes.h" #include "executorInt.h" #include "filter.h" #include "function.h" @@ -53,6 +55,8 @@ void destroyStreamEventOperatorInfo(void* param) { &pInfo->groupResInfo); pInfo->pOperator = NULL; } + + destroyStreamBasicInfo(&pInfo->basic); destroyStreamAggSupporter(&pInfo->streamAggSup); clearGroupResInfo(&pInfo->groupResInfo); taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos); @@ -89,6 +93,16 @@ void destroyStreamEventOperatorInfo(void* param) { pInfo->pEndCondInfo = NULL; } + if (pInfo->pStartCondCols != NULL) { + nodesDestroyList(pInfo->pStartCondCols); + pInfo->pStartCondCols = NULL; + } + + if (pInfo->pEndCondCols != NULL) { + nodesDestroyList(pInfo->pEndCondCols); + pInfo->pEndCondCols = NULL; + } + taosMemoryFreeClear(param); } @@ -121,7 +135,7 @@ void reuseOutputBuf(void* pState, SRowBuffPos* pPos, SStateStore* pAPI) { } int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t groupId, bool* pStart, bool* pEnd, - int32_t index, int32_t rows, SEventWindowInfo* pCurWin, SSessionKey* pNextWinKey) { + int32_t index, int32_t rows, SEventWindowInfo* pCurWin, SSessionKey* pNextWinKey, int32_t* pWinCode) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; int32_t winCode = TSDB_CODE_SUCCESS; @@ -143,6 +157,7 @@ int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t gro setEventWindowInfo(pAggSup, &leftWinKey, pVal, pCurWin); if (inWin || (pCurWin->pWinFlag->startFlag && !pCurWin->pWinFlag->endFlag)) { pCurWin->winInfo.isOutput = !isWindowIncomplete(pCurWin); + (*pWinCode) = TSDB_CODE_SUCCESS; goto _end; } } @@ -156,6 +171,7 @@ int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t gro if (endi < 0 || pTs[endi] >= rightWinKey.win.skey) { setEventWindowInfo(pAggSup, &rightWinKey, pVal, pCurWin); pCurWin->winInfo.isOutput = !isWindowIncomplete(pCurWin); + (*pWinCode) = TSDB_CODE_SUCCESS; goto _end; } } @@ -163,6 +179,7 @@ int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t gro SSessionKey winKey = {.win.skey = ts, .win.ekey = ts, .groupId = groupId}; code = pAggSup->stateStore.streamStateSessionAllocWinBuffByNextPosition(pAggSup->pState, pCur, &winKey, &pVal, &len); QUERY_CHECK_CODE(code, lino, _error); + (*pWinCode) = TSDB_CODE_FAILED; setEventWindowInfo(pAggSup, &winKey, pVal, pCurWin); pCurWin->pWinFlag->startFlag = start; @@ -373,10 +390,18 @@ static void doStreamEventAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl bool allEqual = true; SEventWindowInfo curWin = {0}; SSessionKey nextWinKey = {0}; + int32_t winCode = TSDB_CODE_SUCCESS; code = setEventOutputBuf(pAggSup, tsCols, groupId, (bool*)pColStart->pData, (bool*)pColEnd->pData, i, rows, &curWin, - &nextWinKey); + &nextWinKey, &winCode); QUERY_CHECK_CODE(code, lino, _end); + if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_OPEN) && + *(bool*)colDataGetNumData(pColStart, i) && winCode != TSDB_CODE_SUCCESS) { + code = addEventAggNotifyEvent(SNOTIFY_EVENT_WINDOW_OPEN, &curWin.winInfo.sessionWin, pSDataBlock, + pInfo->pStartCondCols, i, &pInfo->basic.windowEventSup); + QUERY_CHECK_CODE(code, lino, _end); + } + setSessionWinOutputInfo(pSeUpdated, &curWin.winInfo); bool rebuild = false; code = updateEventWindowInfo(pAggSup, &curWin, &nextWinKey, tsCols, (bool*)pColStart->pData, (bool*)pColEnd->pData, @@ -443,6 +468,12 @@ static void doStreamEventAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl tSimpleHashPut(pAggSup->pResultRows, &key, sizeof(SSessionKey), &curWin.winInfo, sizeof(SResultWindowInfo)); QUERY_CHECK_CODE(code, lino, _end); } + + if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_CLOSE)) { + code = addEventAggNotifyEvent(SNOTIFY_EVENT_WINDOW_CLOSE, &curWin.winInfo.sessionWin, pSDataBlock, + pInfo->pEndCondCols, i + winRows - 1, &pInfo->basic.windowEventSup); + QUERY_CHECK_CODE(code, lino, _end); + } } _end: @@ -563,6 +594,7 @@ void doStreamEventSaveCheckpoint(SOperatorInfo* pOperator) { static int32_t buildEventResult(SOperatorInfo* pOperator, SSDataBlock** ppRes) { int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; SStreamEventAggOperatorInfo* pInfo = pOperator->info; SOptrBasicInfo* pBInfo = &pInfo->binfo; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -577,10 +609,27 @@ static int32_t buildEventResult(SOperatorInfo* pOperator, SSDataBlock** ppRes) { doBuildSessionResult(pOperator, pInfo->streamAggSup.pState, &pInfo->groupResInfo, pBInfo->pRes); if (pBInfo->pRes->info.rows > 0) { printDataBlock(pBInfo->pRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); + if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_CLOSE)) { + code = addAggResultNotifyEvent(pBInfo->pRes, pTaskInfo->streamInfo.notifyResultSchema, &pInfo->basic.windowEventSup); + QUERY_CHECK_CODE(code, lino, _end); + } (*ppRes) = pBInfo->pRes; return code; } + + code = buildNotifyEventBlock(pTaskInfo, &pInfo->basic.windowEventSup); + QUERY_CHECK_CODE(code, lino, _end); + if (pInfo->basic.windowEventSup.pEventBlock->info.rows > 0) { + printDataBlock(pInfo->basic.windowEventSup.pEventBlock, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo)); + (*ppRes) = pInfo->basic.windowEventSup.pEventBlock; + return code; + } + +_end: (*ppRes) = NULL; + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s. task:%s", __func__, lino, tstrerror(code), GET_TASKID(pTaskInfo)); + } return code; } @@ -957,6 +1006,7 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pInfo->pPkDeleted = tSimpleHashInit(64, hashFn); QUERY_CHECK_NULL(pInfo->pPkDeleted, code, lino, _error, terrno); pInfo->destHasPrimaryKey = pEventNode->window.destHasPrimaryKey; + initStreamBasicInfo(&pInfo->basic); pInfo->pOperator = pOperator; setOperatorInfo(pOperator, "StreamEventAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT, true, OP_NOT_OPENED, @@ -989,6 +1039,12 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* code = filterInitFromNode((SNode*)pEventNode->pEndCond, &pInfo->pEndCondInfo, 0); QUERY_CHECK_CODE(code, lino, _error); + code = + nodesCollectColumnsFromNode((SNode*)pEventNode->pStartCond, NULL, COLLECT_COL_TYPE_ALL, &pInfo->pStartCondCols); + QUERY_CHECK_CODE(code, lino, _error); + code = nodesCollectColumnsFromNode((SNode*)pEventNode->pEndCond, NULL, COLLECT_COL_TYPE_ALL, &pInfo->pEndCondCols); + QUERY_CHECK_CODE(code, lino, _error); + *pOptrInfo = pOperator; return TSDB_CODE_SUCCESS; diff --git a/source/libs/executor/src/streamexecutorInt.c b/source/libs/executor/src/streamexecutorInt.c index b94798934c..9cafdfff0c 100644 --- a/source/libs/executor/src/streamexecutorInt.c +++ b/source/libs/executor/src/streamexecutorInt.c @@ -13,7 +13,19 @@ * along with this program. If not, see . */ +#include "streamexecutorInt.h" + #include "executorInt.h" +#include "tdatablock.h" + +#define NOTIFY_EVENT_NAME_CACHE_LIMIT_MB 16 + +typedef struct SStreamNotifyEvent { + uint64_t gid; + TSKEY skey; + char* content; + bool isEnd; +} SStreamNotifyEvent; void setStreamOperatorState(SSteamOpBasicInfo* pBasicInfo, EStreamType type) { if (type != STREAM_GET_ALL && type != STREAM_CHECKPOINT) { @@ -29,7 +41,509 @@ void saveStreamOperatorStateComplete(SSteamOpBasicInfo* pBasicInfo) { pBasicInfo->updateOperatorInfo = false; } -void initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) { +static void destroyStreamWindowEvent(void* ptr) { + SStreamNotifyEvent* pEvent = ptr; + if (pEvent == NULL || pEvent->content == NULL) return; + cJSON_free(pEvent->content); +} + +static void destroyStreamNotifyEventSupp(SStreamNotifyEventSupp* sup) { + if (sup == NULL) return; + taosArrayDestroyEx(sup->pWindowEvents, destroyStreamWindowEvent); + taosHashCleanup(sup->pTableNameHashMap); + taosHashCleanup(sup->pResultHashMap); + blockDataDestroy(sup->pEventBlock); + *sup = (SStreamNotifyEventSupp){0}; +} + +static int32_t initStreamNotifyEventSupp(SStreamNotifyEventSupp *sup) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SSDataBlock* pBlock = NULL; + SColumnInfoData infoData = {0}; + + if (sup == NULL) { + goto _end; + } + + code = createDataBlock(&pBlock); + QUERY_CHECK_CODE(code, lino, _end); + + pBlock->info.type = STREAM_NOTIFY_EVENT; + pBlock->info.watermark = INT64_MIN; + + infoData.info.type = TSDB_DATA_TYPE_VARCHAR; + infoData.info.bytes = tDataTypes[infoData.info.type].bytes; + code = blockDataAppendColInfo(pBlock, &infoData); + QUERY_CHECK_CODE(code, lino, _end); + + sup->pWindowEvents = taosArrayInit(0, sizeof(SStreamNotifyEvent)); + QUERY_CHECK_NULL(sup->pWindowEvents, code, lino, _end, terrno); + sup->pTableNameHashMap = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK); + QUERY_CHECK_NULL(sup->pTableNameHashMap, code, lino, _end, terrno); + sup->pResultHashMap = taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + QUERY_CHECK_NULL(sup->pResultHashMap, code, lino, _end, terrno); + taosHashSetFreeFp(sup->pResultHashMap, destroyStreamWindowEvent); + sup->pEventBlock = pBlock; + pBlock = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + if (sup) { + destroyStreamNotifyEventSupp(sup); + } + } + if (pBlock != NULL) { + blockDataDestroy(pBlock); + } + return code; +} + +int32_t initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) { pBasicInfo->primaryPkIndex = -1; pBasicInfo->updateOperatorInfo = false; + return initStreamNotifyEventSupp(&pBasicInfo->windowEventSup); +} + +void destroyStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) { + destroyStreamNotifyEventSupp(&pBasicInfo->windowEventSup); +} + +static void streamNotifyGetEventWindowId(const SSessionKey* pSessionKey, char *buf) { + uint64_t hash = 0; + uint64_t ar[2]; + + ar[0] = pSessionKey->groupId; + ar[1] = pSessionKey->win.skey; + hash = MurmurHash3_64((char*)ar, sizeof(ar)); + buf = u64toaFastLut(hash, buf); +} + +#define JSON_CHECK_ADD_ITEM(obj, str, item) \ + QUERY_CHECK_CONDITION(cJSON_AddItemToObjectCS(obj, str, item), code, lino, _end, TSDB_CODE_OUT_OF_MEMORY) + +static int32_t jsonAddColumnField(const char* colName, const SColumnInfoData* pColData, int32_t ri, cJSON* obj) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + char* temp = NULL; + + QUERY_CHECK_NULL(colName, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pColData, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(obj, code, lino, _end, TSDB_CODE_INVALID_PARA); + + if (colDataIsNull_s(pColData, ri)) { + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNull()); + goto _end; + } + + switch (pColData->info.type) { + case TSDB_DATA_TYPE_BOOL: { + bool val = *(bool*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateBool(val)); + break; + } + + case TSDB_DATA_TYPE_TINYINT: { + int8_t val = *(int8_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_SMALLINT: { + int16_t val = *(int16_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_INT: { + int32_t val = *(int32_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_TIMESTAMP: { + int64_t val = *(int64_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_FLOAT: { + float val = *(float*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_DOUBLE: { + double val = *(double*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_VARCHAR: + case TSDB_DATA_TYPE_NCHAR: { + // cJSON requires null-terminated strings, but this data is not null-terminated, + // so we need to manually copy the string and add null termination. + const char* src = varDataVal(colDataGetVarData(pColData, ri)); + int32_t len = varDataLen(colDataGetVarData(pColData, ri)); + temp = cJSON_malloc(len + 1); + QUERY_CHECK_NULL(temp, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + memcpy(temp, src, len); + temp[len] = '\0'; + + cJSON* item = cJSON_CreateStringReference(temp); + JSON_CHECK_ADD_ITEM(obj, colName, item); + + // let the cjson object to free memory later + item->type &= ~cJSON_IsReference; + temp = NULL; + break; + } + + case TSDB_DATA_TYPE_UTINYINT: { + uint8_t val = *(uint8_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_USMALLINT: { + uint16_t val = *(uint16_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_UINT: { + uint32_t val = *(uint32_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + case TSDB_DATA_TYPE_UBIGINT: { + uint64_t val = *(uint64_t*)colDataGetNumData(pColData, ri); + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val)); + break; + } + + default: { + JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateStringReference("")); + break; + } + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (temp) { + cJSON_free(temp); + } + return code; +} + +int32_t addEventAggNotifyEvent(EStreamNotifyEventType eventType, const SSessionKey* pSessionKey, + const SSDataBlock* pInputBlock, const SNodeList* pCondCols, int32_t ri, + SStreamNotifyEventSupp* sup) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SNode* node = NULL; + cJSON* event = NULL; + cJSON* fields = NULL; + cJSON* cond = NULL; + SStreamNotifyEvent item = {0}; + char windowId[32]; + + QUERY_CHECK_NULL(pSessionKey, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pInputBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pInputBlock->pDataBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pCondCols, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(sup, code, lino, _end, TSDB_CODE_INVALID_PARA); + + qDebug("add stream notify event from event window, type: %s, start: %" PRId64 ", end: %" PRId64, + (eventType == SNOTIFY_EVENT_WINDOW_OPEN) ? "WINDOW_OPEN" : "WINDOW_CLOSE", pSessionKey->win.skey, + pSessionKey->win.ekey); + + event = cJSON_CreateObject(); + QUERY_CHECK_NULL(event, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + + // add basic info + streamNotifyGetEventWindowId(pSessionKey, windowId); + if (eventType == SNOTIFY_EVENT_WINDOW_OPEN) { + JSON_CHECK_ADD_ITEM(event, "eventType", cJSON_CreateStringReference("WINDOW_OPEN")); + } else if (eventType == SNOTIFY_EVENT_WINDOW_CLOSE) { + JSON_CHECK_ADD_ITEM(event, "eventType", cJSON_CreateStringReference("WINDOW_CLOSE")); + } + JSON_CHECK_ADD_ITEM(event, "eventTime", cJSON_CreateNumber(taosGetTimestampMs())); + JSON_CHECK_ADD_ITEM(event, "windowId", cJSON_CreateStringReference(windowId)); + JSON_CHECK_ADD_ITEM(event, "windowType", cJSON_CreateStringReference("Event")); + JSON_CHECK_ADD_ITEM(event, "windowStart", cJSON_CreateNumber(pSessionKey->win.skey)); + if (eventType == SNOTIFY_EVENT_WINDOW_CLOSE) { + JSON_CHECK_ADD_ITEM(event, "windowEnd", cJSON_CreateNumber(pSessionKey->win.ekey)); + } + + // create fields object to store matched column values + fields = cJSON_CreateObject(); + QUERY_CHECK_NULL(fields, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + FOREACH(node, pCondCols) { + SColumnNode* pColDef = (SColumnNode*)node; + SColumnInfoData* pColData = taosArrayGet(pInputBlock->pDataBlock, pColDef->slotId); + code = jsonAddColumnField(pColDef->colName, pColData, ri, fields); + QUERY_CHECK_CODE(code, lino, _end); + } + + // add trigger condition + cond = cJSON_CreateObject(); + QUERY_CHECK_NULL(cond, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + JSON_CHECK_ADD_ITEM(cond, "conditionIndex", cJSON_CreateNumber(0)); + JSON_CHECK_ADD_ITEM(cond, "fieldValues", fields); + fields = NULL; + JSON_CHECK_ADD_ITEM(event, "triggerConditions", cond); + cond = NULL; + + // convert json object to string value + item.gid = pSessionKey->groupId; + item.skey = pSessionKey->win.skey; + item.isEnd = (eventType == SNOTIFY_EVENT_WINDOW_CLOSE); + item.content = cJSON_PrintUnformatted(event); + QUERY_CHECK_NULL(taosArrayPush(sup->pWindowEvents, &item), code, lino, _end, terrno); + item.content = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + destroyStreamWindowEvent(&item); + if (cond != NULL) { + cJSON_Delete(cond); + } + if (fields != NULL) { + cJSON_Delete(fields); + } + if (event != NULL) { + cJSON_Delete(event); + } + return code; +} + +int32_t addAggResultNotifyEvent(const SSDataBlock* pResultBlock, const SSchemaWrapper* pSchemaWrapper, + SStreamNotifyEventSupp* sup) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SNode * node = NULL; + cJSON* event = NULL; + cJSON* result = NULL; + SStreamNotifyEvent item = {0}; + SColumnInfoData* pWstartCol = NULL; + + QUERY_CHECK_NULL(pResultBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pSchemaWrapper, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(sup, code, lino, _end, TSDB_CODE_INVALID_PARA); + + qDebug("add %" PRId64 " stream notify results from window agg", pResultBlock->info.rows); + + pWstartCol = taosArrayGet(pResultBlock->pDataBlock, 0); + for (int32_t i = 0; i< pResultBlock->info.rows; ++i) { + event = cJSON_CreateObject(); + QUERY_CHECK_NULL(event, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + + // convert the result row into json + result = cJSON_CreateObject(); + QUERY_CHECK_NULL(result, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY); + for (int32_t j = 0; j < pSchemaWrapper->nCols; ++j) { + SSchema *pCol = pSchemaWrapper->pSchema + j; + SColumnInfoData *pColData = taosArrayGet(pResultBlock->pDataBlock, pCol->colId - 1); + code = jsonAddColumnField(pCol->name, pColData, i, result); + QUERY_CHECK_CODE(code, lino, _end); + } + JSON_CHECK_ADD_ITEM(event, "result", result); + result = NULL; + + item.gid = pResultBlock->info.id.groupId; + item.skey = *(uint64_t*)colDataGetNumData(pWstartCol, i); + item.content = cJSON_PrintUnformatted(event); + code = taosHashPut(sup->pResultHashMap, &item.gid, sizeof(item.gid) + sizeof(item.skey), &item, sizeof(item)); + TSDB_CHECK_CODE(code, lino, _end); + item.content = NULL; + + cJSON_Delete(event); + event = NULL; + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + destroyStreamWindowEvent(&item); + if (result != NULL) { + cJSON_Delete(result); + } + if (event != NULL) { + cJSON_Delete(event); + } + return code; +} + +static int32_t streamNotifyGetDestTableName(const SExecTaskInfo* pTaskInfo, uint64_t gid, char** pTableName) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + const SStorageAPI* pAPI = NULL; + void* tbname = NULL; + int32_t winCode = TSDB_CODE_SUCCESS; + char parTbName[TSDB_TABLE_NAME_LEN]; + const SStreamTaskInfo* pStreamInfo = NULL; + + QUERY_CHECK_NULL(pTaskInfo, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pTableName, code, lino, _end, TSDB_CODE_INVALID_PARA); + + *pTableName = NULL; + + pAPI = &pTaskInfo->storageAPI; + code = pAPI->stateStore.streamStateGetParName((void*)pTaskInfo->streamInfo.pState, gid, &tbname, false, &winCode); + QUERY_CHECK_CODE(code, lino, _end); + if (winCode != TSDB_CODE_SUCCESS) { + parTbName[0] = '\0'; + } else { + tstrncpy(parTbName, tbname, sizeof(parTbName)); + } + pAPI->stateStore.streamStateFreeVal(tbname); + + pStreamInfo = &pTaskInfo->streamInfo; + code = buildSinkDestTableName(parTbName, pStreamInfo->stbFullName, gid, pStreamInfo->newSubTableRule, pTableName); + QUERY_CHECK_CODE(code, lino, _end); + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +static int32_t streamNotifyFillTableName(const char* tableName, const SStreamNotifyEvent* pEvent, + const SStreamNotifyEvent* pResult, char** pVal) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + static const char* prefix = "{\"tableName\":\""; + uint64_t prefixLen = 0; + uint64_t nameLen = 0; + uint64_t eventLen = 0; + uint64_t resultLen = 0; + uint64_t valLen = 0; + char* val = NULL; + char* p = NULL; + + QUERY_CHECK_NULL(tableName, code, lino, _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pEvent, code, lino , _end, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(pVal, code, lino , _end, TSDB_CODE_INVALID_PARA); + + *pVal = NULL; + prefixLen = strlen(prefix); + nameLen = strlen(tableName); + eventLen = strlen(pEvent->content); + + if (pResult != NULL) { + resultLen = strlen(pResult->content); + valLen = VARSTR_HEADER_SIZE + prefixLen + nameLen + eventLen + resultLen; + } else { + valLen = VARSTR_HEADER_SIZE + prefixLen + nameLen + eventLen + 1; + } + val = taosMemoryMalloc(valLen); + QUERY_CHECK_NULL(val, code, lino, _end, terrno); + varDataSetLen(val, valLen - VARSTR_HEADER_SIZE); + + p = varDataVal(val); + TAOS_STRNCPY(p, prefix, prefixLen); + p += prefixLen; + TAOS_STRNCPY(p, tableName, nameLen); + p += nameLen; + *(p++) = '\"'; + TAOS_STRNCPY(p, pEvent->content, eventLen); + *p = ','; + + if (pResult != NULL) { + p += eventLen - 1; + TAOS_STRNCPY(p, pResult->content, resultLen); + *p = ','; + } + *pVal = val; + val = NULL; + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (val != NULL) { + taosMemoryFreeClear(val); + } + return code; +} + +int32_t buildNotifyEventBlock(const SExecTaskInfo* pTaskInfo, SStreamNotifyEventSupp* sup) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + SColumnInfoData* pEventStrCol = NULL; + int32_t nWindowEvents = 0; + int32_t nWindowResults = 0; + char* val = NULL; + + if (pTaskInfo == NULL || sup == NULL) { + goto _end; + } + + QUERY_CHECK_NULL(sup->pEventBlock, code, lino, _end, TSDB_CODE_INVALID_PARA); + blockDataCleanup(sup->pEventBlock); + nWindowEvents = taosArrayGetSize(sup->pWindowEvents); + nWindowResults = taosHashGetSize(sup->pResultHashMap); + qDebug("start to build stream notify event block, nWindowEvents: %d, nWindowResults: %d", nWindowEvents, + nWindowResults); + if (nWindowEvents == 0) { + goto _end; + } + + code = blockDataEnsureCapacity(sup->pEventBlock, nWindowEvents); + QUERY_CHECK_CODE(code, lino, _end); + + pEventStrCol = taosArrayGet(sup->pEventBlock->pDataBlock, NOTIFY_EVENT_STR_COLUMN_INDEX); + QUERY_CHECK_NULL(pEventStrCol, code, lino, _end, terrno); + + for (int32_t i = 0; i < nWindowEvents; ++i) { + SStreamNotifyEvent* pResult = NULL; + SStreamNotifyEvent* pEvent = taosArrayGet(sup->pWindowEvents, i); + char* tableName = taosHashGet(sup->pTableNameHashMap, &pEvent->gid, sizeof(pEvent->gid)); + if (tableName == NULL) { + code = streamNotifyGetDestTableName(pTaskInfo, pEvent->gid, &tableName); + QUERY_CHECK_CODE(code, lino, _end); + code = taosHashPut(sup->pTableNameHashMap, &pEvent->gid, sizeof(pEvent->gid), tableName, strlen(tableName) + 1); + taosMemoryFreeClear(tableName); + QUERY_CHECK_CODE(code, lino, _end); + tableName = taosHashGet(sup->pTableNameHashMap, &pEvent->gid, sizeof(pEvent->gid)); + QUERY_CHECK_NULL(tableName, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + } + if (pEvent->isEnd) { + pResult = taosHashGet(sup->pResultHashMap, &pEvent->gid, sizeof(pEvent->gid) + sizeof(pEvent->skey)); + QUERY_CHECK_NULL(pResult, code, lino, _end, TSDB_CODE_INTERNAL_ERROR); + } + code = streamNotifyFillTableName(tableName, pEvent, pResult, &val); + QUERY_CHECK_CODE(code, lino, _end); + code = colDataSetVal(pEventStrCol, i, val, false); + QUERY_CHECK_CODE(code, lino, _end); + taosMemoryFreeClear(val); + sup->pEventBlock->info.rows++; + } + + if (taosHashGetMemSize(sup->pTableNameHashMap) >= NOTIFY_EVENT_NAME_CACHE_LIMIT_MB * 1024 * 1024) { + taosHashClear(sup->pTableNameHashMap); + } + +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + if (val != NULL) { + taosMemoryFreeClear(val); + } + if (sup != NULL) { + taosArrayClearEx(sup->pWindowEvents, destroyStreamWindowEvent); + taosHashClear(sup->pResultHashMap); + } + return code; } diff --git a/source/libs/executor/src/streamintervalsliceoperator.c b/source/libs/executor/src/streamintervalsliceoperator.c index d038e4d82c..44799f193b 100644 --- a/source/libs/executor/src/streamintervalsliceoperator.c +++ b/source/libs/executor/src/streamintervalsliceoperator.c @@ -55,6 +55,7 @@ void destroyStreamIntervalSliceOperatorInfo(void* param) { pInfo->pOperator = NULL; } + destroyStreamBasicInfo(&pInfo->basic); clearGroupResInfo(&pInfo->groupResInfo); taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos); pInfo->pUpdated = NULL; @@ -651,7 +652,8 @@ int32_t createStreamIntervalSliceOperatorInfo(SOperatorInfo* downstream, SPhysiN optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); setOperatorStreamStateFn(pOperator, streamIntervalSliceReleaseState, streamIntervalSliceReloadState); - initStreamBasicInfo(&pInfo->basic); + code = initStreamBasicInfo(&pInfo->basic); + QUERY_CHECK_CODE(code, lino, _error); if (downstream) { code = initIntervalSliceDownStream(downstream, &pInfo->streamAggSup, pPhyNode->type, pInfo->primaryTsIndex, &pInfo->twAggSup, &pInfo->basic, &pInfo->interval, pInfo->hasInterpoFunc); diff --git a/source/libs/executor/src/streamtimesliceoperator.c b/source/libs/executor/src/streamtimesliceoperator.c index 44004a4c6b..4fe8efe397 100644 --- a/source/libs/executor/src/streamtimesliceoperator.c +++ b/source/libs/executor/src/streamtimesliceoperator.c @@ -150,6 +150,7 @@ void destroyStreamTimeSliceOperatorInfo(void* param) { &pInfo->groupResInfo); pInfo->pOperator = NULL; } + destroyStreamBasicInfo(&pInfo->basic); colDataDestroy(&pInfo->twAggSup.timeWindowData); destroyStreamAggSupporter(&pInfo->streamAggSup); resetPrevAndNextWindow(pInfo->pFillSup); @@ -2201,7 +2202,8 @@ int32_t createStreamTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); setOperatorStreamStateFn(pOperator, streamTimeSliceReleaseState, streamTimeSliceReloadState); - initStreamBasicInfo(&pInfo->basic); + code = initStreamBasicInfo(&pInfo->basic); + QUERY_CHECK_CODE(code, lino, _error); if (downstream) { code = initTimeSliceDownStream(downstream, &pInfo->streamAggSup, pOperator->operatorType, pInfo->primaryTsIndex, &pInfo->twAggSup, &pInfo->basic, pInfo->pFillSup); diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 707018ac65..efe16ce662 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -771,7 +771,35 @@ bool getSumFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) { return true; } +static bool funcNotSupportStringSma(SFunctionNode* pFunc) { + SNode* pParam; + switch (pFunc->funcType) { + case FUNCTION_TYPE_MAX: + case FUNCTION_TYPE_MIN: + case FUNCTION_TYPE_SUM: + case FUNCTION_TYPE_AVG: + case FUNCTION_TYPE_AVG_PARTIAL: + case FUNCTION_TYPE_PERCENTILE: + case FUNCTION_TYPE_SPREAD: + case FUNCTION_TYPE_SPREAD_PARTIAL: + case FUNCTION_TYPE_SPREAD_MERGE: + case FUNCTION_TYPE_TWA: + case FUNCTION_TYPE_ELAPSED: + pParam = nodesListGetNode(pFunc->pParameterList, 0); + if (pParam && nodesIsExprNode(pParam) && (IS_VAR_DATA_TYPE(((SExprNode*)pParam)->resType.type))) { + return true; + } + break; + default: + break; + } + return false; +} + EFuncDataRequired statisDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow) { + if(funcNotSupportStringSma(pFunc)) { + return FUNC_DATA_REQUIRED_DATA_LOAD; + } return FUNC_DATA_REQUIRED_SMA_LOAD; } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index bea9b96215..bfe86aa2ac 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -99,6 +99,8 @@ const char* nodesNodeName(ENodeType type) { return "CountWindow"; case QUERY_NODE_ANOMALY_WINDOW: return "AnomalyWindow"; + case QUERY_NODE_STREAM_NOTIFY_OPTIONS: + return "StreamNotifyOptions"; case QUERY_NODE_SET_OPERATOR: return "SetOperator"; case QUERY_NODE_SELECT_STMT: @@ -5812,6 +5814,45 @@ static int32_t jsonToStreamOptions(const SJson* pJson, void* pObj) { return code; } +static const char* jkStreamNotifyOptionsAddrUrls = "AddrUrls"; +static const char* jkStreamNotifyOptionsEventType = "EventType"; +static const char* jkStreamNotifyOptionsErrorHandle = "ErrorHandle"; +static const char* jkStreamNotifyOptionsNotifyHistory = "NotifyHistory"; + +static int32_t streamNotifyOptionsToJson(const void* pObj, SJson* pJson) { + const SStreamNotifyOptions* pNotifyOption = (const SStreamNotifyOptions*)pObj; + int32_t code = nodeListToJson(pJson, jkStreamNotifyOptionsAddrUrls, pNotifyOption->pAddrUrls); + if (code == TSDB_CODE_SUCCESS) { + code = tjsonAddIntegerToObject(pJson, jkStreamNotifyOptionsEventType, pNotifyOption->eventTypes); + } + if (code == TSDB_CODE_SUCCESS) { + code = tjsonAddIntegerToObject(pJson, jkStreamNotifyOptionsErrorHandle, pNotifyOption->errorHandle); + } + if (code == TSDB_CODE_SUCCESS) { + code = tjsonAddBoolToObject(pJson, jkStreamNotifyOptionsNotifyHistory, pNotifyOption->notifyHistory); + } + + return code; +} + +static int32_t jsonToStreamNotifyOptions(const SJson* pJson, void* pObj) { + SStreamNotifyOptions* pNotifyOption = (SStreamNotifyOptions*)pObj; + int32_t code = jsonToNodeList(pJson, jkStreamNotifyOptionsAddrUrls, &pNotifyOption->pAddrUrls); + int32_t val = 0; + if (code == TSDB_CODE_SUCCESS) { + code = tjsonGetIntValue(pJson, jkStreamNotifyOptionsEventType, &val); + pNotifyOption->eventTypes = val; + } + if (code == TSDB_CODE_SUCCESS) { + code = tjsonGetIntValue(pJson, jkStreamNotifyOptionsErrorHandle, &val); + pNotifyOption->errorHandle = val; + } + if (code == TSDB_CODE_SUCCESS) { + code = tjsonGetBoolValue(pJson, jkStreamNotifyOptionsNotifyHistory, &pNotifyOption->notifyHistory); + } + return code; +} + static const char* jkWhenThenWhen = "When"; static const char* jkWhenThenThen = "Then"; @@ -7207,6 +7248,7 @@ static const char* jkCreateStreamStmtOptions = "Options"; static const char* jkCreateStreamStmtQuery = "Query"; static const char* jkCreateStreamStmtTags = "Tags"; static const char* jkCreateStreamStmtSubtable = "Subtable"; +static const char* jkCreateStreamStmtNotifyOptions = "NotifyOptions"; static int32_t createStreamStmtToJson(const void* pObj, SJson* pJson) { const SCreateStreamStmt* pNode = (const SCreateStreamStmt*)pObj; @@ -7233,6 +7275,9 @@ static int32_t createStreamStmtToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddObject(pJson, jkCreateStreamStmtSubtable, nodeToJson, pNode->pSubtable); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkCreateStreamStmtNotifyOptions, nodeToJson, pNode->pNotifyOptions); + } return code; } @@ -7262,6 +7307,9 @@ static int32_t jsonToCreateStreamStmt(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkCreateStreamStmtSubtable, &pNode->pSubtable); } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeObject(pJson, jkCreateStreamStmtNotifyOptions, (SNode**)&pNode->pNotifyOptions); + } return code; } @@ -8029,6 +8077,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { return countWindowNodeToJson(pObj, pJson); case QUERY_NODE_ANOMALY_WINDOW: return anomalyWindowNodeToJson(pObj, pJson); + case QUERY_NODE_STREAM_NOTIFY_OPTIONS: + return streamNotifyOptionsToJson(pObj, pJson); case QUERY_NODE_SET_OPERATOR: return setOperatorToJson(pObj, pJson); case QUERY_NODE_SELECT_STMT: @@ -8402,6 +8452,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { return jsonToCountWindowNode(pJson, pObj); case QUERY_NODE_ANOMALY_WINDOW: return jsonToAnomalyWindowNode(pJson, pObj); + case QUERY_NODE_STREAM_NOTIFY_OPTIONS: + return jsonToStreamNotifyOptions(pJson, pObj); case QUERY_NODE_SET_OPERATOR: return jsonToSetOperator(pJson, pObj); case QUERY_NODE_SELECT_STMT: diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 3d4df385f7..ae5b302d2d 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -467,6 +467,9 @@ int32_t nodesMakeNode(ENodeType type, SNode** ppNodeOut) { case QUERY_NODE_WINDOW_OFFSET: code = makeNode(type, sizeof(SWindowOffsetNode), &pNode); break; + case QUERY_NODE_STREAM_NOTIFY_OPTIONS: + code = makeNode(type, sizeof(SStreamNotifyOptions), &pNode); + break; case QUERY_NODE_SET_OPERATOR: code = makeNode(type, sizeof(SSetOperator), &pNode); break; @@ -1267,6 +1270,11 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyNode(pAround->pTimepoint); break; } + case QUERY_NODE_STREAM_NOTIFY_OPTIONS: { + SStreamNotifyOptions* pNotifyOptions = (SStreamNotifyOptions*)pNode; + nodesDestroyList(pNotifyOptions->pAddrUrls); + break; + } case QUERY_NODE_SET_OPERATOR: { SSetOperator* pStmt = (SSetOperator*)pNode; nodesDestroyList(pStmt->pProjectionList); @@ -1479,6 +1487,7 @@ void nodesDestroyNode(SNode* pNode) { nodesDestroyNode(pStmt->pQuery); nodesDestroyList(pStmt->pTags); nodesDestroyNode(pStmt->pSubtable); + nodesDestroyNode((SNode*)pStmt->pNotifyOptions); tFreeSCMCreateStreamReq(pStmt->pReq); taosMemoryFreeClear(pStmt->pReq); break; diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h index dc9986ad04..387bccf358 100644 --- a/source/libs/parser/inc/parAst.h +++ b/source/libs/parser/inc/parAst.h @@ -296,8 +296,12 @@ SNode* createDropFunctionStmt(SAstCreateContext* pCxt, bool ignoreNotExists, con SNode* createStreamOptions(SAstCreateContext* pCxt); SNode* setStreamOptions(SAstCreateContext* pCxt, SNode* pOptions, EStreamOptionsSetFlag setflag, SToken* pToken, SNode* pNode); +SNode* createStreamNotifyOptions(SAstCreateContext *pCxt, SNodeList* pAddrUrls, SNodeList* pEventTypes); +SNode* setStreamNotifyOptions(SAstCreateContext* pCxt, SNode* pNode, EStreamNotifyOptionSetFlag setFlag, + SToken* pToken); SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken* pStreamName, SNode* pRealTable, - SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols); + SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols, + SNode* pNotifyOptions); SNode* createDropStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pStreamName); SNode* createPauseStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pStreamName); SNode* createResumeStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, bool ignoreUntreated, SToken* pStreamName); diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index 7f383afe48..439af13d71 100644 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -785,7 +785,7 @@ full_view_name(A) ::= db_name(B) NK_DOT view_name(C). /************************************************ create/drop stream **************************************************/ cmd ::= CREATE STREAM not_exists_opt(E) stream_name(A) stream_options(B) INTO full_table_name(C) col_list_opt(H) tag_def_or_ref_opt(F) subtable_opt(G) - AS query_or_subquery(D). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, F, G, D, H); } + AS query_or_subquery(D) notify_opt(I). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, F, G, D, H, I); } cmd ::= DROP STREAM exists_opt(A) stream_name(B). { pCxt->pRootNode = createDropStreamStmt(pCxt, A, &B); } cmd ::= PAUSE STREAM exists_opt(A) stream_name(B). { pCxt->pRootNode = createPauseStreamStmt(pCxt, A, &B); } cmd ::= RESUME STREAM exists_opt(A) ignore_opt(C) stream_name(B). { pCxt->pRootNode = createResumeStreamStmt(pCxt, A, C, &B); } @@ -832,6 +832,26 @@ subtable_opt(A) ::= SUBTABLE NK_LP expression(B) NK_RP. ignore_opt(A) ::= . { A = false; } ignore_opt(A) ::= IGNORE UNTREATED. { A = true; } +notify_opt(A) ::= . { A = NULL; } +notify_opt(A) ::= notify_def(B). { A = B; } + +notify_def(A) ::= NOTIFY NK_LP url_def_list(B) NK_RP ON NK_LP event_def_list(C) NK_RP. { A = createStreamNotifyOptions(pCxt, B, C); } +notify_def(A) ::= notify_def(B) ON_FAILURE DROP(C). { A = setStreamNotifyOptions(pCxt, B, SNOTIFY_OPT_ERROR_HANDLE_SET, &C); } +notify_def(A) ::= notify_def(B) ON_FAILURE PAUSE(C). { A = setStreamNotifyOptions(pCxt, B, SNOTIFY_OPT_ERROR_HANDLE_SET, &C); } +notify_def(A) ::= notify_def(B) NOTIFY_HISTORY NK_INTEGER(C). { A = setStreamNotifyOptions(pCxt, B, SNOTIFY_OPT_NOTIFY_HISTORY_SET, &C); } + +%type url_def_list { SNodeList* } +%destructor url_def_list { nodesDestroyList($$); } +url_def_list(A) ::= NK_STRING(B). { A = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B)); } +url_def_list(A) ::= url_def_list(B) NK_COMMA NK_STRING(C). { A = addNodeToList(pCxt, B, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &C)); } + +%type event_def_list { SNodeList* } +%destructor event_def_list { nodesDestroyList($$); } +event_def_list(A) ::= NK_STRING(B). { A = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B)); } +event_def_list(A) ::= event_def_list(B) NK_COMMA NK_STRING(C). { A = addNodeToList(pCxt, B, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &C)); } + + + /************************************************ kill connection/query ***********************************************/ cmd ::= KILL CONNECTION NK_INTEGER(A). { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &A); } cmd ::= KILL QUERY NK_STRING(A). { pCxt->pRootNode = createKillQueryStmt(pCxt, &A); } diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 5b90fd601e..c875cbad05 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -1526,8 +1526,8 @@ SNode* createCaseWhenNode(SAstCreateContext* pCxt, SNode* pCase, SNodeList* pWhe pCaseWhen->pCase = pCase; pCaseWhen->pWhenThenList = pWhenThenList; pCaseWhen->pElse = pElse; - pCaseWhen->tz = pCxt->pQueryCxt->timezone; - pCaseWhen->charsetCxt = pCxt->pQueryCxt->charsetCxt; + pCaseWhen->tz = pCxt->pQueryCxt->timezone; + pCaseWhen->charsetCxt = pCxt->pQueryCxt->charsetCxt; return (SNode*)pCaseWhen; _err: nodesDestroyNode(pCase); @@ -3657,8 +3657,115 @@ SNode* setStreamOptions(SAstCreateContext* pCxt, SNode* pOptions, EStreamOptions return pOptions; } +static bool validateNotifyUrl(const char* url) { + const char* prefix[] = {"http://", "https://", "ws://", "wss://"}; + const char* host = NULL; + + if (!url || *url == '\0') return false; + + for (int32_t i = 0; i < ARRAY_SIZE(prefix); ++i) { + if (strncasecmp(url, prefix[i], strlen(prefix[i])) == 0) { + host = url + strlen(prefix[i]); + break; + } + } + + return (host != NULL) && (*host != '\0') && (*host != '/'); +} + +SNode* createStreamNotifyOptions(SAstCreateContext* pCxt, SNodeList* pAddrUrls, SNodeList* pEventTypes) { + SNode* pNode = NULL; + EStreamNotifyEventType eventTypes = 0; + const char* eWindowOpenStr = "WINDOW_OPEN"; + const char* eWindowCloseStr = "WINDOW_CLOSE"; + + CHECK_PARSER_STATUS(pCxt); + + if (LIST_LENGTH(pAddrUrls) == 0) { + pCxt->errCode = + generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "notification address cannot be empty"); + goto _err; + } + + FOREACH(pNode, pAddrUrls) { + char *url = ((SValueNode*)pNode)->literal; + if (strlen(url) >= TSDB_STREAM_NOTIFY_URL_LEN) { + pCxt->errCode = + generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, + "notification address \"%s\" exceed maximum length %d", url, TSDB_STREAM_NOTIFY_URL_LEN); + goto _err; + } + if (!validateNotifyUrl(url)) { + pCxt->errCode = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, + "invalid notification address \"%s\"", url); + goto _err; + } + } + + if (LIST_LENGTH(pEventTypes) == 0) { + pCxt->errCode = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, + "event types must be specified for notification"); + goto _err; + } + + FOREACH(pNode, pEventTypes) { + char *eventStr = ((SValueNode *)pNode)->literal; + if (strncasecmp(eventStr, eWindowOpenStr, strlen(eWindowOpenStr) + 1) == 0) { + BIT_FLAG_SET_MASK(eventTypes, SNOTIFY_EVENT_WINDOW_OPEN); + } else if (strncasecmp(eventStr, eWindowCloseStr, strlen(eWindowCloseStr) + 1) == 0) { + BIT_FLAG_SET_MASK(eventTypes, SNOTIFY_EVENT_WINDOW_CLOSE); + } else { + pCxt->errCode = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, + "invalid event type '%s' for notification", eventStr); + goto _err; + } + } + + SStreamNotifyOptions* pNotifyOptions = NULL; + pCxt->errCode = nodesMakeNode(QUERY_NODE_STREAM_NOTIFY_OPTIONS, (SNode**)&pNotifyOptions); + CHECK_MAKE_NODE(pNotifyOptions); + pNotifyOptions->pAddrUrls = pAddrUrls; + pNotifyOptions->eventTypes = eventTypes; + pNotifyOptions->errorHandle = SNOTIFY_ERROR_HANDLE_PAUSE; + pNotifyOptions->notifyHistory = false; + nodesDestroyList(pEventTypes); + return (SNode*)pNotifyOptions; +_err: + nodesDestroyList(pAddrUrls); + nodesDestroyList(pEventTypes); + return NULL; +} + +SNode* setStreamNotifyOptions(SAstCreateContext* pCxt, SNode* pNode, EStreamNotifyOptionSetFlag setFlag, + SToken* pToken) { + CHECK_PARSER_STATUS(pCxt); + + SStreamNotifyOptions* pNotifyOption = (SStreamNotifyOptions*)pNode; + if (BIT_FLAG_TEST_MASK(pNotifyOption->setFlag, setFlag)) { + pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, + "stream notify options each item can only be set once"); + goto _err; + } + switch (setFlag) { + case SNOTIFY_OPT_ERROR_HANDLE_SET: + pNotifyOption->errorHandle = (pToken->type == TK_DROP) ? SNOTIFY_ERROR_HANDLE_DROP : SNOTIFY_ERROR_HANDLE_PAUSE; + break; + case SNOTIFY_OPT_NOTIFY_HISTORY_SET: + pNotifyOption->notifyHistory = taosStr2Int8(pToken->z, NULL, 10); + break; + default: + break; + } + BIT_FLAG_SET_MASK(pNotifyOption->setFlag, setFlag); + return pNode; +_err: + nodesDestroyNode(pNode); + return NULL; +} + SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken* pStreamName, SNode* pRealTable, - SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols) { + SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols, + SNode* pNotifyOptions) { CHECK_PARSER_STATUS(pCxt); CHECK_NAME(checkStreamName(pCxt, pStreamName)); SCreateStreamStmt* pStmt = NULL; @@ -3674,6 +3781,7 @@ SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken pStmt->pTags = pTags; pStmt->pSubtable = pSubtable; pStmt->pCols = pCols; + pStmt->pNotifyOptions = (SStreamNotifyOptions*)pNotifyOptions; return (SNode*)pStmt; _err: nodesDestroyNode(pRealTable); @@ -3682,6 +3790,7 @@ _err: nodesDestroyList(pTags); nodesDestroyNode(pSubtable); nodesDestroyList(pCols); + nodesDestroyNode(pNotifyOptions); return NULL; } diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index 67ad874b15..5ff6e4f555 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -2751,6 +2751,9 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pS if (TSDB_CODE_SUCCESS == code && hasData) { code = parseInsertTableClause(pCxt, pStmt, &token); } + if (TSDB_CODE_PAR_TABLE_NOT_EXIST == code && pCxt->preCtbname) { + code = TSDB_CODE_TSC_STMT_TBNAME_ERROR; + } } if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) { diff --git a/source/libs/parser/src/parTokenizer.c b/source/libs/parser/src/parTokenizer.c index ea2e9d712f..7ed438a7dc 100644 --- a/source/libs/parser/src/parTokenizer.c +++ b/source/libs/parser/src/parTokenizer.c @@ -355,6 +355,9 @@ static SKeyword keywordTable[] = { {"FORCE_WINDOW_CLOSE", TK_FORCE_WINDOW_CLOSE}, {"DISK_INFO", TK_DISK_INFO}, {"AUTO", TK_AUTO}, + {"NOTIFY", TK_NOTIFY}, + {"ON_FAILURE", TK_ON_FAILURE}, + {"NOTIFY_HISTORY", TK_NOTIFY_HISTORY}, }; // clang-format on diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 1d87b83e62..74dd1be614 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -12192,6 +12192,45 @@ static int32_t translateStreamOptions(STranslateContext* pCxt, SCreateStreamStmt return TSDB_CODE_SUCCESS; } +static int32_t buildStreamNotifyOptions(STranslateContext* pCxt, SStreamNotifyOptions* pNotifyOptions, + SCMCreateStreamReq* pReq) { + int32_t code = TSDB_CODE_SUCCESS; + SNode* pNode = NULL; + + if (pNotifyOptions == NULL || pNotifyOptions->pAddrUrls->length == 0) { + return code; + } + + pReq->pNotifyAddrUrls = taosArrayInit(pNotifyOptions->pAddrUrls->length, POINTER_BYTES); + if (pReq->pNotifyAddrUrls != NULL) { + FOREACH(pNode, pNotifyOptions->pAddrUrls) { + char *url = taosStrndup(((SValueNode*)pNode)->literal, TSDB_STREAM_NOTIFY_URL_LEN); + if (url == NULL) { + code = terrno; + break; + } + if (taosArrayPush(pReq->pNotifyAddrUrls, &url) == NULL) { + code = terrno; + taosMemoryFreeClear(url); + break; + } + } + } else { + code = terrno; + } + + if (code == TSDB_CODE_SUCCESS) { + pReq->notifyEventTypes = pNotifyOptions->eventTypes; + pReq->notifyErrorHandle = pNotifyOptions->errorHandle; + pReq->notifyHistory = pNotifyOptions->notifyHistory; + } else { + taosArrayDestroyP(pReq->pNotifyAddrUrls, NULL); + pReq->pNotifyAddrUrls = NULL; + } + + return code; +} + static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq) { pReq->igExists = pStmt->ignoreExists; @@ -12238,6 +12277,10 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* } } + if (TSDB_CODE_SUCCESS == code) { + code = buildStreamNotifyOptions(pCxt, pStmt->pNotifyOptions, pReq); + } + return code; } diff --git a/source/libs/parser/test/parAlterToBalanceTest.cpp b/source/libs/parser/test/parAlterToBalanceTest.cpp index a81076557e..172c729f34 100644 --- a/source/libs/parser/test/parAlterToBalanceTest.cpp +++ b/source/libs/parser/test/parAlterToBalanceTest.cpp @@ -196,23 +196,16 @@ TEST_F(ParserInitialATest, alterDatabase) { setAlterDbFsync(200); setAlterDbWal(1); setAlterDbCacheModel(TSDB_CACHE_MODEL_LAST_ROW); -#ifndef _STORAGE - setAlterDbSttTrigger(-1); -#else setAlterDbSttTrigger(16); -#endif setAlterDbBuffer(16); setAlterDbPages(128); setAlterDbReplica(3); setAlterDbWalRetentionPeriod(10); setAlterDbWalRetentionSize(20); -#ifndef _STORAGE run("ALTER DATABASE test BUFFER 16 CACHEMODEL 'last_row' CACHESIZE 32 WAL_FSYNC_PERIOD 200 KEEP 10 PAGES 128 " - "REPLICA 3 WAL_LEVEL 1 WAL_RETENTION_PERIOD 10 WAL_RETENTION_SIZE 20"); -#else - run("ALTER DATABASE test BUFFER 16 CACHEMODEL 'last_row' CACHESIZE 32 WAL_FSYNC_PERIOD 200 KEEP 10 PAGES 128 " - "REPLICA 3 WAL_LEVEL 1 STT_TRIGGER 16 WAL_RETENTION_PERIOD 10 WAL_RETENTION_SIZE 20"); -#endif + "REPLICA 3 WAL_LEVEL 1 " + "STT_TRIGGER 16 " + "WAL_RETENTION_PERIOD 10 WAL_RETENTION_SIZE 20"); clearAlterDbReq(); initAlterDb("test"); diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp index 7d778d9c0b..2412bf4e78 100644 --- a/source/libs/parser/test/parInitialCTest.cpp +++ b/source/libs/parser/test/parInitialCTest.cpp @@ -292,11 +292,7 @@ TEST_F(ParserInitialCTest, createDatabase) { setDbWalRetentionSize(-1); setDbWalRollPeriod(10); setDbWalSegmentSize(20); -#ifndef _STORAGE setDbSstTrigger(1); -#else - setDbSstTrigger(16); -#endif setDbHashPrefix(3); setDbHashSuffix(4); setDbTsdbPageSize(32); @@ -354,7 +350,7 @@ TEST_F(ParserInitialCTest, createDatabase) { "WAL_RETENTION_SIZE -1 " "WAL_ROLL_PERIOD 10 " "WAL_SEGMENT_SIZE 20 " - "STT_TRIGGER 16 " + "STT_TRIGGER 1 " "TABLE_PREFIX 3 " "TABLE_SUFFIX 4 " "TSDB_PAGESIZE 32"); diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 42d7f44b62..baf36d0453 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -735,7 +735,7 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S !alreadyAddGroupId(pDataBlock->info.parTbName, groupId) && groupId != 0) { if (pTask->ver == SSTREAM_TASK_SUBTABLE_CHANGED_VER) { code = buildCtbNameAddGroupId(NULL, pDataBlock->info.parTbName, groupId, sizeof(pDataBlock->info.parTbName)); - } else if (pTask->ver > SSTREAM_TASK_SUBTABLE_CHANGED_VER) { + } else if (pTask->ver >= SSTREAM_TASK_APPEND_STABLE_NAME_VER) { code = buildCtbNameAddGroupId(pTask->outputInfo.shuffleDispatcher.stbFullName, pDataBlock->info.parTbName, groupId, sizeof(pDataBlock->info.parTbName)); } diff --git a/source/libs/stream/src/streamErrorInjection.c b/source/libs/stream/src/streamErrorInjection.c index 515845ba2b..8bbe403dcc 100644 --- a/source/libs/stream/src/streamErrorInjection.c +++ b/source/libs/stream/src/streamErrorInjection.c @@ -14,4 +14,4 @@ void chkptFailedByRetrieveReqToSource(SStreamTask* pTask, int64_t checkpointId) // the checkpoint interval should be 60s, and the next checkpoint req should be issued by mnode taosMsleep(65*1000); -} \ No newline at end of file +} diff --git a/source/libs/stream/src/streamHb.c b/source/libs/stream/src/streamHb.c index 53b6a38b35..7c157bb05e 100644 --- a/source/libs/stream/src/streamHb.c +++ b/source/libs/stream/src/streamHb.c @@ -331,7 +331,6 @@ void streamMetaHbToMnode(void* param, void* tmrId) { } else { stError("vgId:%d jump out of meta timer, failed to release the meta rid:%" PRId64, vgId, rid); } -// taosMemoryFree(param); return; } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 0de256d86d..c2a758f490 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -198,6 +198,7 @@ int32_t streamMetaCheckBackendCompatible(SStreamMeta* pMeta) { SCheckpointInfo info; tDecoderInit(&decoder, (uint8_t*)pVal, vLen); if (tDecodeStreamTaskChkInfo(&decoder, &info) < 0) { + tDecoderClear(&decoder); continue; } @@ -576,6 +577,7 @@ void streamMetaClose(SStreamMeta* pMeta) { if (pMeta == NULL) { return; } + int32_t code = taosRemoveRef(streamMetaRefPool, pMeta->rid); if (code) { stError("vgId:%d failed to remove meta ref:%" PRId64 ", code:%s", pMeta->vgId, pMeta->rid, tstrerror(code)); @@ -1031,6 +1033,7 @@ int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta) { SCheckpointInfo info; tDecoderInit(&decoder, (uint8_t*)pVal, vLen); if (tDecodeStreamTaskChkInfo(&decoder, &info) < 0) { + tDecoderClear(&decoder); continue; } tDecoderClear(&decoder); diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index d27ed520c6..5ee8bd43f5 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -326,6 +326,11 @@ void tFreeStreamTask(void* pParam) { streamTaskDestroyActiveChkptInfo(pTask->chkInfo.pActiveInfo); pTask->chkInfo.pActiveInfo = NULL; + taosArrayDestroyP(pTask->notifyInfo.pNotifyAddrUrls, NULL); + taosMemoryFreeClear(pTask->notifyInfo.streamName); + taosMemoryFreeClear(pTask->notifyInfo.stbFullName); + tDeleteSchemaWrapper(pTask->notifyInfo.pSchemaWrapper); + taosMemoryFree(pTask); stDebug("s-task:0x%x free task completed", taskId); } @@ -1318,6 +1323,78 @@ void streamTaskFreeRefId(int64_t* pRefId) { metaRefMgtRemove(pRefId); } +static int32_t tEncodeStreamNotifyInfo(SEncoder* pEncoder, const SNotifyInfo* info) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + QUERY_CHECK_NULL(pEncoder, code, lino, _exit, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(info, code, lino, _exit, TSDB_CODE_INVALID_PARA); + + int32_t addrSize = taosArrayGetSize(info->pNotifyAddrUrls); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, addrSize)); + for (int32_t i = 0; i < addrSize; ++i) { + const char* url = taosArrayGetP(info->pNotifyAddrUrls, i); + TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, url)); + } + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, info->notifyEventTypes)); + TAOS_CHECK_EXIT(tEncodeI32(pEncoder, info->notifyErrorHandle)); + if (addrSize > 0) { + TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, info->streamName)); + TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, info->stbFullName)); + TAOS_CHECK_EXIT(tEncodeSSchemaWrapper(pEncoder, info->pSchemaWrapper)); + } + +_exit: + if (code != TSDB_CODE_SUCCESS) { + stError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} + +static int32_t tDecodeStreamNotifyInfo(SDecoder* pDecoder, SNotifyInfo* info) { + int32_t code = TSDB_CODE_SUCCESS; + int32_t lino = 0; + + QUERY_CHECK_NULL(pDecoder, code, lino, _exit, TSDB_CODE_INVALID_PARA); + QUERY_CHECK_NULL(info, code, lino, _exit, TSDB_CODE_INVALID_PARA); + + int32_t addrSize = 0; + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &addrSize)); + info->pNotifyAddrUrls = taosArrayInit(addrSize, POINTER_BYTES); + QUERY_CHECK_NULL(info->pNotifyAddrUrls, code, lino, _exit, terrno); + for (int32_t i = 0; i < addrSize; ++i) { + char *url = NULL; + TAOS_CHECK_EXIT(tDecodeCStr(pDecoder, &url)); + url = taosStrndup(url, TSDB_STREAM_NOTIFY_URL_LEN); + QUERY_CHECK_NULL(url, code, lino, _exit, terrno); + if (taosArrayPush(info->pNotifyAddrUrls, &url) == NULL) { + taosMemoryFree(url); + TAOS_CHECK_EXIT(terrno); + } + } + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &info->notifyEventTypes)); + TAOS_CHECK_EXIT(tDecodeI32(pDecoder, &info->notifyErrorHandle)); + if (addrSize > 0) { + char* name = NULL; + TAOS_CHECK_EXIT(tDecodeCStr(pDecoder, &name)); + info->streamName = taosStrndup(name, TSDB_STREAM_FNAME_LEN + 1); + QUERY_CHECK_NULL(info->streamName, code, lino, _exit, terrno); + TAOS_CHECK_EXIT(tDecodeCStr(pDecoder, &name)); + info->stbFullName = taosStrndup(name, TSDB_STREAM_FNAME_LEN + 1); + QUERY_CHECK_NULL(info->stbFullName, code, lino, _exit, terrno); + info->pSchemaWrapper = taosMemoryCalloc(1, sizeof(SSchemaWrapper)); + if (info->pSchemaWrapper == NULL) { + TAOS_CHECK_EXIT(terrno); + } + TAOS_CHECK_EXIT(tDecodeSSchemaWrapper(pDecoder, info->pSchemaWrapper)); + } + +_exit: + if (code != TSDB_CODE_SUCCESS) { + stError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + return code; +} int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) { int32_t code = 0; @@ -1388,6 +1465,10 @@ int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) { TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTask->subtableWithoutMd5)); TAOS_CHECK_EXIT(tEncodeCStrWithLen(pEncoder, pTask->reserve, sizeof(pTask->reserve) - 1)); + if (pTask->ver >= SSTREAM_TASK_ADD_NOTIFY_VER) { + TAOS_CHECK_EXIT(tEncodeStreamNotifyInfo(pEncoder, &pTask->notifyInfo)); + } + tEndEncode(pEncoder); _exit: return code; @@ -1486,8 +1567,12 @@ int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) { } TAOS_CHECK_EXIT(tDecodeCStrTo(pDecoder, pTask->reserve)); + if (pTask->ver >= SSTREAM_TASK_ADD_NOTIFY_VER) { + TAOS_CHECK_EXIT(tDecodeStreamNotifyInfo(pDecoder, &pTask->notifyInfo)); + } + tEndDecode(pDecoder); _exit: return code; -} \ No newline at end of file +} diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index 3022a1f8ac..18252db9ee 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -732,7 +732,11 @@ int32_t syncFsmExecute(SSyncNode* pNode, SSyncFSM* pFsm, ESyncState role, SyncTe pEntry->index, pEntry->term, TMSG_INFO(pEntry->originalRpcType), code, retry); if (retry) { taosMsleep(10); - sError("vgId:%d, retry on fsm commit since %s. index:%" PRId64, pNode->vgId, tstrerror(code), pEntry->index); + if (code == TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE) { + sError("vgId:%d, failed to execute fsm since %s. index:%" PRId64, pNode->vgId, terrstr(), pEntry->index); + } else { + sDebug("vgId:%d, retry on fsm commit since %s. index:%" PRId64, pNode->vgId, terrstr(), pEntry->index); + } } } while (retry); diff --git a/source/libs/tcs/test/tcsTest.cpp b/source/libs/tcs/test/tcsTest.cpp index 40d9eac7a0..86f2b70486 100644 --- a/source/libs/tcs/test/tcsTest.cpp +++ b/source/libs/tcs/test/tcsTest.cpp @@ -234,6 +234,13 @@ TEST(TcsTest, InterfaceTest) { // TEST(TcsTest, DISABLED_InterfaceNonBlobTest) { TEST(TcsTest, InterfaceNonBlobTest) { +#ifndef TD_ENTERPRISE + // NOTE: this test case will coredump for community edition of taos + // thus we bypass this test case for the moment + // code = tcsGetObjectBlock(object_name, 0, size, check, &pBlock); + // tcsGetObjectBlock succeeded but pBlock is nullptr + // which results in nullptr-access-coredump shortly after +#else int code = 0; bool check = false; bool withcp = false; @@ -348,4 +355,5 @@ TEST(TcsTest, InterfaceNonBlobTest) { GTEST_ASSERT_EQ(code, 0); tcsUninit(); +#endif } diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index 4f5ca8d789..03ef00a0c0 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -1490,3 +1490,32 @@ bool taosAssertRelease(bool condition) { return true; } #endif + +char* u64toaFastLut(uint64_t val, char* buf) { + static const char* lut = + "0001020304050607080910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455" + "5657585960616263646566676869707172737475767778798081828384858687888990919293949596979899"; + + char temp[24]; + char* p = temp; + + while (val >= 100) { + strncpy(p, lut + (val % 100) * 2, 2); + val /= 100; + p += 2; + } + + if (val >= 10) { + strncpy(p, lut + val * 2, 2); + p += 2; + } else if (val > 0 || p == temp) { + *(p++) = val + '0'; + } + + while (p != temp) { + *buf++ = *--p; + } + + *buf = '\0'; + return buf; +} diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c index f531d9ad61..db75c6b2ff 100644 --- a/source/util/src/tqueue.c +++ b/source/util/src/tqueue.c @@ -14,14 +14,16 @@ */ #define _DEFAULT_SOURCE -#include "tqueue.h" #include "taoserror.h" #include "tlog.h" +#include "tqueue.h" #include "tutil.h" int64_t tsQueueMemoryAllowed = 0; int64_t tsQueueMemoryUsed = 0; +int64_t tsApplyMemoryAllowed = 0; +int64_t tsApplyMemoryUsed = 0; struct STaosQueue { STaosQnode *head; STaosQnode *tail; @@ -148,20 +150,34 @@ int64_t taosQueueMemorySize(STaosQueue *queue) { } int32_t taosAllocateQitem(int32_t size, EQItype itype, int64_t dataSize, void **item) { - int64_t alloced = atomic_add_fetch_64(&tsQueueMemoryUsed, size + dataSize); + int64_t alloced = -1; + if (alloced > tsQueueMemoryAllowed) { + alloced = atomic_add_fetch_64(&tsQueueMemoryUsed, size + dataSize); if (itype == RPC_QITEM) { uError("failed to alloc qitem, size:%" PRId64 " alloc:%" PRId64 " allowed:%" PRId64, size + dataSize, alloced, tsQueueMemoryAllowed); (void)atomic_sub_fetch_64(&tsQueueMemoryUsed, size + dataSize); return (terrno = TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE); } + } else if (itype == APPLY_QITEM) { + alloced = atomic_add_fetch_64(&tsApplyMemoryUsed, size + dataSize); + if (alloced > tsApplyMemoryAllowed) { + uDebug("failed to alloc qitem, size:%" PRId64 " alloc:%" PRId64 " allowed:%" PRId64, size + dataSize, alloced, + tsApplyMemoryAllowed); + (void)atomic_sub_fetch_64(&tsApplyMemoryUsed, size + dataSize); + return (terrno = TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE); + } } *item = NULL; STaosQnode *pNode = taosMemoryCalloc(1, sizeof(STaosQnode) + size); if (pNode == NULL) { - (void)atomic_sub_fetch_64(&tsQueueMemoryUsed, size + dataSize); + if (itype == RPC_QITEM) { + (void)atomic_sub_fetch_64(&tsQueueMemoryUsed, size + dataSize); + } else if (itype == APPLY_QITEM) { + (void)atomic_sub_fetch_64(&tsApplyMemoryUsed, size + dataSize); + } return terrno; } @@ -178,7 +194,12 @@ void taosFreeQitem(void *pItem) { if (pItem == NULL) return; STaosQnode *pNode = (STaosQnode *)((char *)pItem - sizeof(STaosQnode)); - int64_t alloced = atomic_sub_fetch_64(&tsQueueMemoryUsed, pNode->size + pNode->dataSize); + int64_t alloced = -1; + if (pNode->itype == RPC_QITEM) { + alloced = atomic_sub_fetch_64(&tsQueueMemoryUsed, pNode->size + pNode->dataSize); + } else if (pNode->itype == APPLY_QITEM) { + alloced = atomic_sub_fetch_64(&tsApplyMemoryUsed, pNode->size + pNode->dataSize); + } uTrace("item:%p, node:%p is freed, alloc:%" PRId64, pItem, pNode, alloced); taosMemoryFree(pNode); diff --git a/source/util/test/CMakeLists.txt b/source/util/test/CMakeLists.txt index ec05a4e415..768e465fea 100644 --- a/source/util/test/CMakeLists.txt +++ b/source/util/test/CMakeLists.txt @@ -142,10 +142,6 @@ target_include_directories( PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../inc" ) -IF(COMPILER_SUPPORT_AVX2) - MESSAGE(STATUS "AVX2 instructions is ACTIVATED") - set_source_files_properties(decompressTest.cpp PROPERTIES COMPILE_FLAGS -mavx2) -ENDIF() add_executable(decompressTest "decompressTest.cpp") target_link_libraries(decompressTest os util common gtest_main) add_test( diff --git a/source/util/test/decompressTest.cpp b/source/util/test/decompressTest.cpp index e508c489df..b1f7f7e85c 100644 --- a/source/util/test/decompressTest.cpp +++ b/source/util/test/decompressTest.cpp @@ -524,23 +524,20 @@ static void decompressBasicTest(size_t dataSize, const CompF& compress, const De decltype(origData) decompData(origData.size()); // test simple implementation without SIMD instructions - tsSIMDEnable = 0; + tsAVX2Supported = 0; cnt = decompress(compData.data(), compData.size(), decompData.size(), decompData.data(), decompData.size(), ONE_STAGE_COMP, nullptr, 0); ASSERT_EQ(cnt, compData.size() - 1); EXPECT_EQ(origData, decompData); -#ifdef __AVX2__ - if (DataTypeSupportAvx::value) { + taosGetSystemInfo(); + if (DataTypeSupportAvx::value && tsAVX2Supported) { // test AVX2 implementation - tsSIMDEnable = 1; - tsAVX2Supported = 1; cnt = decompress(compData.data(), compData.size(), decompData.size(), decompData.data(), decompData.size(), ONE_STAGE_COMP, nullptr, 0); ASSERT_EQ(cnt, compData.size() - 1); EXPECT_EQ(origData, decompData); } -#endif } template @@ -557,7 +554,7 @@ static void decompressPerfTest(const char* typname, const CompF& compress, const << "; Compression ratio: " << 1.0 * (compData.size() - 1) / cnt << "\n"; decltype(origData) decompData(origData.size()); - tsSIMDEnable = 0; + tsAVX2Supported = 0; auto ms = measureRunTime( [&]() { decompress(compData.data(), compData.size(), decompData.size(), decompData.data(), decompData.size(), @@ -567,10 +564,8 @@ static void decompressPerfTest(const char* typname, const CompF& compress, const std::cout << "Decompression of " << NROUND * DATA_SIZE << " " << typname << " without SIMD costs " << ms << " ms, avg speed: " << NROUND * DATA_SIZE * 1000 / ms << " tuples/s\n"; -#ifdef __AVX2__ - if (DataTypeSupportAvx::value) { - tsSIMDEnable = 1; - tsAVX2Supported = 1; + taosGetSystemInfo(); + if (DataTypeSupportAvx::value && tsAVX2Supported) { ms = measureRunTime( [&]() { decompress(compData.data(), compData.size(), decompData.size(), decompData.data(), decompData.size(), @@ -580,7 +575,6 @@ static void decompressPerfTest(const char* typname, const CompF& compress, const std::cout << "Decompression of " << NROUND * DATA_SIZE << " " << typname << " using AVX2 costs " << ms << " ms, avg speed: " << NROUND * DATA_SIZE * 1000 / ms << " tuples/s\n"; } -#endif } #define RUN_PERF_TEST(typname, comp, decomp, min, max) \ diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000000..58747d93f7 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,233 @@ +# Table of Contents + +1. [Introduction](#1-introduction) +1. [Prerequisites](#2-prerequisites) +1. [Testing Guide](#3-testing-guide) + - [3.1 Unit Test](#31-unit-test) + - [3.2 System Test](#32-system-test) + - [3.3 Legacy Test](#33-legacy-test) + - [3.4 Smoke Test](#34-smoke-test) + - [3.5 Chaos Test](#35-chaos-test) + - [3.6 CI Test](#36-ci-test) + +# 1. Introduction + +This manual is intended to give developers a comprehensive guidance to test TDengine efficiently. It is divided into three main sections: introduction, prerequisites and testing guide. + +> [!NOTE] +> - The commands and scripts below are verified on Linux (Ubuntu 18.04/20.04/22.04). +> - The commands and steps described below are to run the tests on a single host. + +# 2. Prerequisites + +- Install Python3 + +```bash +apt install python3 +apt install python3-pip +``` + +- Install Python dependencies + +```bash +pip3 install pandas psutil fabric2 requests faker simplejson \ + toml pexpect tzlocal distro decorator loguru hyperloglog +``` + +- Install Python connector for TDengine + +```bash +pip3 install taospy taos-ws-py +``` + +- Building + +Before testing, please make sure the building operation with option `-DBUILD_TOOLS=true -DBUILD_TEST=true -DBUILD_CONTRIB=true` has been done, otherwise execute commands below: + +```bash +cd debug +cmake .. -DBUILD_TOOLS=true -DBUILD_TEST=true -DBUILD_CONTRIB=true +make && make install +``` + +# 3. Testing Guide + +In `tests` directory, there are different types of tests for TDengine. Below is a brief introduction about how to run them and how to add new cases. + +### 3.1 Unit Test + +Unit tests are the smallest testable units, which are used to test functions, methods or classes in TDengine code. + +### 3.1.1 How to run single test case? + +```bash +cd debug/build/bin +./osTimeTests +``` + +### 3.1.2 How to run all unit test cases? + +```bash +cd tests/unit-test/ +bash test.sh -e 0 +``` + +### 3.1.3 How to add new cases? + +
+ +Detailed steps to add new unit test case + +The Google test framwork is used for unit testing to specific function module, please refer to steps below to add a new test case: + +##### a. Create test case file and develop the test scripts + +In the test directory corresponding to the target function module, create test files in CPP format and write corresponding test cases. + +##### b. Update build configuration + +Modify the CMakeLists.txt file in this directory to ensure that the new test files are properly included in the compilation process. See the `source/os/test/CMakeLists.txt` file for configuration examples. + +##### c. Compile test code + +In the root directory of the project, create a compilation directory (e.g., debug), switch to the directory and run CMake commands (e.g., `cmake .. -DBUILD_TEST=1`) to generate a compilation file, + +and then run a compilation command (e.g. make) to complete the compilation of the test code. + +##### d. Execute the test program + +Find the executable file in the compiled directory(e.g. `TDengine/debug/build/bin/`) and run it. + +##### e. Integrate into CI tests + +Use the add_test command to add new compiled test cases into CI test collection, ensure that the new added test cases can be run for every build. + +
+ +## 3.2 System Test + +System tests are end-to-end test cases written in Python from a system point of view. Some of them are designed to test features only in enterprise ediiton, so when running on community edition, they may fail. We'll fix this issue by separating the cases into different gruops in the future. + +### 3.2.1 How to run a single test case? + +Take test file `system-test/2-query/avg.py` for example: + +```bash +cd tests/system-test +python3 ./test.py -f 2-query/avg.py +``` + +### 3.2.2 How to run all system test cases? + +```bash +cd tests +./run_all_ci_cases.sh -t python # all python cases +``` + +### 3.2.3 How to add new case? + +
+ +Detailed steps to add new system test case + +The Python test framework is developed by TDengine team, and test.py is the test case execution and monitoring of the entry program, Use `python3 ./test.py -h` to view more features. + +Please refer to steps below for how to add a new test case: + +##### a. Create a test case file and develop the test cases + +Create a file in `tests/system-test` containing each functional directory and refer to the use case template `tests/system-test/0-others/test_case_template.py` to add a new test case. + +##### b. Execute the test case + +Ensure the test case execution is successful. + +``` bash +cd tests/system-test && python3 ./test.py -f 0-others/test_case_template.py +``` + +##### c. Integrate into CI tests + +Edit `tests/parallel_test/cases.task` and add the testcase path and executions in the specified format. The third column indicates whether to use Address Sanitizer mode for testing. + +```bash +#caseID,rerunTimes,Run with Sanitizer,casePath,caseCommand +,,n,system-test, python3 ./test.py -f 0-others/test_case_template.py +``` + +
+ +## 3.3 Legacy Test + +In the early stage of TDengine development, test cases are run by an internal test framework called TSIM, which is developed in C++. + +### 3.3.1 How to run single test case? + +To run the legacy test cases, please execute the following commands: + +```bash +cd tests/script +./test.sh -f tsim/db/basic1.sim +``` + +### 3.3.2 How to run all legacy test cases? + +```bash +cd tests +./run_all_ci_cases.sh -t legacy # all legacy cases +``` + +### 3.3.3 How to add new cases? + +> [!NOTE] +> TSIM test framwork is deprecated by system test now, it is encouraged to add new test cases in system test, please refer to [System Test](#32-system-test) for details. + +## 3.4 Smoke Test + +Smoke test is a group of test cases selected from system test, which is also known as sanity test to ensure the critical functionalities of TDengine. + +### 3.4.1 How to run test? + +```bash +cd /root/TDengine/packaging/smokeTest +./test_smoking_selfhost.sh +``` + +### 3.4.2 How to add new cases? + +New cases can be added by updating the value of `commands` variable in `test_smoking_selfhost.sh`. + +## 3.5 Chaos Test + +A simple tool to execute various functions of the system in a randomized way, hoping to expose potential problems without a pre-defined test scenario. + +### 3.5.1 How to run test? + +```bash +cd tests/pytest +python3 auto_crash_gen.py +``` + +### 3.5.2 How to add new cases? + +1. Add a function, such as `TaskCreateNewFunction` in `pytest/crash_gen/crash_gen_main.py`. +2. Integrate `TaskCreateNewFunction` into the `balance_pickTaskType` function in `crash_gen_main.py`. + +## 3.6 CI Test + +CI testing (Continuous Integration testing), is an important practice in software development that aims to automate frequent integration of code into a shared codebase, build and test it to ensure code quality and stability. + +TDengine CI testing will run all the test cases from the following three types of tests: unit test, system test and legacy test. + +### 3.6.1 How to run all CI test cases? + +If this is the first time to run all the CI test cases, it is recommended to add the test branch, please run it with following commands: + +```bash +cd tests +./run_all_ci_cases.sh -b main # on main branch +``` + +### 3.6.2 How to add new cases? + +Please refer to the [Unit Test](#31-unit-test)、[System Test](#32-system-test) and [Legacy Test](#33-legacy-test) sections for detailed steps to add new test cases, when new cases are added in aboved tests, they will be run automatically by CI test. diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 4d10291be2..0201c88d2b 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -331,6 +331,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_ts5466.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_td33504.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_ts-5473.py +,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_ts5906.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/td-32187.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/td-33225.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_ts4563.py diff --git a/tests/pytest/auto_crash_gen.py b/tests/pytest/auto_crash_gen.py index 316f2ead0f..a35beb3395 100755 --- a/tests/pytest/auto_crash_gen.py +++ b/tests/pytest/auto_crash_gen.py @@ -244,7 +244,7 @@ def start_taosd(): else: pass - start_cmd = 'cd %s && python3 test.py >>/dev/null '%(start_path) + start_cmd = 'cd %s && python3 test.py -G >>/dev/null '%(start_path) os.system(start_cmd) def get_cmds(args_list): @@ -371,7 +371,7 @@ Result: {msg_dict[status]} Details Owner: Jayden Jia Start time: {starttime} -End time: {endtime} +End time: {endtime} Hostname: {hostname} Commit: {git_commit} Cmd: {cmd} @@ -380,14 +380,13 @@ Core dir: {core_dir} ''' text_result=text.split("Result: ")[1].split("Details")[0].strip() print(text_result) - if text_result == "success": - send_msg(notification_robot_url, get_msg(text)) + send_msg(notification_robot_url, get_msg(text)) else: - send_msg(alert_robot_url, get_msg(text)) - send_msg(notification_robot_url, get_msg(text)) - - #send_msg(get_msg(text)) + send_msg(alert_robot_url, get_msg(text)) + send_msg(notification_robot_url, get_msg(text)) + + #send_msg(get_msg(text)) except Exception as e: print("exception:", e) exit(status) diff --git a/tests/pytest/auto_crash_gen_valgrind.py b/tests/pytest/auto_crash_gen_valgrind.py index b7af68cd2f..0bd70ebf3f 100755 --- a/tests/pytest/auto_crash_gen_valgrind.py +++ b/tests/pytest/auto_crash_gen_valgrind.py @@ -245,7 +245,7 @@ def start_taosd(): else: pass - start_cmd = 'cd %s && python3 test.py '%(start_path) + start_cmd = 'cd %s && python3 test.py -G'%(start_path) os.system(start_cmd +">>/dev/null") def get_cmds(args_list): @@ -404,24 +404,24 @@ Result: {msg_dict[status]} Details Owner: Jayden Jia Start time: {starttime} -End time: {endtime} +End time: {endtime} Hostname: {hostname} Commit: {git_commit} Cmd: {cmd} Log dir: {log_dir} Core dir: {core_dir} ''' - + text_result=text.split("Result: ")[1].split("Details")[0].strip() print(text_result) - + if text_result == "success": send_msg(notification_robot_url, get_msg(text)) else: - send_msg(alert_robot_url, get_msg(text)) + send_msg(alert_robot_url, get_msg(text)) send_msg(notification_robot_url, get_msg(text)) - - #send_msg(get_msg(text)) + + #send_msg(get_msg(text)) except Exception as e: print("exception:", e) exit(status) diff --git a/tests/pytest/auto_crash_gen_valgrind_cluster.py b/tests/pytest/auto_crash_gen_valgrind_cluster.py index df40b60967..b4b90e1f5e 100755 --- a/tests/pytest/auto_crash_gen_valgrind_cluster.py +++ b/tests/pytest/auto_crash_gen_valgrind_cluster.py @@ -236,7 +236,7 @@ def start_taosd(): else: pass - start_cmd = 'cd %s && python3 test.py -N 4 -M 1 '%(start_path) + start_cmd = 'cd %s && python3 test.py -N 4 -M 1 -G '%(start_path) os.system(start_cmd +">>/dev/null") def get_cmds(args_list): @@ -388,28 +388,28 @@ def main(): text = f''' Result: {msg_dict[status]} - + Details Owner: Jayden Jia Start time: {starttime} -End time: {endtime} +End time: {endtime} Hostname: {hostname} Commit: {git_commit} Cmd: {cmd} Log dir: {log_dir} Core dir: {core_dir} ''' - + text_result=text.split("Result: ")[1].split("Details")[0].strip() print(text_result) - + if text_result == "success": send_msg(notification_robot_url, get_msg(text)) else: - send_msg(alert_robot_url, get_msg(text)) - send_msg(notification_robot_url, get_msg(text)) - - #send_msg(get_msg(text)) + send_msg(alert_robot_url, get_msg(text)) + send_msg(notification_robot_url, get_msg(text)) + + #send_msg(get_msg(text)) except Exception as e: print("exception:", e) exit(status) diff --git a/tests/run_all_ci_cases.sh b/tests/run_all_ci_cases.sh index 41040f3c43..1374fc1431 100755 --- a/tests/run_all_ci_cases.sh +++ b/tests/run_all_ci_cases.sh @@ -23,20 +23,24 @@ function printHelp() { echo " -b [Build test branch] Build test branch (default: null)" echo " Options: " echo " e.g., -b main (pull main branch, build and install)" + echo " -t [Run test cases] Run test cases type(default: all)" + echo " Options: " + echo " e.g., -t all/python/legacy" echo " -s [Save cases log] Save cases log(default: notsave)" echo " Options:" - echo " e.g., -c notsave : do not save the log " - echo " -c save : default save ci case log in Project dir/tests/ci_bak" + echo " e.g., -s notsave : do not save the log " + echo " -s save : default save ci case log in Project dir/tests/ci_bak" exit 0 } # Initialization parameter PROJECT_DIR="" BRANCH="" +TEST_TYPE="" SAVE_LOG="notsave" # Parse command line parameters -while getopts "hb:d:s:" arg; do +while getopts "hb:d:t:s:" arg; do case $arg in d) PROJECT_DIR=$OPTARG @@ -44,6 +48,9 @@ while getopts "hb:d:s:" arg; do b) BRANCH=$OPTARG ;; + t) + TEST_TYPE=$OPTARG + ;; s) SAVE_LOG=$OPTARG ;; @@ -315,9 +322,9 @@ function runTest() { [ -d sim ] && rm -rf sim [ -f $TDENGINE_ALLCI_REPORT ] && rm $TDENGINE_ALLCI_REPORT - runUnitTest runSimCases runPythonCases + runUnitTest stopTaosd cd $TDENGINE_DIR/tests/script @@ -361,7 +368,13 @@ print_color "$GREEN" "Run all ci test cases" | tee -a $WORK_DIR/date.log stopTaosd -runTest +if [ -z "$TEST_TYPE" -o "$TEST_TYPE" = "all" -o "$TEST_TYPE" = "ALL" ]; then + runTest +elif [ "$TEST_TYPE" = "python" -o "$TEST_TYPE" = "PYTHON" ]; then + runPythonCases +elif [ "$TEST_TYPE" = "legacy" -o "$TEST_TYPE" = "LEGACY" ]; then + runSimCases +fi date >> $WORK_DIR/date.log print_color "$GREEN" "End of ci test cases" | tee -a $WORK_DIR/date.log \ No newline at end of file diff --git a/tests/system-test/0-others/test_case_template.py b/tests/system-test/0-others/test_case_template.py new file mode 100644 index 0000000000..fa1a9b5ade --- /dev/null +++ b/tests/system-test/0-others/test_case_template.py @@ -0,0 +1,55 @@ + +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes +from util.dnodes import * +from util.common import * + + +class TDTestCase: + + """ + Here is the class description for the whole file cases + """ + + # add the configuration of the client and server here + clientCfgDict = {'debugFlag': 131} + updatecfgDict = { + "debugFlag" : "131", + "queryBufferSize" : 10240, + 'clientCfg' : clientCfgDict + } + + def init(self, conn, logSql, replicaVar=1): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + self.replicaVar = int(replicaVar) + + + def test_function(self): # case function should be named start with test_ + """ + Here is the function description for single test: + Test case for custom function + """ + tdLog.info(f"Test case test custom function") + # excute the sql + tdSql.execute(f"create database db_test_function") + tdSql.execute(f"create table db_test_function.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);") + # qury the result and return the result + tdSql.query(f"show databases") + # print result and check the result + database_info = tdLog.info(f"{tdSql.queryResult}") + tdSql.checkRows(3) + tdSql.checkData(2,0,"db_test_function") + + + def run(self): + self.test_function() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/smaTest.py b/tests/system-test/2-query/smaTest.py index 355ac04707..c0a81720ae 100644 --- a/tests/system-test/2-query/smaTest.py +++ b/tests/system-test/2-query/smaTest.py @@ -75,6 +75,7 @@ class TDTestCase: tdLog.debug(" LIMIT test_case2 ............ [OK]") self.test_TD_33336() + self.ts5900() # stop def stop(self): @@ -137,6 +138,47 @@ class TDTestCase: tdLog.debug("INSERT TABLE DATA ............ [OK]") return + + def ts5900query(self): + sql = "select max(c0) from ts5900.tt1" + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, '99.0') + sql = "select min(c0) from ts5900.tt1" + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, '1.0') + + def ts5900(self): + tdSql.execute("drop database if exists ts5900;") + tdSql.execute("create database ts5900;") + + tdSql.execute("create table ts5900.meters (ts timestamp, c0 varchar(64)) tags(t0 varchar(64));") + + sql = "CREATE TABLE ts5900.`tt1` USING ts5900.`meters` TAGS ('t11')" + tdSql.execute(sql) + for i in range(155): + tdSql.query(f"insert into ts5900.tt1 values(now+{i*10}s, '{i+1}.0')") + tdSql.query("insert into ts5900.tt1 values(now, '1.2')") + tdSql.query("insert into ts5900.tt1 values(now+1s, '2.0')") + tdSql.query("insert into ts5900.tt1 values(now+2s, '3.0')") + tdSql.query("insert into ts5900.tt1 values(now+3s, '105.0')") + tdSql.query("insert into ts5900.tt1 values(now+4s, '4.0')") + + sql = "select count(*) from ts5900.tt1" + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, '160') + + for i in range(10): + tdSql.execute("flush database ts5900") + time.sleep(1) + self.ts5900query() + tdSql.query(f"insert into ts5900.tt1 values(now, '23.0')") + self.ts5900query() + tdLog.info(f"ts5900 test {i} ............ [OK]") + time.sleep(1) + # test case1 base # def test_case1(self): diff --git a/tests/system-test/7-tmq/tmq_ts5906.py b/tests/system-test/7-tmq/tmq_ts5906.py new file mode 100644 index 0000000000..13e756baa3 --- /dev/null +++ b/tests/system-test/7-tmq/tmq_ts5906.py @@ -0,0 +1,90 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +from taos.tmq import * +from taos import * + +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 143, 'asynclog': 0} + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def test(self): + tdSql.execute(f'create database if not exists db vgroups 1') + tdSql.execute(f'use db') + tdSql.execute(f'CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)') + tdSql.execute("INSERT INTO d1001 USING meters TAGS('California.SanFrancisco1', 2) VALUES('2018-10-05 14:38:05.000',10.30000,219,0.31000)") + + + tdSql.execute(f'create topic t0 as select * from meters') + + consumer_dict = { + "group.id": "g1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "auto.offset.reset": "earliest", + } + consumer = Consumer(consumer_dict) + + try: + consumer.subscribe(["t0"]) + except TmqError: + tdLog.exit(f"subscribe error") + + index = 0; + try: + while True: + if index == 2: + break + res = consumer.poll(5) + print(res) + if not res: + print("res null") + break + val = res.value() + if val is None: + continue + for block in val: + data = block.fetchall() + for element in data: + print(f"data len: {len(data)}") + print(element) + if index == 0 and data[0][-1] != 2: + tdLog.exit(f"error: {data[0][-1]}") + if index == 1 and data[0][-1] != 100: + tdLog.exit(f"error: {data[0][-1]}") + + tdSql.execute("alter table d1001 set tag groupId = 100") + tdSql.execute("INSERT INTO d1001 VALUES('2018-10-05 14:38:06.000',10.30000,219,0.31000)") + index += 1 + finally: + consumer.close() + + + def run(self): + self.test() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/test.py b/tests/system-test/test.py index 0d40544be8..ab1bdc21d3 100644 --- a/tests/system-test/test.py +++ b/tests/system-test/test.py @@ -58,12 +58,12 @@ def checkRunTimeError(): if hwnd: os.system("TASKKILL /F /IM taosd.exe") -# +# # run case on previous cluster # def runOnPreviousCluster(host, config, fileName): print("enter run on previeous") - + # load case module sep = "/" if platform.system().lower() == 'windows': @@ -113,8 +113,9 @@ if __name__ == "__main__": asan = False independentMnode = False previousCluster = False - opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RWD:n:i:aP', [ - 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums','restful','websocket','adaptercfgupdate','replicaVar','independentMnode','previous']) + crashGen = False + opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RWD:n:i:aP:G', [ + 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums','restful','websocket','adaptercfgupdate','replicaVar','independentMnode','previous',"crashGen"]) for key, value in opts: if key in ['-h', '--help']: tdLog.printNoPrefix( @@ -141,6 +142,7 @@ if __name__ == "__main__": tdLog.printNoPrefix('-i independentMnode Mnode') tdLog.printNoPrefix('-a address sanitizer mode') tdLog.printNoPrefix('-P run case with [P]revious cluster, do not create new cluster to run case.') + tdLog.printNoPrefix('-G crashGen mode') sys.exit(0) @@ -208,7 +210,7 @@ if __name__ == "__main__": if key in ['-R', '--restful']: restful = True - + if key in ['-W', '--websocket']: websocket = True @@ -228,6 +230,10 @@ if __name__ == "__main__": if key in ['-P', '--previous']: previousCluster = True + if key in ['-G', '--crashGen']: + crashGen = True + + # # do exeCmd command # @@ -405,7 +411,7 @@ if __name__ == "__main__": for dnode in tdDnodes.dnodes: tdDnodes.starttaosd(dnode.index) tdCases.logSql(logSql) - + if restful or websocket: tAdapter.deploy(adapter_cfg_dict) tAdapter.start() @@ -450,7 +456,7 @@ if __name__ == "__main__": else: tdLog.debug(res) tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") - + if ucase is not None and hasattr(ucase, 'noConn') and ucase.noConn == True: conn = None else: @@ -640,7 +646,7 @@ if __name__ == "__main__": else: tdLog.debug(res) tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") - + # run case if testCluster: @@ -692,6 +698,7 @@ if __name__ == "__main__": # tdDnodes.StopAllSigint() tdLog.info("Address sanitizer mode finished") else: - tdDnodes.stopAll() + if not crashGen: + tdDnodes.stopAll() tdLog.info("stop all td process finished") sys.exit(0) diff --git a/tests/unit-test/test.sh b/tests/unit-test/test.sh index 21461bc6a5..46fc0aedb3 100755 --- a/tests/unit-test/test.sh +++ b/tests/unit-test/test.sh @@ -7,10 +7,10 @@ function usage() { } ent=1 -while getopts "eh" opt; do +while getopts "e:h" opt; do case $opt in e) - ent=1 + ent="$OPTARG" ;; h) usage